from datetime import datetime
#from sklearn import metrics
from theano import tensor as T
import cPickle
import numpy
import os
import sys
import theano
import time
from collections import defaultdict
import subprocess
import pandas as pd
from tqdm import tqdm
import nn_layers
import sgd_trainer
import warnings
warnings.filterwarnings("ignore") # TODO remove
### THEANO DEBUG FLAGS
# theano.config.optimizer = 'fast_compile'
# theano.config.exception_verbosity = 'high'
def main():
# ZEROUT_DUMMY_WORD = False
ZEROUT_DUMMY_WORD = True
## Load data
# mode = 'TRAIN-ALL'
mode = 'TRAIN'
if len(sys.argv) > 1:
mode = sys.argv[1]
data_dir = sys.argv[2]
'''if mode == 'cui_train':
data_dir = mode
mode = 'TRAIN'
else:
data_dir = mode
'''
if mode not in ['TRAIN', 'TRAIN-ALL']:
print "ERROR! The two possible training settings are: ['TRAIN', 'TRAIN-ALL']"
sys.exit(1)
print "Running training in the {} setting".format(mode)
if mode in ['TRAIN-ALL']:
q_train = numpy.load(os.path.join(data_dir, 'train-all.questions.npy'))
a_train = numpy.load(os.path.join(data_dir, 'train-all.answers.npy'))
q_overlap_train = numpy.load(os.path.join(data_dir, 'train-all.q_overlap_indices.npy'))
a_overlap_train = numpy.load(os.path.join(data_dir, 'train-all.a_overlap_indices.npy'))
y_train = numpy.load(os.path.join(data_dir, 'train-all.labels.npy'))
else:
q_train = numpy.load(os.path.join(data_dir, 'train.questions.npy'))
a_train = numpy.load(os.path.join(data_dir, 'train.answers.npy'))
q_overlap_train = numpy.load(os.path.join(data_dir, 'train.q_overlap_indices.npy'))
a_overlap_train = numpy.load(os.path.join(data_dir, 'train.a_overlap_indices.npy'))
y_train = numpy.load(os.path.join(data_dir, 'train.labels.npy'))
q_dev = numpy.load(os.path.join(data_dir, 'dev.questions.npy'))
a_dev = numpy.load(os.path.join(data_dir, 'dev.answers.npy'))
q_overlap_dev = numpy.load(os.path.join(data_dir, 'dev.q_overlap_indices.npy'))
a_overlap_dev = numpy.load(os.path.join(data_dir, 'dev.a_overlap_indices.npy'))
y_dev = numpy.load(os.path.join(data_dir, 'dev.labels.npy'))
qids_dev = numpy.load(os.path.join(data_dir, 'dev.qids.npy'))
q_test = numpy.load(os.path.join(data_dir, 'test.questions.npy'))
a_test = numpy.load(os.path.join(data_dir, 'test.answers.npy'))
q_overlap_test = numpy.load(os.path.join(data_dir, 'test.q_overlap_indices.npy'))
a_overlap_test = numpy.load(os.path.join(data_dir, 'test.a_overlap_indices.npy'))
y_test = numpy.load(os.path.join(data_dir, 'test.labels.npy'))
qids_test = numpy.load(os.path.join(data_dir, 'test.qids.npy'))
# x_train = numpy.load(os.path.join(data_dir, 'train.overlap_feats.npy'))
# x_dev = numpy.load(os.path.join(data_dir, 'dev.overlap_feats.npy'))
# x_test = numpy.load(os.path.join(data_dir, 'test.overlap_feats.npy'))
# feats_ndim = x_train.shape[1]
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler()
# print "Scaling overlap features"
# x_train = scaler.fit_transform(x_train)
# x_dev = scaler.transform(x_dev)
# x_test = scaler.transform(x_test)
print 'y_train', numpy.unique(y_train, return_counts=True)
print 'y_dev', numpy.unique(y_dev, return_counts=True)
print 'y_test', numpy.unique(y_test, return_counts=True)
print 'q_train', q_train.shape
print 'q_dev', q_dev.shape
print 'q_test', q_test.shape
print 'a_train', a_train.shape
print 'a_dev', a_dev.shape
print 'a_test', a_test.shape
## Get the word embeddings from the nnet trained on SemEval
# ndim = 40
# nnet_outdir = 'exp/ndim=60;batch=100;max_norm=0;learning_rate=0.1;2014-12-02-15:53:14'
# nnet_fname = os.path.join(nnet_outdir, 'nnet.dat')
# params_fname = os.path.join(nnet_outdir, 'best_dev_params.epoch=00;batch=14640;dev_f1=83.12;test_acc=85.00.dat')
# train_nnet, test_nnet = nn_layers.load_nnet(nnet_fname, params_fname)
numpy_rng = numpy.random.RandomState(123)
q_max_sent_size = q_train.shape[1]
a_max_sent_size = a_train.shape[1]
# print 'max', numpy.max(a_train)
# print 'min', numpy.min(a_train)
ndim = 5
print "Generating random vocabulary for word overlap indicator features with dim:", ndim
dummy_word_id = numpy.max(a_overlap_train)
# vocab_emb_overlap = numpy_rng.uniform(-0.25, 0.25, size=(dummy_word_id+1, ndim))
print "Gaussian"
vocab_emb_overlap = numpy_rng.randn(dummy_word_id+1, ndim) * 0.25
# vocab_emb_overlap = numpy_rng.randn(dummy_word_id+1, ndim) * 0.05
# vocab_emb_overlap = numpy_rng.uniform(-0.25, 0.25, size=(dummy_word_id+1, ndim))
vocab_emb_overlap[-1] = 0
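# A hedged worked example of the initialisation above (toy size, not taken from
# the data files): if dummy_word_id were 2, vocab_emb_overlap would be a 3 x 5
# matrix of zero-mean Gaussian values with std 0.25, whose last row (index 2,
# the dummy/padding token) is all zeros, so padded positions add nothing.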
# Load word2vec embeddings
fname = os.path.join(data_dir, 'emb_vec_50.bin.npy')
print "Loading word embeddings from", fname
vocab_emb = numpy.load(fname)
ndim = vocab_emb.shape[1]
dummpy_word_idx = numpy.max(a_train)
print "Word embedding matrix size:", vocab_emb.shape
x = T.dmatrix('x')
x_q = T.lmatrix('q')
x_q_overlap = T.lmatrix('q_overlap')
x_a = T.lmatrix('a')
x_a_overlap = T.lmatrix('a_overlap')
y = T.ivector('y')
#######
n_outs = 2
n_epochs = 25
batch_size = 50
learning_rate = 0.1
max_norm = 0
print 'batch_size', batch_size
print 'n_epochs', n_epochs
print 'learning_rate', learning_rate
print 'max_norm', max_norm
## 1st conv layer.
ndim = vocab_emb.shape[1] + vocab_emb_overlap.shape[1]
### Nonlinearity type
# activation = nn_layers.relu_f
activation = T.tanh
dropout_rate = 0.5
nkernels = 50
q_k_max = 1
a_k_max = 1
# filter_widths = [3,4,5]
q_filter_widths = [3]
a_filter_widths = [3]
###### QUESTION ######
lookup_table_words = nn_layers.LookupTableFastStatic(W=vocab_emb, pad=max(q_filter_widths)-1)
lookup_table_overlap = nn_layers.LookupTableFast(W=vocab_emb_overlap, pad=max(q_filter_widths)-1)
lookup_table = nn_layers.ParallelLookupTable(layers=[lookup_table_words, lookup_table_overlap])
num_input_channels = 1
input_shape = (batch_size, num_input_channels, q_max_sent_size + 2*(max(q_filter_widths)-1), ndim)
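# Hedged worked example of the shape arithmetic above (toy sentence length,
# assumed 50-dim word vectors): with q_max_sent_size = 33, q_filter_widths = [3]
# and ndim = 50 + 5 = 55, the lookup tables pad max(3) - 1 = 2 dummy tokens on
# each side, so input_shape = (50, 1, 33 + 2*2, 55) = (50, 1, 37, 55).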
conv_layers = []
for filter_width in q_filter_widths:
filter_shape = (nkernels, num_input_channels, filter_width, ndim)
conv = nn_layers.Conv2dLayer(rng=numpy_rng, filter_shape=filter_shape, input_shape=input_shape)
non_linearity = nn_layers.NonLinearityLayer(b_size=filter_shape[0], activation=activation)
pooling = nn_layers.KMaxPoolLayer(k_max=q_k_max)
conv2dNonLinearMaxPool = nn_layers.FeedForwardNet(layers=[conv, non_linearity, pooling])
conv_layers.append(conv2dNonLinearMaxPool)
join_layer = nn_layers.ParallelLayer(layers=conv_layers)
flatten_layer = nn_layers.FlattenLayer()
nnet_q = nn_layers.FeedForwardNet(layers=[
lookup_table,
join_layer,
flatten_layer,
])
nnet_q.set_input((x_q, x_q_overlap))
######
###### ANSWER ######
lookup_table_words = nn_layers.LookupTableFastStatic(W=vocab_emb, pad=max(q_filter_widths)-1)
lookup_table_overlap = nn_layers.LookupTableFast(W=vocab_emb_overlap, pad=max(q_filter_widths)-1)
lookup_table = nn_layers.ParallelLookupTable(layers=[lookup_table_words, lookup_table_overlap])
# num_input_channels = len(lookup_table.layers)
input_shape = (batch_size, num_input_channels, a_max_sent_size + 2*(max(a_filter_widths)-1), ndim)
conv_layers = []
for filter_width in a_filter_widths:
filter_shape = (nkernels, num_input_channels, filter_width, ndim)
conv = nn_layers.Conv2dLayer(rng=numpy_rng, filter_shape=filter_shape, input_shape=input_shape)
non_linearity = nn_layers.NonLinearityLayer(b_size=filter_shape[0], activation=activation)
pooling = nn_layers.KMaxPoolLayer(k_max=a_k_max)
conv2dNonLinearMaxPool = nn_layers.FeedForwardNet(layers=[conv, non_linearity, pooling])
conv_layers.append(conv2dNonLinearMaxPool)
join_layer = nn_layers.ParallelLayer(layers=conv_layers)
flatten_layer = nn_layers.FlattenLayer()
nnet_a = nn_layers.FeedForwardNet(layers=[
lookup_table,
join_layer,
flatten_layer,
])
nnet_a.set_input((x_a, x_a_overlap))
#######
# print 'nnet_q.output', nnet_q.output.ndim
q_logistic_n_in = nkernels * len(q_filter_widths) * q_k_max
a_logistic_n_in = nkernels * len(a_filter_widths) * a_k_max
# dropout_q = nn_layers.FastDropoutLayer(rng=numpy_rng)
# dropout_a = nn_layers.FastDropoutLayer(rng=numpy_rng)
# dropout_q.set_input(nnet_q.output)
# dropout_a.set_input(nnet_a.output)
# feats_nout = 10
# x_hidden_layer = nn_layers.LinearLayer(numpy_rng, n_in=feats_ndim, n_out=feats_nout, activation=activation)
# x_hidden_layer.set_input(x)
# feats_nout = feats_ndim
### Dropout
# classifier = nn_layers.PairwiseLogisticWithFeatsRegression(q_in=logistic_n_in,
# a_in=logistic_n_in,
# n_in=feats_nout,
# n_out=n_outs)
# # classifier.set_input((dropout_q.output, dropout_a.output, x_hidden_layer.output))
# classifier.set_input((dropout_q.output, dropout_a.output, x))
# # train_nnet = nn_layers.FeedForwardNet(layers=[nnet_q, nnet_a, x_hidden_layer, dropout_q, dropout_a, classifier],
# train_nnet = nn_layers.FeedForwardNet(layers=[nnet_q, nnet_a, dropout_q, dropout_a, classifier],
# name="Training nnet")
# test_classifier = nn_layers.PairwiseLogisticWithFeatsRegression(q_in=logistic_n_in,
# a_in=logistic_n_in,
# n_in=feats_nout,
# n_out=n_outs,
# W=classifier.W,
# W_feats=classifier.W_feats,
# b=classifier.b)
# # test_classifier.set_input((nnet_q.output, nnet_a.output, x_hidden_layer.output))
# test_classifier.set_input((nnet_q.output, nnet_a.output, x))
# # test_nnet = nn_layers.FeedForwardNet(layers=[nnet_q, nnet_a, x_hidden_layer, test_classifier],
# test_nnet = nn_layers.FeedForwardNet(layers=[nnet_q, nnet_a, test_classifier],
# name="Test nnet")
#########
# pairwise_layer = nn_layers.PairwiseMultiOnlySimWithFeatsLayer(q_in=q_logistic_n_in,
pairwise_layer = nn_layers.PairwiseNoFeatsLayer(q_in=q_logistic_n_in,
# pairwise_layer = nn_layers.PairwiseWithFeatsLayer(q_in=q_logistic_n_in,
# pairwise_layer = nn_layers.PairwiseOnlySimWithFeatsLayer(q_in=q_logistic_n_in,
a_in=a_logistic_n_in)
pairwise_layer.set_input((nnet_q.output, nnet_a.output))
# n_in = q_logistic_n_in + a_logistic_n_in + feats_ndim + a_logistic_n_in
# n_in = q_logistic_n_in + a_logistic_n_in + feats_ndim + 50
# n_in = q_logistic_n_in + a_logistic_n_in + feats_ndim + 1
n_in = q_logistic_n_in + a_logistic_n_in + 1
# n_in = feats_ndim + 1
# n_in = feats_ndim + 50
hidden_layer = nn_layers.LinearLayer(numpy_rng, n_in=n_in, n_out=n_in, activation=activation)
hidden_layer.set_input(pairwise_layer.output)
classifier = nn_layers.LogisticRegression(n_in=n_in, n_out=n_outs)
classifier.set_input(hidden_layer.output)
train_nnet = nn_layers.FeedForwardNet(layers=[nnet_q, nnet_a, pairwise_layer, hidden_layer, classifier],
# train_nnet = nn_layers.FeedForwardNet(layers=[nnet_q, nnet_a, x_hidden_layer, classifier],
name="Training nnet")
test_nnet = train_nnet
#######
print train_nnet
params = train_nnet.params
ts = datetime.now().strftime('%Y-%m-%d-%H.%M.%S')
nnet_outdir = 'exp.out/ndim={};batch={};max_norm={};learning_rate={};{}'.format(ndim, batch_size, max_norm, learning_rate, ts)
if not os.path.exists(nnet_outdir):
os.makedirs(nnet_outdir)
nnet_fname = os.path.join(nnet_outdir, 'nnet.dat')
print "Saving to", nnet_fname
cPickle.dump([train_nnet, test_nnet], open(nnet_fname, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
total_params = sum([numpy.prod(param.shape.eval()) for param in params])
print 'Total params number:', total_params
cost = train_nnet.layers[-1].training_cost(y)
# y_train_counts = numpy.unique(y_train, return_counts=True)[1].astype(numpy.float32)
# weights_data = numpy.sum(y_train_counts) / y_train_counts
# weights_data_norm = numpy.linalg.norm(weights_data)
# weights_data /= weights_data_norm
# print 'weights_data', weights_data
# weights = theano.shared(weights_data, borrow=True)
# cost = train_nnet.layers[-1].training_cost_weighted(y, weights=weights)
predictions = test_nnet.layers[-1].y_pred
predictions_prob = test_nnet.layers[-1].p_y_given_x[:,-1]
### L2 regularization
# L2_word_emb = 1e-4
# L2_conv1d = 3e-5
# # L2_softmax = 1e-3
# L2_softmax = 1e-4
# print "Regularizing nnet weights"
# for w in train_nnet.weights:
# L2_reg = 0.
# if w.name.startswith('W_emb'):
# L2_reg = L2_word_emb
# elif w.name.startswith('W_conv1d'):
# L2_reg = L2_conv1d
# elif w.name.startswith('W_softmax'):
# L2_reg = L2_softmax
# elif w.name == 'W':
# L2_reg = L2_softmax
# print w.name, L2_reg
# cost += T.sum(w**2) * L2_reg
# batch_x = T.dmatrix('batch_x')
batch_x_q = T.lmatrix('batch_x_q')
batch_x_a = T.lmatrix('batch_x_a')
batch_x_q_overlap = T.lmatrix('batch_x_q_overlap')
batch_x_a_overlap = T.lmatrix('batch_x_a_overlap')
batch_y = T.ivector('batch_y')
# updates = sgd_trainer.get_adagrad_updates(cost, params, learning_rate=learning_rate, max_norm=max_norm, _eps=1e-6)
updates = sgd_trainer.get_adadelta_updates(cost, params, rho=0.95, eps=1e-6, max_norm=max_norm, word_vec_name='W_emb')
inputs_pred = [batch_x_q,
batch_x_a,
batch_x_q_overlap,
batch_x_a_overlap,
# batch_x,
]
givens_pred = {x_q: batch_x_q,
x_a: batch_x_a,
x_q_overlap: batch_x_q_overlap,
x_a_overlap: batch_x_a_overlap,
# x: batch_x
}
inputs_train = [batch_x_q,
batch_x_a,
batch_x_q_overlap,
batch_x_a_overlap,
# batch_x,
batch_y,
]
givens_train = {x_q: batch_x_q,
x_a: batch_x_a,
x_q_overlap: batch_x_q_overlap,
x_a_overlap: batch_x_a_overlap,
# x: batch_x,
y: batch_y}
train_fn = theano.function(inputs=inputs_train,
outputs=cost,
updates=updates,
givens=givens_train)
pred_fn = theano.function(inputs=inputs_pred,
outputs=predictions,
givens=givens_pred)
pred_prob_fn = theano.function(inputs=inputs_pred,
outputs=predictions_prob,
givens=givens_pred)
def predict_batch(batch_iterator):
preds = numpy.hstack([pred_fn(batch_x_q, batch_x_a, batch_x_q_overlap, batch_x_a_overlap) for batch_x_q, batch_x_a, batch_x_q_overlap, batch_x_a_overlap, _ in batch_iterator])
return preds[:batch_iterator.n_samples]
def predict_prob_batch(batch_iterator):
preds = numpy.hstack([pred_prob_fn(batch_x_q, batch_x_a, batch_x_q_overlap, batch_x_a_overlap) for batch_x_q, batch_x_a, batch_x_q_overlap, batch_x_a_overlap, _ in batch_iterator])
return preds[:batch_iterator.n_samples]
train_set_iterator = sgd_trainer.MiniBatchIteratorConstantBatchSize(numpy_rng, [q_train, a_train, q_overlap_train, a_overlap_train, y_train], batch_size=batch_size, randomize=True)
dev_set_iterator = sgd_trainer.MiniBatchIteratorConstantBatchSize(numpy_rng, [q_dev, a_dev, q_overlap_dev, a_overlap_dev, y_dev], batch_size=batch_size, randomize=False)
test_set_iterator = sgd_trainer.MiniBatchIteratorConstantBatchSize(numpy_rng, [q_test, a_test, q_overlap_test, a_overlap_test, y_test], batch_size=batch_size, randomize=False)
labels = sorted(numpy.unique(y_test))
print 'labels', labels
def map_score(qids, labels, preds):
qid2cand = defaultdict(list)
for qid, label, pred in zip(qids, labels, preds):
qid2cand[qid].append((pred, label))
average_precs = []
for qid, candidates in qid2cand.iteritems():
average_prec = 0
running_correct_count = 0
for i, (score, label) in enumerate(sorted(candidates, reverse=True), 1):
if label > 0:
running_correct_count += 1
average_prec += float(running_correct_count) / i
average_precs.append(average_prec / (running_correct_count + 1e-6))
map_score = sum(average_precs) / len(average_precs)
return map_score
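# Hedged usage sketch of map_score() above on hand-made toy data (never called
# during training; the numbers are illustrative only).
def _map_score_example():
    qids = [1, 1, 2, 2]
    labels = [1, 0, 0, 1]
    preds = [0.9, 0.1, 0.8, 0.7]
    # q1 ranks its relevant candidate 1st (AP = 1.0), q2 ranks it 2nd
    # (AP = 0.5), so MAP = (1.0 + 0.5) / 2 ~= 0.75.
    return map_score(qids, labels, preds)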
print "Zero out dummy word:", ZEROUT_DUMMY_WORD
if ZEROUT_DUMMY_WORD:
W_emb_list = [w for
<filename>sumatra/launch.py
"""
The launch module handles launching of simulations/analyses as sub-processes, and
obtaining information about the platform(s) on which the simulations are run.
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from builtins import object
import platform
import socket
import subprocess
import os
from sumatra.programs import Executable, MatlabExecutable
from sumatra.dependency_finder.matlab import save_dependencies
import warnings
from . import tee
import logging
from sumatra.core import have_internet_connection, component, component_type, get_registered_components
logger = logging.getLogger("Sumatra")
class PlatformInformation(object):
"""
A simple container for information about the machine and environment the
computations are being performed on/in.
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
#platform.mac_ver()
#platform.win32_ver()
#platform.dist()
#platform.libc_ver()
# Python compile options? distutils.sys_config?
# some numpy information?
# numpy.distutils.system_info import get_info
# get_info('blas_opt')
def check_files_exist(*paths):
"""
Check that the given paths exist, raising IOError for any that do not.
"""
for path in paths:
if not os.path.exists(path):
raise IOError("%s does not exist." % path)
@component_type
class LaunchMode(object):
"""
Base class for launch modes (serial, distributed, batch, ...)
"""
required_attributes = ("check_files", "generate_command")
def __init__(self, working_directory=None, options=None):
self.working_directory = working_directory or os.getcwd()
self.options = options
def __getstate__(self):
"""
Since each subclass has different attributes, we provide this method
as a standard way of obtaining these attributes, for database storage,
etc. Returns a dict.
"""
return {'working_directory': self.working_directory,
'options': self.options}
def pre_run(self, executable):
"""Run tasks before the simulation/analysis proper.""" # e.g. nrnivmodl
# this implementation is a temporary hack. "pre_run" should probably be an Executable instance, not a string
if hasattr(executable, "pre_run"):
p = subprocess.Popen(executable.pre_run, shell=True, stdout=None,
stderr=None, close_fds=True, cwd=self.working_directory)
result = p.wait()
def check_files(self, executable, main_file):
"""Check that all files exist and are accessible."""
raise NotImplementedError("must be impemented by sub-classes")
def generate_command(self, paths):
"""Return a string containing the command to be launched."""
raise NotImplementedError("must be impemented by sub-classes")
def run(self, executable, main_file, arguments, append_label=None):
"""
Run a computation in a shell, with the given executable, script and
arguments. If `append_label` is provided, it is appended to the
command line. Return resultcode.
"""
self.check_files(executable, main_file)
cmd = self.generate_command(executable, main_file, arguments)
if append_label:
cmd += " " + append_label
if 'matlab' in executable.name.lower():
''' We execute Matlab and save the dependencies at the same time,
to avoid opening a Matlab shell twice '''
result, output = save_dependencies(cmd, main_file)
else:
result, output = tee.system2(cmd, cwd=self.working_directory, stdout=True) # cwd only relevant for local launch, not for MPI, for example
self.stdout_stderr = "".join(output)
return result
def __key(self):
state = self.__getstate__()
return tuple([self.__class__]
+ [(k, state[k]) for k in sorted(state.keys())])
def __eq__(self, other):
if type(self) == type(other):
return self.__key() == other.__key()
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.__key())
def get_platform_information(self):
"""
Return a list of `PlatformInformation` objects, containing information
about the machine(s) and environment(s) the computations are being
performed on/in.
"""
network_name = platform.node()
bits, linkage = platform.architecture()
if have_internet_connection():
try:
ip_addr = socket.gethostbyname(network_name) # any way to control the timeout?
except socket.gaierror: # see http://stackoverflow.com/questions/166506/finding-local-ip-addresses-in-python
ip_addr = "127.0.0.1"
else:
ip_addr = "127.0.0.1"
return [PlatformInformation(architecture_bits=bits,
architecture_linkage=linkage,
machine=platform.machine(),
network_name=network_name,
ip_addr=ip_addr,
processor=platform.processor(),
release=platform.release(),
system_name=platform.system(),
version=platform.version())]
# maybe add system time?
def get_type(self):
return self.__class__.__name__
@component
class SerialLaunchMode(LaunchMode):
"""
Enable running serial computations.
"""
name = "serial"
def __str__(self):
return "serial"
def check_files(self, executable, main_file):
if main_file is not None:
check_files_exist(executable.path, *main_file.split())
else:
check_files_exist(executable.path)
def generate_command(self, executable, main_file, arguments):
if main_file is not None:
if isinstance(executable, MatlabExecutable):
#if sys.platform == 'win32' or sys.platform == 'win64':
cmd = "%s -nodesktop -r \"%s('%s')\"" % (executable.name, main_file.split('.')[0], arguments) # only for Windows
# cmd = "%s -nodesktop -r \"%s('%s')\"" %(executable.name, main_file.split('.')[0], 'in.param') # only for Windows
else:
cmd = "%s %s %s %s" % (executable.path, executable.options, main_file, arguments)
else:
if executable.path == executable.name: # temporary hack
cmd = "./%s %s %s" % (executable.path, executable.options, arguments)
else:
cmd = "%s %s %s" % (executable.path, executable.options, arguments)
return cmd
generate_command.__doc__ = LaunchMode.generate_command.__doc__
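# Hedged usage sketch (not part of Sumatra): the command string SerialLaunchMode
# builds for a plain script. The executable attributes below are illustrative
# assumptions; the doubled space comes from the empty `options` string.
def _serial_launch_example():
    class _FakeExecutable(object):
        name = "python"
        path = "/usr/bin/python"
        options = ""
    mode = SerialLaunchMode(working_directory="/tmp")
    return mode.generate_command(_FakeExecutable(), "main.py", "in.param")
    # -> "/usr/bin/python  main.py in.param"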
@component
class DistributedLaunchMode(LaunchMode):
"""
Enable running distributed computations using MPI.
The current implementation is specific to MPICH2, but this will be
generalised in future releases.
"""
name = "distributed"
def __init__(self, n=1, mpirun="mpiexec", hosts=[], options=None,
pfi_path="/usr/local/bin/pfi.py", working_directory=None):
"""
`n` - the number of hosts to run on.
`mpirun` - the path to the mpirun or mpiexec executable. If a full path
is not given, the user's PATH will be searched.
`hosts` - a list of host names to run on. **Currently not used.**
`options` - extra command line options for mpirun/mpiexec
`pfi_path` - the path to the pfi.py script provided with Sumatra, which
should be installed on every node and is used to obtain
platform information.
`working_directory` - directory in which to run on the hosts
"""
LaunchMode.__init__(self, working_directory, options)
class MPI(Executable):
name = mpirun
default_executable_name = mpirun
if os.path.exists(mpirun): # mpirun is a full path
mpi_cmd = MPI(path=mpirun)
else:
mpi_cmd = MPI(path=None)
self.mpirun = mpi_cmd.path
# should warn if mpirun not found
self.hosts = hosts
self.n = n
self.mpi_info = {}
self.pfi_path = pfi_path
def __str__(self):
return "distributed (n=%d, mpiexec=%s, hosts=%s)" % (self.n, self.mpirun, self.hosts)
def check_files(self, executable, main_file):
if main_file is not None:
check_files_exist(self.mpirun, executable.path, *main_file.split())
else:
check_files_exist(self.mpirun, executable.path)
def generate_command(self, executable, main_file, arguments):
if hasattr(executable, "mpi_options"):
mpi_options = executable.mpi_options
else:
mpi_options = self.options or ""
#cmd = "%s -np %d -host %s %s %s %s" % (self.mpirun,
# self.n,
# ",".join(hosts),
# executable.path,
# main_file,
# parameter_file)
cmd = "%s -n %d --wdir %s" % ( # MPICH2-specific - need to generalize
self.mpirun,
self.n,
self.working_directory
)
if main_file is not None:
cmd += " %s %s %s %s %s" % (executable.path, mpi_options,
executable.options, main_file, arguments)
else:
cmd += " %s %s %s %s" % (executable.path, mpi_options,
executable.options, arguments)
return cmd
generate_command.__doc__ = LaunchMode.generate_command.__doc__
def get_platform_information(self):
try:
import mpi4py.MPI
MPI = mpi4py.MPI
except ImportError:
MPI = None
warnings.warn("mpi4py is not available, so Sumatra is not able to obtain platform information for remote nodes.")
platform_information = LaunchMode.get_platform_information()
if MPI:
import sys
comm = MPI.COMM_SELF.Spawn(sys.executable,
args=[self.pfi_path],
maxprocs=self.n)
platform_information = []
for rank in range(self.n):
platform_information.append(PlatformInformation(**comm.recv(source=rank, tag=rank).values()[0]))
comm.Disconnect()
return platform_information
get_platform_information.__doc__ = LaunchMode.get_platform_information.__doc__ + """
Requires the script :file:`pfi.py` to be placed on the user's path on
each node of the machine.
This is currently not useful, as I don't think there is any guarantee
that we get the same *n* nodes that the command is run on. Need to look
more into this.
"""
def __getstate__(self):
"""Return a dict containing the values needed to recreate this instance."""
return {'mpirun': self.mpirun, 'n': self.n, 'hosts': self.hosts,
'pfi_path': self.pfi_path, 'options': self.options,
'working_directory': self.working_directory}
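# Hedged usage sketch (illustrative; assumes /usr/bin/mpiexec exists on this
# machine): the MPICH2-style command DistributedLaunchMode builds for a
# 4-process run. The extra spaces come from the empty option strings.
def _distributed_launch_example():
    class _FakeExecutable(object):
        name = "python"
        path = "/usr/bin/python"
        options = ""
    mode = DistributedLaunchMode(n=4, mpirun="/usr/bin/mpiexec",
                                 working_directory="/tmp")
    return mode.generate_command(_FakeExecutable(), "model.py", "in.param")
    # -> "/usr/bin/mpiexec -n 4 --wdir /tmp /usr/bin/python   model.py in.param"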
@component
class SlurmMPILaunchMode(LaunchMode):
"""
Enable launching MPI computations with SLURM
(https://computing.llnl.gov/linux/slurm/)
"""
name = "slurm-mpi"
def __init__(self, n=1, mpirun="mpiexec", working_directory=None, options=None):
"""
`n` - the number of hosts to run on.
`mpirun` - the path to the mpirun or mpiexec executable. If a full path
is not given, the user's PATH will be searched.
`options` - extra options for SLURM
`working_directory` - directory in which to run on the hosts
"""
LaunchMode.__init__(self, working_directory, options)
class MPI(Executable):
name = mpirun
default_executable_name = mpirun
if os.path.exists(mpirun): # mpirun is a full path
mpi_cmd = MPI(path=mpirun)
else:
mpi_cmd = MPI(path=None)
self.mpirun = mpi_cmd.path
# should warn if mpirun not found
assert n > 0
self.n = int(n)
def __str__(self):
return "slurm-mpi"
def check_files(self, executable, main_file):
# should really check that files exist on whatever system SLURM sends the job to
if main_file is not None:
check_files_exist(executable.path, *main_file.split())
else:
check_files_exist(executable.path)
def generate_command(self, executable, main_file, arguments):
if hasattr(executable, "mpi_options"):
mpi_options = executable.mpi_options
else:
mpi_options = ""
cmd = "salloc -n %d %s %s --wdir %s" % (
self.n,
self.options or "",
self.mpirun,
self.working_directory
)
if main_file is not None:
cmd += " %s %s %s %s %s" % (executable.path, mpi_options,
executable.options, main_file, arguments)
else:
cmd += " %s %s %s %s" % (executable.path, mpi_options,
executable.options,
slug="not existing"
)
}
}
}
),
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Some title"),
("State", "Open")
])
],
{
"Assessment": {
"row_warnings": {
errors.WRONG_VALUE_DEFAULT.format(
line=3,
column_name="State",
value="open",
)
}
}
}
),
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Title", "New Assessment"),
("Creators", "<EMAIL>"),
("Assignees", "<EMAIL>"),
("Verifiers", "<EMAIL>"),
("Finished Date", "7/3/2015"),
("Verified Date", "5/14/2016"),
]),
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Verified Date", "5/15/2016"),
])
],
{
"Assessment": {
"row_warnings": {
errors.UNMODIFIABLE_COLUMN.format(
line=3,
column_name="Verified Date"
)
}
}
}
),
)
@ddt.unpack
def test_assessment_warnings_errors(self, assessment_data, expected_errors):
""" Test full assessment import with warnings and errors
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
"""
if len(assessment_data) == 1:
if "Audit*" not in assessment_data[0]:
audit = factories.AuditFactory()
assessment_data[0]["Audit*"] = audit.slug
response = self.import_data(*assessment_data)
else:
audit = factories.AuditFactory()
assessment_data[0]["Audit*"] = audit.slug
self.import_data(assessment_data[0])
assessment = all_models.Assessment.query.filter_by(
title="New Assessment").first()
assessment_data[1]["Code*"] = assessment.slug
assessment_data[1]["Audit*"] = audit.slug
response = self.import_data(assessment_data[1])
self._check_csv_response(response, expected_errors)
def test_blank_optional_field(self):
"""Test warnings while import assessment with blank IssueTracker fields"""
audit = factories.AuditFactory()
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Title*", "ass1"),
("Creators*", "<EMAIL>"),
("Assignees*", "<EMAIL>"),
("Component ID", ""),
("Hotlist ID", ""),
("Priority", ""),
("Severity", ""),
("Issue Type", ""),
("Ticket Title", ""),
("Ticket Tracker Integration", ""),
]))
self._check_csv_response(resp, {})
def test_mapping_control_through_snapshot(self):
"Test for add mapping control on assessment"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertFalse(db.session.query(
all_models.Relationship.get_related_query(
assessment, all_models.Snapshot()
).exists()).first()[0])
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("map:Control versions", control.slug),
]))
self.assertTrue(db.session.query(
all_models.Relationship.get_related_query(
assessment, all_models.Snapshot()
).exists()).first()[0])
@ddt.data(
("yes", True),
("no", True),
("invalid_data", False),
)
@ddt.unpack
def test_import_view_only_field(self, value, is_valid):
"Test import view only fields"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("archived", value),
]))
row_warnings = []
if not is_valid:
row_warnings.append(u"Line 3: Field 'Archived' contains invalid data. "
u"The value will be ignored.")
self.assertEqual(
[{
u'ignored': 0,
u'updated': 1,
u'block_errors': [],
u'name': u'Assessment',
u'created': 0,
u'deleted': 0,
u'deprecated': 0,
u'row_warnings': row_warnings,
u'rows': 1,
u'block_warnings': [],
u'row_errors': [],
}],
resp)
@ddt.data((False, "no", 0, 1, []),
(True, "yes", 1, 0, [u'Line 3: Importing archived instance is '
u'prohibited. The line will be ignored.']))
@ddt.unpack
# pylint: disable=too-many-arguments
def test_import_archived_assessment(self, is_archived, value, ignored,
updated, row_errors):
"""Test archived assessment import procedure"""
with factories.single_commit():
audit = factories.AuditFactory(archived=is_archived)
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("archived", value),
("description", "archived assessment description")
]))
self.assertEqual([{
u'ignored': ignored,
u'updated': updated,
u'block_errors': [],
u'name': u'Assessment',
u'created': 0,
u'deleted': 0,
u'deprecated': 0,
u'row_warnings': [],
u'rows': 1,
u'block_warnings': [],
u'row_errors': row_errors
}], resp)
def test_create_new_assessment_with_mapped_control(self):
"Test for creation assessment with mapped controls"
with factories.single_commit():
audit = factories.AuditFactory()
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertFalse(db.session.query(
all_models.Relationship.get_related_query(
all_models.Assessment(), all_models.Snapshot()
).exists()).first()[0])
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", all_models.Person.query.all()[0].email),
("Creators", all_models.Person.query.all()[0].email),
("Title", "Strange title"),
("map:Control versions", control.slug),
]))
self._check_csv_response(response, {})
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Strange title"
).first()
self.assertTrue(db.session.query(all_models.Relationship.get_related_query(
assessment, all_models.Snapshot()).exists()).first()[0]
)
def test_create_import_assignee(self):
"Test for creation assessment with mapped assignees"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
audit = factories.AuditFactory()
assignee_id = factories.PersonFactory(name=name, email=email).id
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", email),
("Creators", all_models.Person.query.all()[0].email),
("Title", "Strange title"),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Strange title"
).first()
self._test_assigned_user(assessment, assignee_id, "Assignees")
def test_create_import_creators(self):
"Test for creation assessment with mapped creator"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
audit = factories.AuditFactory()
creator_id = factories.PersonFactory(name=name, email=email).id
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", all_models.Person.query.all()[0].email),
("Creators", email),
("Title", "Strange title"),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Strange title"
).first()
self._test_assigned_user(assessment, creator_id, "Creators")
def test_update_import_creators(self):
"Test for creation assessment with mapped creator"
slug = "TestAssessment"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
assessment = factories.AssessmentFactory(slug=slug)
creator_id = factories.PersonFactory(name=name, email=email).id
self._test_assigned_user(assessment, None, "Creators")
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Creators", email),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, creator_id, "Creators")
def test_update_import_assignee(self):
"Test for creation assessment with mapped creator"
slug = "TestAssessment"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
assessment = factories.AssessmentFactory(slug=slug)
assignee_id = factories.PersonFactory(name=name, email=email).id
self._test_assigned_user(assessment, None, "Assignees")
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Assignees", email),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, assignee_id, "Assignees")
def test_update_import_verifiers(self):
"""Test import does not delete verifiers if empty value imported"""
slug = "TestAssessment"
assessment = factories.AssessmentFactory(slug=slug)
name = "test_name"
email = "<EMAIL>"
verifier = factories.PersonFactory(name=name, email=email)
verifier_id = verifier.id
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Verifiers", email),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, verifier_id, "Verifiers")
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Verifiers", ""),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, verifier_id, "Verifiers")
@ddt.data(
(
"Created Date",
lambda: datetime.date.today() - datetime.timedelta(7),
),
)
@ddt.unpack
def test_update_non_changeable_field(self, field, value_creator):
"""Test importing Assessment's "Created Date" field"""
slug = "TestAssessment"
with factories.single_commit():
value = value_creator()
factories.AssessmentFactory(
slug=slug,
modified_by=factories.PersonFactory(email="<EMAIL>"),
)
data = [{
"object_name": "Assessment",
"fields": "all",
"filters": {
"expression": {
"left": "code",
"op": {"name": "="},
"right": slug
},
}
}]
before_update = self.export_parsed_csv(data)["Assessment"][0][field]
with freezegun.freeze_time("2017-9-10"):
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
(field, value)
]))
self.assertEqual(before_update,
self.export_parsed_csv(data)["Assessment"][0][field])
@ddt.data(
("Last Updated By", "<EMAIL>"),
)
@ddt.unpack
def test_exportable_only_updated_by(self, field, value):
"""Test exportable only "Last Updated By" field"""
slug = "TestAssessment"
with factories.single_commit():
factories.AssessmentFactory(
slug=slug,
modified_by=factories.PersonFactory(email="<EMAIL>"),
)
data = [{
"object_name": "Assessment",
"fields": "all",
"filters": {
"expression": {
"left": "code",
"op": {"name": "="},
"right": slug
},
}
}]
before_update = self.export_parsed_csv(data)["Assessment"][0][field]
self.assertEqual(before_update, "<EMAIL>")
self.import_data(collections.OrderedDict(
[
("object_type", "Assessment"),
("Code*", slug),
(field, value)
]
))
after_update = self.export_parsed_csv(data)["Assessment"][0][field]
self.assertEqual(after_update, "<EMAIL>")
def test_import_last_deprecated_date(self):
"""Last Deprecated Date on assessment should be non editable."""
with factories.single_commit():
with freezegun.freeze_time("2017-01-01"):
assessment = factories.AssessmentFactory(status="Deprecated")
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("code", assessment.slug),
("Last Deprecated Date", "02/02/2017"),
]))
result = all_models.Assessment.query.get(assessment.id)
self.assertEqual(1, len(resp))
self.assertEqual(1, resp[0]["updated"])
self.assertEqual(result.end_date, datetime.date(2017, 1, 1))
@ddt.data(*all_models.Assessment.VALID_STATES)
def test_import_set_up_deprecated(self, start_state):
"""Update assessment from {0} to Deprecated."""
with factories.single_commit():
assessment = factories.AssessmentFactory(status=start_state)
resp = self.import_data(
collections.OrderedDict([
("object_type", "Assessment"),
("code", assessment.slug),
("State", all_models.Assessment.DEPRECATED),
]))
self.assertEqual(1, len(resp))
self.assertEqual(1, resp[0]["updated"])
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.DEPRECATED)
def test_asmnt_cads_update_completed(self):
"""Test update of assessment without cads."""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title="CAD",
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Text",
mandatory=True,
)
data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Title", "Test title"),
("State", "Completed"),
("CAD", "Some value"),
])
response = self.import_data(data)
self._check_csv_response(response, {})
def test_import_complete_missing_answers_warnings(self):
"""Test complete assessment with missing mandatory CAD comments."""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title="CAD",
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Dropdown",
multi_choice_options="no,yes",
multi_choice_mandatory="0,1"
)
data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Title", "Test title"),
("State", "Completed"),
("CAD", "yes"),
])
expected_response = {
"Assessment": {
"row_warnings": {
errors.NO_REQUIRED_ANSWERS_WARNING.format(line=3),
}
}
}
response = self.import_data(data)
self._check_csv_response(response, expected_response)
def test_import_asmnt_rev_query_count(self):
"""Test only one revisions insert query should occur while importing."""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
cad_names = ("CAD1", "CAD2", "CAD3")
for name in cad_names:
factories.CustomAttributeDefinitionFactory(
title=name,
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Text",
mandatory=True,
)
data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Title", "Test title"),
("State", "Completed"),
("CAD1", "Some value 1"),
("CAD2", "Some value 2"),
("CAD3", "Some value 3"),
])
with utils.QueryCounter() as counter:
response = self.import_data(data)
self._check_csv_response(response, {})
rev_insert_queries = [query for query in counter.queries
if 'INSERT INTO revisions' in query]
self.assertEqual(len(rev_insert_queries), 1)
def test_asmt_verified_date_update_from_none(self):
"""Test that we able to set Verified Date if it is empty"""
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verifiers", "<EMAIL>"),
("Verified Date", "01/22/2019"),
]))
self._check_csv_response(response, {})
self.assertEqual(
all_models.Assessment.query.get(assessment.id).verified_date,
datetime.datetime(2019, 1, 22))
def test_asmt_complete_verified(self):
"""Test assessment moved to Complete and Verified state"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
slug = assessment.slug
user = all_models.Person.query.first()
assessment.add_person_with_role_name(user, "Verifiers")
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", slug),
("State", "Completed"),
("Verified Date", "01/22/2019"),
]))
self._check_csv_response(response, {})
assmt = all_models.Assessment.query.one()
self.assertTrue(assmt.verified)
self.assertEqual(assmt.status, "Completed")
def test_asmt_verified_date_readonly(self):
"""Test that Verified Date is readonly"""
audit = factories.AuditFactory()
date = datetime.datetime(2019, 05, 22)
assessment = \
factories.AssessmentFactory(audit=audit,
verified_date=date)
expected_warnings =
<filename>ate_asc_run.py
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import time
import numpy as np
import torch
import torch.nn.functional as F
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from ate_asc_modeling import BertForSequenceLabeling
from optimization import BertAdam
from tokenization import BertTokenizer
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from ate_asc_features import ATEASCProcessor, convert_examples_to_features, get_labels
from utils import get_logger, get_aspect_chunks, get_polaity_chunks
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x / warmup
return max(0, (1.0 - x) / (1.0 - warmup))
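# Hedged worked example of the schedule above, using a warmup fraction of 0.1
# (matching the script's --warmup_proportion default): the LR multiplier ramps
# linearly from 0 to 1 over the first 10% of progress, then decays linearly
# back to 0 at the end of training.
def _warmup_linear_example():
    assert abs(warmup_linear(0.05, warmup=0.1) - 0.5) < 1e-9   # mid-warmup
    assert abs(warmup_linear(0.55, warmup=0.1) - 0.5) < 1e-9   # mid-decay
    assert warmup_linear(1.0, warmup=0.1) == 0.0               # fully decayed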
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def parse_input_parameter():
global logger
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased.")
parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
parser.add_argument("--task_name", default="ate_asc", type=str, required=False, help="The name of the task to train.")
parser.add_argument("--data_name", default="", type=str, required=False, help="The name of the task to train.")
parser.add_argument("--train_file", default=None, type=str, required=False)
parser.add_argument("--valid_file", default=None, type=str, required=False)
parser.add_argument("--test_file", default=None, type=str, required=False)
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
parser.add_argument('--num_thread_reader', type=int, default=0, help='')
parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n")
parser.add_argument("--verbose_logging", default=False, action='store_true',
help="If true, all of the warnings related to data processing will be printed. A number of warnings are expected for a normal CoQA evaluation.")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument("--use_ghl", action='store_true', help="Whether use weighted cross entropy to decoder.")
parser.add_argument("--use_vat", action='store_true', help="Whether use vat to encoder.")
parser.add_argument("--use_decoder", default=True, type=str2bool, help="Whether use decoder to asc.")
parser.add_argument("--num_decoder_layer", default=2, type=int, help="When `use_decoder' is True, set the number of decoder.")
parser.add_argument("--decoder_shared_layer", default=3, type=int, help="When `use_decoder' is True, set the number of shared encoder.")
args = parser.parse_args()
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger = get_logger(os.path.join(args.output_dir, "log.txt"))
logger.info("Effective parameters:")
for key in sorted(args.__dict__):
logger.info(" {}: {}".format(key, args.__dict__[key]))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
task_config = {
"use_ghl": args.use_ghl,
"use_vat": args.use_vat,
"num_decoder_layer": args.num_decoder_layer,
"decoder_shared_layer": args.decoder_shared_layer,
}
return args, task_config
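# Hedged note on the batch-size division above (illustrative numbers): with
# --train_batch_size 32 and --gradient_accumulation_steps 4, each forward/backward
# pass processes 32 // 4 = 8 examples and gradients are accumulated over 4 such
# steps before an optimizer update, so the effective batch size stays 32.
def _effective_batch_example(train_batch_size=32, gradient_accumulation_steps=4):
    micro_batch = train_batch_size // gradient_accumulation_steps   # 8
    return micro_batch, micro_batch * gradient_accumulation_steps   # (8, 32)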
def init_device(args):
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
return device, n_gpu
def init_model(args, num_tp_labels, task_config, device, n_gpu):
if args.init_model:
model_state_dict = torch.load(args.init_model, map_location='cpu')
if args.num_decoder_layer != 0:
layer_num_list = [layer_num for layer_num in range(args.num_decoder_layer)]
new_model_state_dict = {}
model_state_dict_exsit_keys = model_state_dict.keys()
max_bert_layer = max([int(k_str.split(".")[3]) for k_str in model_state_dict_exsit_keys if "bert.encoder.layer" in k_str])
for k_str in model_state_dict_exsit_keys:
new_model_state_dict[k_str] = model_state_dict[k_str]
for layer_num in layer_num_list:
bert_key_name = "bert.encoder.layer.{}".format(max_bert_layer - args.num_decoder_layer + 1 + layer_num)
mirror_key_name = "bert.encoder.layer.{}".format(layer_num)
if k_str.find(bert_key_name) == 0:
new_key_name = k_str.replace(bert_key_name, mirror_key_name).replace("bert.encoder", "decoder.decoder")
if "attention.self" in new_key_name:
new_key_name_sufx = new_key_name.replace("attention.self", "slf_attn.att")
new_model_state_dict[new_key_name_sufx] = model_state_dict[k_str].clone()
new_key_name_sufx = new_key_name.replace("attention.self", "enc_attn.att")
new_model_state_dict[new_key_name_sufx] = model_state_dict[k_str].clone()
elif "attention.output" in new_key_name:
new_key_name_sufx = new_key_name.replace("attention.output", "slf_attn.output")
new_model_state_dict[new_key_name_sufx] = model_state_dict[k_str].clone()
new_key_name_sufx = new_key_name.replace("attention.output", "enc_attn.output")
new_model_state_dict[new_key_name_sufx] = model_state_dict[k_str].clone()
else:
new_model_state_dict[new_key_name] = model_state_dict[k_str].clone()
if k_str.find("bert.embeddings") == 0:
new_key_name = k_str.replace("bert.embeddings", "decoder.embeddings")
new_model_state_dict[new_key_name] = model_state_dict[k_str].clone()
model_state_dict = new_model_state_dict
else:
model_state_dict = None
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else \
os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
model = BertForSequenceLabeling.from_pretrained(args.bert_model, cache_dir=cache_dir, state_dict=model_state_dict,
num_tp_labels=num_tp_labels, task_config=task_config)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
return model
def prep_optimizer(args, model, num_train_optimization_steps):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
if args.use_decoder:
no_decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)]
decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)]
no_decay_bert_param_tp = [(n, p) for n, p in no_decay_param_tp if "bert." in n]
no_decay_nobert_param_tp = [(n, p) for n, p in no_decay_param_tp if "bert." not in n]
decay_bert_param_tp = [(n, p) for n, p in decay_param_tp if "bert." in n]
decay_nobert_param_tp = [(n, p) for n, p in decay_param_tp if "bert." not in n]
coef_lr = 10.
optimizer_grouped_parameters = [
{'params': [p for n, p in no_decay_bert_param_tp], 'weight_decay': 0.01},
{'params': [p for n, p in no_decay_nobert_param_tp], 'weight_decay': 0.01, 'lr': args.learning_rate * coef_lr},
{'params': [p for n, p in decay_bert_param_tp], 'weight_decay': 0.0},
{'params': [p for n, p in decay_nobert_param_tp], 'weight_decay': 0.0, 'lr': args.learning_rate * coef_lr}
]
else:
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
return optimizer
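# Hedged sketch of the grouping rule in prep_optimizer() above. Note the
# slightly confusing variable names there: `no_decay_param_tp` actually holds
# the parameters that DO receive weight decay (0.01), while names containing
# 'bias' or 'LayerNorm.*' get weight decay 0.0; non-BERT (decoder) parameters
# additionally get a 10x learning rate. The parameter names below are
# illustrative assumptions, not taken from a real checkpoint.
def _weight_decay_grouping_example():
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    names = ["bert.encoder.layer.0.attention.self.query.weight",
             "bert.encoder.layer.0.attention.output.LayerNorm.weight",
             "decoder.decoder.0.slf_attn.att.query.bias"]
    return {n: (0.0 if any(nd in n for nd in no_decay) else 0.01) for n in names}
    # -> weight: 0.01, LayerNorm.weight: 0.0, bias: 0.0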
def dataloader_train(args, tokenizer, file_path):
dataset = ATEASCProcessor(file_path=file_path, set_type="train")
logger.info("Loaded train file: {}".format(file_path))
at_labels, as_labels = get_labels(dataset.label_tp_list)
features = convert_examples_to_features(dataset.examples, (at_labels, as_labels),
args.max_seq_length, tokenizer, verbose_logging=args.verbose_logging)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_at_label_ids = torch.tensor([f.at_label_id for f in features], dtype=torch.long)
all_as_label_ids = torch.tensor([f.as_label_id for f in features], dtype=torch.long)
all_label_mask = torch.tensor([f.label_mask for f in features], dtype=torch.long)
all_label_mask_X = torch.tensor([f.label_mask_X for f in features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_at_label_ids, all_as_label_ids,
all_label_mask, all_label_mask_X)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=args.num_thread_reader)
return dataloader, train_data, (at_labels, as_labels)
def dataloader_val(args, tokenizer, file_path, label_tp_list, set_type="val"):
dataset = ATEASCProcessor(file_path=file_path, set_type=set_type)
logger.info("Loaded val file: {}".format(file_path))
eval_features = convert_examples_to_features(dataset.examples, label_tp_list,
args.max_seq_length, tokenizer, verbose_logging=args.verbose_logging)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_at_label_ids = torch.tensor([f.at_label_id for f in eval_features], dtype=torch.long)
all_as_label_ids = torch.tensor([f.as_label_id for f in eval_features], dtype=torch.long)
all_label_mask = torch.tensor([f.label_mask for f in eval_features], dtype=torch.long)
all_label_mask_X = torch.tensor([f.label_mask_X for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_at_label_ids, all_as_label_ids,
all_label_mask,
if 'quiet' not in self.params.keys():
RuntimeUtils.info("Tunnel is %s." % (self.is_tunnel_enabled and 'enabled' or 'disabled'), format=self.params.get('output-format', 'text'))
# Register, if requested
if 'activation-keys' in self.params.keys():
self._do_register_at_sm(force=('override' in self.params.keys()))
# Execute some command, if any
if self.params.get('command'):
self._do_command()
# Performing tasks
def _do_register_at_sm(self, force=False):
"""
Register remote node at SUSE Manager.
"""
        ssl_certificate = "/srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT" # Point of configuration in the future.
if self.environ.target_os.lower() == 'linux':
# Register remote against SUSE Manager
if self.ssh.execute("rpm -qa | grep sm-client-tools || echo 'absent'") == 'absent':
RuntimeUtils.info('Installing SM Client on target machine', format=self.params.get('output-format', 'text'))
remote_pkg_pth = '/tmp/sm-client-tools.%s.%s.rpm' % (time.time(), random.randint(0xff, 0xffff)) # Temporary unique (hopefully) name on remote filesystem.
local_pkg_pth = "/srv/www/htdocs/pub/bootstrap/sm-client-tools.rpm"
if not os.path.exists(local_pkg_pth):
                    raise Exception('SUSE Manager Client package does not exist.')
self.ssh.push_file(local_pkg_pth, remote_pkg_pth)
self.ssh.execute('/bin/rpm -ivh %s; rm %s' % (remote_pkg_pth, remote_pkg_pth))
if self.ssh.execute('test -e /usr/bin/sm-client && echo "installed" || echo "failed"') == 'failed':
raise Exception("SM Client installation failed. :-(")
else:
if 'quiet' not in self.params.keys():
RuntimeUtils.info("SM Client has been installed", format=self.params.get('output-format', 'text'))
else:
if 'quiet' not in self.params.keys():
RuntimeUtils.info('SM Client is already installed', format=self.params.get('output-format', 'text'))
# Get SSL certificate fingerprint
ssl_fp = os.popen("/usr/bin/openssl x509 -noout -in %s -fingerprint" % ssl_certificate).read().split('=')[-1].strip()
if not 'quiet' in self.params.keys():
RuntimeUtils.info("SSL certificate: %s" % ssl_fp, format=self.params.get('output-format', 'text'))
# If we need sudo, we need to know it is there
if getpass.getuser() != 'root':
if self.ssh.execute("test -e /usr/bin/sudo && echo 'OK'") != 'OK':
raise Exception("You cannot run anything on \"%s\" as \"%s\" without sudo installed!" % (self.hostname, getpass.getuser()))
# Check if we have permissions
if self.ssh.execute("/usr/bin/sudo -S true < /dev/null &>/dev/null && echo 'OK'") != 'OK':
raise Exception("Not enough privileges for user \"%s\" on \"%s\" node." % (getpass.getuser(), self.hostname))
# Register machine
remote_tmp_logfile = '/tmp/.sm-client-tools.%s.%s.log' % (time.strftime('%Y%m%d.%H%M%S.backup', time.localtime()), random.randint(0xff, 0xffff))
overrides = []
if self.is_tunnel_enabled:
overrides.append('--cfg=noSSLServerURL,http://%s:%s/' % (self.localhostname, self.tunnel.http_port))
overrides.append('--cfg=serverURL,https://%s:%s/XMLRPC' % (self.localhostname, self.tunnel.https_port))
prefix = getpass.getuser() != 'root' and "/usr/bin/sudo -n " or ""
self.ssh.execute("%s/usr/bin/sm-client --output-format=xml --hostname=%s --activation-keys=%s --ssl-fingerprint=%s %s > %s" %
(prefix, self.localhostname, self.params['activation-keys'], ssl_fp, ' '.join(overrides), remote_tmp_logfile))
smc_out = SMClientOutput(self.ssh.execute("test -e %s && /bin/cat %s && rm %s || echo '<?xml version=\"1.0\" encoding=\"UTF-8\"?><log/>'" %
(remote_tmp_logfile, remote_tmp_logfile, remote_tmp_logfile)))
if smc_out.events.get(SMClientOutput.ERROR):
RuntimeUtils.warning("Remote machine was not happy:", format=self.params.get('output-format', 'text'))
for error_message in smc_out.events.get(SMClientOutput.ERROR):
RuntimeUtils.error(error_message, format=self.params.get('output-format', 'text'))
raise Exception("Registration failed. Please login to the %s and find out why." % self.hostname)
elif smc_out.events.get(SMClientOutput.WARNING) and not 'quiet' in self.params.keys():
for warning_message in smc_out.events.get(SMClientOutput.WARNING):
RuntimeUtils.warning(self.hostname + ": " + warning_message, format=self.params.get('output-format', 'text'))
# No success blah-blah-blah here.
else:
# Solaris fans, do it yourself. :-)
raise Exception('I cannot register %s against SUSE Manager as of today.' % self.environ.target_os)
if 'quiet' not in self.params.keys():
RuntimeUtils.info("Remote machine %s has been registered successfully." % self.hostname, format=self.params.get('output-format', 'text'))
def _do_tunneling(self, check_only=False):
"""
Enable or disable tunnel.
"""
if not self.ssh:
raise Exception("SSH link was not initialized.")
# Get content of the /etc/hosts on the remote machine
random.seed()
        token = '# <PASSWORD> %s.%s' % (time.time(), random.randint(0xff, 0xffff))
etc_hosts = self.ssh.execute("test -e /etc/hosts && cat /etc/hosts || echo '%s'" % token) + ""
self.is_tunnel_enabled = False
if etc_hosts.find(token) > -1:
raise Exception('Tunneling cannot be enabled on this system.')
else:
for line in map(lambda item:item.strip().lower(), etc_hosts.split("\n")):
if not line.startswith('#') and line.find(self.localhostname) > -1:
self.is_tunnel_enabled = True
break
# Setup SSH if tunneling around
if self.is_tunnel_enabled:
self.ssh.set_tunneling(((self.tunnel.http_port, 80, self.localhostname),
(self.tunnel.https_port, 443, self.localhostname),))
# Exit if this is only check/setup
if check_only:
return
# Skip procedure if nothing needed to do.
enable = self.params.get('tunneling', '') == 'yes'
RuntimeUtils.info('%s tunneling on %s node.' % ((enable and 'Enabling' or 'Disabling'), self.hostname),
format=self.params.get('output-format', 'text'))
if enable:
if self.is_tunnel_enabled:
                RuntimeUtils.warning('Tunneling on the node "%s" is already enabled.' % self.hostname,
format=self.params.get('output-format', 'text'))
return
else:
if not self.is_tunnel_enabled:
                RuntimeUtils.warning('Tunneling on the node "%s" is already disabled.' % self.hostname,
format=self.params.get('output-format', 'text'))
return
self.is_tunnel_enabled = enable
hosts = []
for line in etc_hosts.split("\n"):
if not line.strip().startswith('#'):
if enable and line.lower().find('localhost') + 1:
line = map(lambda item:item.strip(), filter(None, line.split(' ')))
line.append(self.localhostname)
line = ' '.join(line)
else:
line = ' '.join(filter(None, line.replace(self.localhostname, '').split(' '))).strip()
hosts.append(line)
etc_hosts = '\n'.join(hosts)
# Save to tempfile
tmpfd, tmppth = tempfile.mkstemp(prefix='sm-push-hosts-%s-' % self.hostname)
tmpfh = os.fdopen(tmpfd, "w")
tmpfh.write(etc_hosts + "\n")
tmpfh.close()
# Push the file to the remote
remote_hosts_pth = '/tmp/.sm-push-hosts-%s.%s' % (time.time(), random.randint(0xff, 0xffff))
self.ssh.push_file(tmppth, remote_hosts_pth)
# Push failed?
if (self.ssh.execute("test -e %s && echo 'OK' || echo '%s'" % (remote_hosts_pth, token)) + "").strip() != 'OK':
raise Exception('Unable to send new configuration to "%s" node.' % self.hostname)
# Replace remote file
if 'safe' in self.params.keys():
backup_suffix = time.strftime('%Y%m%d.%H%M%S.backup', time.localtime())
res = self.ssh.execute('mv /etc/hosts /etc/hosts.%s' % backup_suffix)
if res:
RuntimeUtils.error(res, format=self.params.get('output-format', 'text'))
self._cleanup(tmppth)
raise Exception('Remote node error.')
if not 'quiet' in self.params.keys():
RuntimeUtils.info('Previous file "/etc/hosts" has been saved as "/etc/hosts.%s"' % backup_suffix,
format=self.params.get('output-format', 'text'))
res = self.ssh.execute('mv %s /etc/hosts; chmod 0644 /etc/hosts' % remote_hosts_pth)
if res:
RuntimeUtils.error(res, format=self.params.get('output-format', 'text'))
self._cleanup(tmppth)
raise Exception('Remote node error.')
# Restart DNS cache
self._restart_dns_cache()
# Enable or disable 3rd party services
self._enable_services(not enable)
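    # Added illustration (the hostname below is made up): with
    # self.localhostname == "manager.example.com", enabling the tunnel turns a
    # remote /etc/hosts line such as
    #     127.0.0.1   localhost
    # into
    #     127.0.0.1 localhost manager.example.com
    # so the node resolves the SUSE Manager host to itself and talks through
    # the SSH-forwarded HTTP/HTTPS ports set up above; disabling strips the
    # alias from every non-comment line again.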
def _enable_services(self, enable):
"""
Enable or disable various 3rd party services that should not run when SSH tunneling is around.
"""
if self.environ.target_os.lower() == 'linux':
for service_name, service_exec in [('OSAD client-side', '/etc/init.d/osad'),
('Red Hat Network update query', '/etc/init.d/rhnsd'),]:
if self.ssh.execute('test -e %s && %s %s;chkconfig %s %s || echo "absent"' %(service_exec,
service_exec,
(enable and 'start' or 'stop'),
(enable and '-a' or '-d'),
service_exec.split('/')[-1])) != 'absent':
RuntimeUtils.info('%s %s service' % ((enable and 'Enabling' or 'Stopping'), service_name),
format=self.params.get('output-format', 'text'))
else:
RuntimeUtils.warning('Additional service operations are not supported for %s on %s.' % (self.environ.target_os, self.environ.target_arch),
format=self.params.get('output-format', 'text'))
def _restart_dns_cache(self):
"""
Restart DNS cache.
On Linux it is nscd.
"""
if self.environ.target_os.lower() == 'linux':
if self.ssh.execute("test -e /etc/init.d/nscd && echo 'exists' || echo 'absent'") == 'exists':
RuntimeUtils.info('Restarting name service cache daemon on remote node.',
format=self.params.get('output-format', 'text'))
                self.ssh.execute('/etc/init.d/nscd restart')
else:
RuntimeUtils.warning('DNS cache operations are not supported for %s on %s.' % (self.environ.target_os, self.environ.target_arch),
format=self.params.get('output-format', 'text'))
def _cleanup(self, *fpth):
"""
Cleanup all given file paths.
"""
for fp in fpth:
if os.path.exists(fp):
try:
os.unlink(fp)
except Exception, ex:
RuntimeUtils.warning('Could not remove local temporary file "%s"' % fp,
format=self.params.get('output-format', 'text'))
RuntimeUtils.error(str(ex), format=self.params.get('output-format', 'text'))
def _do_command(self):
"""
Execute a custom command on the remote machine.
"""
if not self.ssh:
raise Exception("SSH link was not initialized.")
if not 'quiet' in self.params.keys():
RuntimeUtils.info('Executing command: "' + self.params.get('command') + '"',
format=self.params.get('output-format', 'text'))
RuntimeUtils.info('Remote response below as follows:',
format=self.params.get('output-format', 'text'))
response = self.ssh.execute(self.params.get('command'))
# Output "frame" only during verbose mode (default)
if not 'quiet' in self.params.keys() and self.params.get('output-format', 'text') == 'text':
print >> sys.stdout, "-" * 80
if self.params.get('output-format', 'text') == 'xml':
RuntimeUtils.info(response or "", format='xml')
else:
print >> sys.stdout, response
if not 'quiet' in self.params.keys() and self.params.get('output-format', 'text') == 'text':
print >> sys.stdout, "-" * 80
class RuntimeUtils:
"""
All 'orphan' functions are here. :)
"""
@classmethod
def is_root(self):
"""
Returns True if user is root.
"""
return getpass.getuser() == 'root'
@classmethod
def get_event_time(self):
"""
Format a time for an event, usually used in XML messages.
"""
return time.strftime('%Y.%m.%d %T', time.localtime())
@classmethod
def header(self):
"""
Displays header.
"""
print >> sys.stdout, "SUSE Manager Task Push. Version 0.1\n" \
+ "Copyright (c) 2013 by SUSE Linux Products GmbH\n"
@classmethod
def usage(self):
"""
Displays usage and exits.
"""
print >> sys.stderr, "Usage:\n\tsm-push <options>\n"
print >> sys.stderr, "Options:"
print >> sys.stderr, "\t--hostname=<DNS name>\t\tSpecify target hostname."
print >> sys.stderr, "\t--activation-keys=<list>\tComma separated list of activation keys.\n" \
+ "\t\t\t\t\tIf parameter specified, machine will be registered against SUSE Manager."
print >> sys.stderr, "\t--override\t\t\tIgnore conditional request of an operation and always perform it."
print >> sys.stderr, "\t--command=\"<command>\"\t\tCustom command to be executed on the target machine.\n" \
+ "\t\t\t\t\tPlease escape quote and/or double-quote inside, if required."
print >> sys.stderr, "\t--tunneling=<yes|no>\t\tEnable or disable tunneling."
print >> sys.stderr, "\t--output-format=<xml|text>\tOutput format. Default is \"text\"."
print >> sys.stderr, "\t--safe\t\t\t\tMake a backup copy of previous configuration."
print >> sys.stderr, "\t--quiet\t\t\t\tProduce no output at all except occurred errors and command result."
print >> sys.stderr, "\t--help\t\t\t\tDisplays this message.\n\n"
print >> sys.stderr, "Environment variables:"
        print
token_pedido_ssr = ""
def ssr_estoque():
return{
"cep_destino": "07929050",
"url": "https://ssr.e-peca.com.br/checkout/pagamento",
"produtos": [
{
"id": "78",
"id_produto_variacao": "78",
"preco_final": 78.99,
"quantidade": 2,
"codigosoriginais": [
"43957-86E",
"43957-86C",
"43957-86B",
"43957-86D",
"43957-86A",
"43957-86"
]
},
{
"id": "101",
"id_produto_variacao": "101",
"preco_final": 220.89,
"quantidade": 1,
"codigosoriginais": [
"46127-04"
]
}
]
}
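# Hedged usage sketch (added; the endpoint path below is an assumption, not
# taken from this file): these builders produce JSON request bodies for API
# tests, e.g. posted with the `requests` library.
def _example_post_estoque(base_url):
    import requests
    # "/estoque" is a placeholder path; substitute the real stock-check endpoint.
    resp = requests.post(base_url + "/estoque", json=ssr_estoque())
    return resp.status_code, resp.json()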
def ssr_verifica_frete_1():
return{
"cep_destino": "07929050",
"url": "https://ssr.e-peca.com.br/checkout/carrinho",
"produtos": [
{
"id": "101",
"id_produto_variacao": "101",
"preco_final": 220.89,
"quantidade": 1,
"codigosoriginais": [
"46127-04"
]
}
]
}
def ssr_pedido_valido_visa_1():
return{
"cliente": {
"id_identificador": "45368311850",
"email": "<EMAIL>",
"nome_razao": "<NAME>",
"endereco_cobranca": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "Francisco Morato",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
},
"endereco_entrega": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "Francisco Morato",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
}
},
"forma_pagamento": {
"id": 1,
"nome": "Visa Crédito",
"n_parcelas": 1,
"detalhes_pagamento": {
"hash_cartao": "c8773dbc4625366b5d3a499298feeadf939c86aefa42e4472ae8dffba70027f3b7304c77c25452274e8d3d09a376b9841997d99a8c0a1e76af1a5c77a79ee4dd",
"pagador": {
"nome": "<NAME>",
"identificador": "45368311850",
"dt_nascimento": "29/09/1994",
"telefone": {
"ddd": "11",
"numero": "994757788"
}
}
}
},
"cupom_desconto": {
"nome": "",
"valor_total_produtos": 220.89
},
"origens": [
{
"cep_origem": "80740000",
"cep_destino": "07929050",
"url": "",
"produtos": [
{
"id": "101",
"id_produto_variacao": 101,
"preco_final": 220.89,
"quantidade": 1,
"codigosoriginais": [
"46127-04"
],
"subgrupo":"Mesas de Garfo",
"segmento":"Motocicleta",
"descricao":"Mesa de Garfo",
"largura":20.5,
"altura":25,
"profundidade":19,
"peso":2.01,
"foto":"101/360/Lv1/img01.jpg",
"INFO_BOUTIQUE":True,
"id_produto":101,
"id_produto_pai":101,
"id_loja":4,
"id_empresa":4,
"pn_produto":"46127-04",
"preco":220.89,
"preco_desconto":220.89,
"preco_original":220.89,
"desconto":0,
"estoque":3,
"estoque_minimo":0,
"info_boutique":True,
"info_gerais":{
"segmento": "Motocicleta",
"foto": "101/360/Lv1/img01.jpg",
"largura": 20.5,
"peso": 2.01,
"altura": 25,
"subgrupo": "Mesas de Garfo",
"tamanho_unico": "",
"profundidade": 19,
"descricao": "Mesa de Garfo"
},
"categoria": [
],
"cep_origem":"80740000",
"vendido_por":"The One Harley-Davidson - Curitiba/PR",
"alias":"PR-CWB-01",
"status":"ATIVO",
"vitrine":{
"peso": 2.01,
"altura": 25,
"largura": 20.5,
"produto": 101,
"descricao": "Mesa de Garfo",
"profundidade": 19,
"segmento": "Motocicleta",
"foto": "101/360/Lv1/img01.jpg",
"subgrupo": "Mesas de Garfo"
},
"data_atualizacao": ""
}
],
"embalagem": [
{
"id": 2,
"nome": "<NAME>",
"largura": 55,
"altura": 36,
"profundidade": 28,
"peso_max_embalagem": 10,
"peso_max": 10,
"peso_itens": 2.111,
"volume_embalagem": 55440,
"volume_max": 55440,
"volume_itens": 9737.5,
"volume_utilizado": 9737.5,
"itens_embalagem": 1,
"itens_volume": 1,
"peso_utilizado": 21.11,
"peso_utilizado_percentual": 21.11,
"volume_utilizado_percentual": 17.56,
"produtos": [
{
"id": "101",
"id_produto_variacao": "101",
"segmento": "Motocicleta",
"subgrupo": "Mesas de Garfo",
"descricao": "Mesa de Garfo",
"codigosoriginais": [
"46127-04"
],
"foto":"101/360/Lv1/img01.jpg",
"vendido_por":"<NAME> - Curitiba/PR",
"id_loja":4,
"cep_origem":"80740000",
"INFO_BOUTIQUE":True,
"quantidade":1,
"preco_final":220.89,
"estoque":3,
"largura":20.5,
"altura":25,
"profundidade":19,
"peso":2.01,
"volume":9737.5,
"embalagem":[
],
"posicao":[
"altura",
"largura",
"profundidade"
]
}
],
"unidade":"cm cubicos",
"preco_final":220.89
}
],
"vendido_por":"<NAME>-Davidson - Curitiba/PR",
"frete":[
{
"empresa": "Correios",
"entrega_metodo_tipo": "EXPRESS",
"entrega_metodo_nome": "Correios Sedex",
"entrega_metodo_id": "2",
"entrega_custo_final": 100.47,
"entrega_estimada_dias": 6
}
]
}
]
}
def ssr_calculo_frete_1():
return{
"cliente": {
"id_identificador": "45368311850",
"email": "<EMAIL>",
"nome_razao": "<NAME>",
"endereco_cobranca": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "Francisco Morato",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
},
"endereco_entrega": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "<NAME>",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
}
},
"cupom_desconto": {
"nome": "",
"valor_total_produtos": 220.89
},
"origens": [
{
"cep_origem": "80740000",
"cep_destino": "07929050",
"url": "",
"produtos": [
{
"id": "101",
"id_produto_variacao": 101,
"preco_final": 220.89,
"quantidade": 1,
"codigosoriginais": [
"46127-04"
],
"subgrupo":"Mesas de Garfo",
"segmento":"Motocicleta",
"descricao":"Mesa de Garfo",
"largura":20.5,
"altura":25,
"profundidade":19,
"peso":2.01,
"foto":"101/360/Lv1/img01.jpg",
"INFO_BOUTIQUE":True,
"id_produto":101,
"id_produto_pai":101,
"id_loja":4,
"id_empresa":4,
"pn_produto":"46127-04",
"preco":220.89,
"preco_desconto":220.89,
"preco_original":220.89,
"desconto":0,
"estoque":3,
"estoque_minimo":0,
"info_boutique":True,
"info_gerais":{
"segmento": "Motocicleta",
"foto": "101/360/Lv1/img01.jpg",
"largura": 20.5,
"peso": 2.01,
"altura": 25,
"subgrupo": "Mesas de Garfo",
"tamanho_unico": "",
"profundidade": 19,
"descricao": "Mesa de Garfo"
},
"categoria": [
],
"cep_origem":"80740000",
"vendido_por":"<NAME> - Curitiba/PR",
"alias":"PR-CWB-01",
"status":"ATIVO",
"vitrine":{
"peso": 2.01,
"altura": 25,
"largura": 20.5,
"produto": 101,
"descricao": "Mesa de Garfo",
"profundidade": 19,
"segmento": "Motocicleta",
"foto": "101/360/Lv1/img01.jpg",
"subgrupo": "Mesas de Garfo"
},
"data_atualizacao": ""
}
],
"embalagem": [
{
"id": 2,
"nome": "<NAME>",
"largura": 55,
"altura": 36,
"profundidade": 28,
"peso_max_embalagem": 10,
"peso_max": 10,
"peso_itens": 2.111,
"volume_embalagem": 55440,
"volume_max": 55440,
"volume_itens": 9737.5,
"volume_utilizado": 9737.5,
"itens_embalagem": 1,
"itens_volume": 1,
"peso_utilizado": 21.11,
"peso_utilizado_percentual": 21.11,
"volume_utilizado_percentual": 17.56,
"produtos": [
{
"id": "101",
"id_produto_variacao": "101",
"segmento": "Motocicleta",
"subgrupo": "Mesas de Garfo",
"descricao": "<NAME>",
"codigosoriginais": [
"46127-04"
],
"foto":"101/360/Lv1/img01.jpg",
"vendido_por":"The One Harley-Davidson - Curitiba/PR",
"id_loja":4,
"cep_origem":"80740000",
"INFO_BOUTIQUE":True,
"quantidade":1,
"preco_final":220.89,
"estoque":3,
"largura":20.5,
"altura":25,
"profundidade":19,
"peso":2.01,
"volume":9737.5,
"embalagem":[
],
"posicao":[
"altura",
"largura",
"profundidade"
]
}
],
"unidade":"cm cubicos",
"preco_final":220.89
}
],
"vendido_por":"The One Harley-Davidson - Curitiba/PR",
"frete":[
{
"empresa": "Correios",
"entrega_metodo_tipo": "EXPRESS",
"entrega_metodo_nome": "Correios Sedex",
"entrega_metodo_id": "2",
"entrega_custo_final": 100.47,
"entrega_estimada_dias": 6
}
]
}
]
}
def ssr_calculo_frete_2():
return{
"cliente": {
"id_identificador": "45368311850",
"email": "<EMAIL>",
"nome_razao": "<NAME>",
"endereco_cobranca": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "<NAME>",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
},
"endereco_entrega": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "<NAME>",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
}
},
"cupom_desconto": {
"nome": "",
"valor_total_produtos": 119.25
},
"origens": [
{
"cep_origem": "80740000",
"cep_destino": "07929050",
"url": "",
"produtos": [
{
"id": "924",
"id_produto_variacao": 924,
"preco_final": 119.25,
"quantidade": 1,
"codigosoriginais": [
"67700043",
"67700043A",
"67700145",
"67700145A"
],
"subgrupo":"Faróis",
"segmento":"Motocicleta",
"descricao":"Farol",
"largura":19,
"altura":30,
"profundidade":11.5,
"peso":1.1,
"foto":"924/360/Lv1/img01.jpg",
"INFO_BOUTIQUE":True,
"id_produto":924,
"id_produto_pai":924,
"id_loja":4,
"id_empresa":4,
"pn_produto":"67700043",
"preco":119.25,
"preco_desconto":119.25,
"preco_original":119.25,
"desconto":0,
"estoque":5,
"estoque_minimo":0,
"info_boutique":True,
"info_gerais":{
"segmento": "Motocicleta",
"foto": "924/360/Lv1/img01.jpg",
"largura": 19,
"peso": 1.1,
"altura": 30,
"subgrupo": "Faróis",
"tamanho_unico": "",
"profundidade": 11.5,
"descricao": "Farol"
},
"categoria": [
],
"cep_origem":"80740000",
"vendido_por":"<NAME> - Curitiba/PR",
"alias":"PR-CWB-01",
"status":"ATIVO",
"vitrine":{
"peso": 1.1,
"altura": 30,
"largura": 19,
"produto": 924,
"descricao": "Farol",
"profundidade": 11.5,
"segmento": "Motocicleta",
"foto": "924/360/Lv1/img01.jpg",
"subgrupo": "Faróis"
},
"data_atualizacao": ""
}
],
"embalagem": [
{
"id": 2,
"nome": "<NAME>",
"largura": 55,
"altura": 36,
"profundidade": 28,
"peso_max_embalagem": 10,
"peso_max": 10,
"peso_itens": 1.155,
"volume_embalagem": 55440,
"volume_max": 55440,
"volume_itens": 6555,
"volume_utilizado": 6555,
"itens_embalagem": 1,
"itens_volume": 1,
"peso_utilizado": 11.55,
"peso_utilizado_percentual": 11.55,
"volume_utilizado_percentual": 11.82,
"produtos": [
{
"id": "924",
"id_produto_variacao": "924",
"segmento": "Motocicleta",
"subgrupo": "Faróis",
"descricao": "Farol",
"codigosoriginais": [
"67700043",
"67700043A",
"67700145",
"67700145A"
],
"foto":"924/360/Lv1/img01.jpg",
"vendido_por":"<NAME> - Curitiba/PR",
"id_loja":4,
"cep_origem":"80740000",
"INFO_BOUTIQUE":True,
"quantidade":1,
"preco_final":119.25,
"estoque":5,
"largura":19,
"altura":30,
"profundidade":11.5,
"peso":1.1,
"volume":6555,
"embalagem":[
],
"posicao":[
"altura",
"largura",
"profundidade"
]
}
],
"unidade":"cm cubicos",
"preco_final":119.25
}
],
"vendido_por":"<NAME> - Curitiba/PR",
"frete":[
{
"empresa": "Retirada Na Loja",
"entrega_custo_final": 0,
"entrega_estimada_dias": 1,
"entrega_metodo_id": 195,
"entrega_metodo_nome": "Retirada na Loja",
"entrega_metodo_tipo": "PICKUP"
}
]
}
]
}
def ssr_verifica_frete_2():
return{
"cep_destino": "07929050",
"url": "https://ssr.e-peca.com.br/checkout/pagamento",
"produtos": [
{
"id": "924",
"id_produto_variacao": "924",
"preco_final": 119.25,
"quantidade": 1,
"codigosoriginais": [
"67700043",
"67700043A",
"67700145",
"67700145A"
]
}
]
}
def ssr_pedido_valido_master_card_1():
return{
"cliente": {
"id_identificador": "45368311850",
"email": "<EMAIL>",
"nome_razao": "<NAME>",
"endereco_cobranca": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "Francisco Morato",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
},
"endereco_entrega": {
"uf": "SP",
"cep": "07929050",
"nro": "159",
"obs": None,
"bairro": "Jardim",
"cidade": "Francisco Morato",
"logradouro": "Rua das Palmeiras",
"complemento": "Próximo a escola"
}
},
"forma_pagamento": {
"id": 7,
"nome": "Mastercard Crédito",
"n_parcelas": 1,
"detalhes_pagamento": {
"hash_cartao": "9bbcb515c1bf24fb4b27f7a89a65d2dff89d6a6aeeb82a1fdce84bafa7fc688b791ffb2bc1fb6f38d4d5331f75ddd0008b0b4af76923f112cc7a47f6a9fcfc0d",
"pagador": {
"nome": "<NAME>",
"identificador": "45368311850",
"dt_nascimento": "29/09/1994",
"telefone": {
"ddd": "11",
"numero": "994757788"
}
}
}
},
"cupom_desconto": {
"nome": "",
"valor_total_produtos": 119.25
},
"origens": [
{
"cep_origem": "80740000",
"cep_destino": "07929050",
"url": "",
"produtos": [
{
"id": "924",
"id_produto_variacao": 924,
"preco_final": 119.25,
"quantidade": 1,
"codigosoriginais": [
"67700043",
"67700043A",
"67700145",
"67700145A"
],
"subgrupo":"Faróis",
"segmento":"Motocicleta",
"descricao":"Farol",
"largura":19,
"altura":30,
"profundidade":11.5,
"peso":1.1,
"foto":"924/360/Lv1/img01.jpg",
"INFO_BOUTIQUE":True,
"id_produto":924,
"id_produto_pai":924,
"id_loja":4,
"id_empresa":4,
"pn_produto":"67700043",
"preco":119.25,
"preco_desconto":119.25,
"preco_original":119.25,
"desconto":0,
"estoque":5,
"estoque_minimo":0,
"info_boutique":True,
"info_gerais":{
"segmento": "Motocicleta",
"foto": "924/360/Lv1/img01.jpg",
"largura": 19,
"peso": 1.1,
"altura": 30,
"subgrupo": "Faróis",
"tamanho_unico": "",
"profundidade": 11.5,
"descricao": "Farol"
},
"categoria": [
],
"cep_origem":"80740000",
"vendido_por":"<NAME> - Curitiba/PR",
"alias":"PR-CWB-01",
"status":"ATIVO",
"vitrine":{
"peso": 1.1,
"altura": 30,
"largura": 19,
"produto": 924,
"descricao": "Farol",
"profundidade": 11.5,
"segmento": "Motocicleta",
"foto": "924/360/Lv1/img01.jpg",
"subgrupo": "Faróis"
},
"data_atualizacao": ""
}
],
"embalagem": [
{
"id": 2,
"nome": "<NAME>",
"largura": 55,
"altura": 36,
"profundidade": 28,
"peso_max_embalagem": 10,
"peso_max": 10,
"peso_itens": 1.155,
"volume_embalagem": 55440,
"volume_max": 55440,
"volume_itens": 6555,
"volume_utilizado": 6555,
"itens_embalagem": 1,
"itens_volume": 1,
"peso_utilizado": 11.55,
"peso_utilizado_percentual": 11.55,
"volume_utilizado_percentual": 11.82,
"produtos": [
{
"id": "924",
"id_produto_variacao": "924",
"segmento": "Motocicleta",
"subgrupo": "Faróis",
"descricao": "Farol",
"codigosoriginais": [
"67700043",
"67700043A",
"67700145",
"67700145A"
],
"foto":"924/360/Lv1/img01.jpg",
"vendido_por":"The One Harley-Davidson - Curitiba/PR",
"id_loja":4,
"cep_origem":"80740000",
"INFO_BOUTIQUE":True,
"quantidade":1,
"preco_final":119.25,
"estoque":5,
"largura":19,
"altura":30,
"profundidade":11.5,
"peso":1.1,
"volume":6555,
"embalagem":[
],
"posicao":[
"altura",
"largura",
"profundidade"
]
}
],
"unidade":"cm cubicos",
"preco_final":119.25
}
],
"vendido_por":"The One Harley-Davidson - Curitiba/PR",
"frete":[
{
"empresa": "Retirada Na Loja",
"entrega_custo_final": 0,
"entrega_estimada_dias": | |
ConfigurationException(
'Expected training_main_memory_chunksize > training_chunk')
if self.guessing_secondary_training:
if ((not self.secondary_training) or
(not self.secondary_training_save_freqs)):
raise ConfigurationException(
'Expected secondary_training and secondary_training_save_freqs')
if self.sequence_model != Sequence.MANY_TO_MANY and\
self.sequence_model != Sequence.MANY_TO_ONE:
raise ConfigurationException(
"Configuration parameter 'sequence_model' can only be "
"'many_to_many' or 'many_to_one'")
def as_dict(self):
answer = dict(vars(ModelDefaults).copy())
answer.update(self.adict)
return {
key: value for key, value in answer.items() if (
key[0] != '_' and not hasattr(value, '__call__')
and not isinstance(value, staticmethod))}
def set_intermediate_info(self, key, value):
self._intermediate_data[key] = value
self._write_intermediate_data()
def get_intermediate_info(self, key):
self._check_if_should_reload()
try:
return self._intermediate_data[key]
except KeyError as e:
logging.error('Cannot find intermediate data %s. Looking in %s',
str(e), self.intermediate_fname)
raise
def override_from_commandline(self, cmdline):
answer = {}
for keyval in cmdline.split(';'):
if not keyval:
continue
key, _, value = keyval.partition('=')
answer[key] = type(getattr(self, key))(value)
self.adict.update(answer)
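    # Added illustration: the override string is a semicolon-separated list of
    # key=value pairs, and each value is cast to the type of the attribute it
    # replaces, e.g.
    #     config.override_from_commandline('hidden_size=256;dropout_ratio=0.25')
    # keeps hidden_size an int and dropout_ratio a float (assuming those are
    # the types of their defaults).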
def sequence_model_updates(self):
if self.sequence_model == Sequence.MANY_TO_MANY:
self.char_bag += PASSWORD_START
class BasePreprocessor():
def __init__(self, config=ModelDefaults()):
self.config = config
def begin(self, pwd_list):
raise NotImplementedError()
def begin_resetable(self, resetable):
self.begin(resetable.create_new())
def next_chunk(self):
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
def stats(self):
self.reset()
x_vec, _, _ = self.next_chunk()
count_instances = 0
while len(x_vec) != 0:
count_instances += len(x_vec)
x_vec, _, _ = self.next_chunk()
logging.info('Number of training instances %s', count_instances)
return count_instances
@staticmethod
def fromConfig(config):
if config.sequence_model == Sequence.MANY_TO_MANY:
return ManyToManyPreprocessor(config)
if config.sequence_model == Sequence.MANY_TO_ONE:
return Preprocessor(config)
raise ValueError('unknown sequence model: %s' % config.sequence_model)
class Preprocessor(BasePreprocessor):
def __init__(self, config=ModelDefaults()):
super().__init__(config)
self.chunk = 0
self.resetable_pwd_list = None
self.pwd_whole_list = None
self.pwd_freqs = None
self.chunked_pwd_list = None
def begin(self, pwd_list):
self.pwd_whole_list = list(pwd_list)
def begin_resetable(self, resetable):
self.resetable_pwd_list = resetable
self.reset()
def all_prefixes(self, pwd):
return [pwd[:i] for i in range(len(pwd))] + [pwd]
def all_suffixes(self, pwd):
return [pwd[i] for i in range(len(pwd))] + [PASSWORD_END]
def repeat_weight(self, pwd):
return [self.password_weight(pwd) for _ in range(len(pwd) + 1)]
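    # Added illustration: for pwd = 'abc' the three helpers above produce
    # parallel training rows (prefix -> next character, weighted by frequency):
    #   all_prefixes('abc')  -> ['', 'a', 'ab', 'abc']
    #   all_suffixes('abc')  -> ['a', 'b', 'c', PASSWORD_END]
    #   repeat_weight('abc') -> [w, w, w, w]   where w = password_weight('abc')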
def train_from_pwds(self, pwd_tuples):
self.pwd_freqs = dict(pwd_tuples)
pwds = list(map(lambda x: x[0], pwd_tuples))
return (itertools.chain.from_iterable(map(self.all_prefixes, pwds)),
itertools.chain.from_iterable(map(self.all_suffixes, pwds)),
itertools.chain.from_iterable(map(self.repeat_weight, pwds)))
def next_chunk(self):
if self.chunk * self.config.training_chunk >= len(self.pwd_whole_list):
if self.resetable_pwd_list is None:
return [], [], []
try:
new_iterator = self.chunked_pwd_list.__next__()
except StopIteration:
return [], [], []
self.begin(new_iterator)
self.reset_subiterator()
return self.next_chunk()
pwd_list = self.pwd_whole_list[
self.chunk * self.config.training_chunk:
min((self.chunk + 1) * self.config.training_chunk,
len(self.pwd_whole_list))]
self.chunk += 1
pwd_input, output, weight = self.train_from_pwds(pwd_list)
return (list(pwd_input), list(output), list(weight))
def password_weight(self, pwd):
if isinstance(pwd, tuple):
pwd = ''.join(pwd)
if pwd in self.pwd_freqs:
return self.pwd_freqs[pwd]
assert False, 'Cannot find frequency for password'
return 0.0
def reset(self):
if self.resetable_pwd_list is None:
self.reset_subiterator()
return
self.chunked_pwd_list = iter(grouper(
self.resetable_pwd_list.as_iterator(),
self.config.training_main_memory_chunksize))
try:
self.begin(self.chunked_pwd_list.__next__())
except StopIteration:
logging.warning('Password list has no passwords?')
self.pwd_whole_list = []
self.reset_subiterator()
def reset_subiterator(self):
self.chunk = 0
if self.config.randomize_training_order:
random.shuffle(self.pwd_whole_list)
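# Hedged usage sketch (added): a minimal pass over the chunked training data,
# mirroring what stats() does above. `pwd_tuples` is assumed to be a list of
# (password, frequency) pairs.
def _example_preprocess(pwd_tuples, config=ModelDefaults()):
    prep = Preprocessor(config)
    prep.begin(pwd_tuples)
    x_strs, y_strs, weights = prep.next_chunk()
    while len(x_strs) != 0:
        # x_strs: password prefixes, y_strs: next characters, weights: frequencies
        x_strs, y_strs, weights = prep.next_chunk()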
class Trainer():
def __init__(self, pwd_list, config=ModelDefaults(), multi_gpu=1):
self.config = config
self.chunk = 0
self.generation = 0
self.model = None
self.model_to_save = None
self.multi_gpu = multi_gpu
self.pwd_list = pwd_list
self.cumulative_chunks = 0
self.min_loss_early_stopping = float("inf")
self.poor_batches_early_stopping = 0
self.smoothened_loss = collections.deque(maxlen=int(config.chunk_print_interval))
if config.tensorboard:
self.callback = TensorBoard(config.tensorboard_dir)
self.train_log_names = []
self.test_log_names = []
else:
self.callback = None
self.train_log_names = None
self.test_log_names = None
self.ctable = CharacterTable.fromConfig(self.config)
self.feature_layers = []
self.classification_layers = []
def next_train_set_as_np(self):
x_strs, y_str_list, weight_list = self.pwd_list.next_chunk()
x_vec = self.prepare_x_data(x_strs)
y_vec = self.prepare_y_data(y_str_list)
weight_vec = np.zeros((len(weight_list)))
for i, weight in enumerate(weight_list):
weight_vec[i] = weight
return shuffle(x_vec, y_vec, weight_vec)
def prepare_x_data(self, x_strs):
return self.ctable.encode_many(x_strs)
def prepare_y_data(self, y_str_list):
y_vec = np.zeros((len(y_str_list), self.ctable.vocab_size),
                         dtype=bool)
self.ctable.y_encode_into(y_vec, y_str_list)
return y_vec
def _make_layer(self, **kwargs):
recurrent_train_backwards = self.config.train_backwards
model_type = self.config.model_type
hidden_size = self.config.hidden_size
if model_type == 'GRU':
return recurrent.GRU(
hidden_size,
return_sequences=True,
go_backwards=recurrent_train_backwards,
**kwargs)
if model_type == 'LSTM':
return recurrent.LSTM(
hidden_size,
return_sequences=True,
go_backwards=recurrent_train_backwards,
**kwargs)
if model_type == 'Conv1D':
return Conv1D(
hidden_size,
self.config.convolutional_kernel_size,
**kwargs)
raise ConfigurationException('Unknown model_type: %s' % model_type)
def _return_model(self):
model = Sequential()
# Add the first input layer. If embedding is enabled, we add a different
# layer which does not have the input_shape defined
if self.config.embedding_layer:
self.feature_layers.append(
Embedding(
self.ctable.vocab_size,
self.config.embedding_size,
input_length=self.config.context_length))
self.feature_layers.append(self._make_layer())
else:
self.feature_layers.append(
self._make_layer(
input_shape=(
self.config.context_length, self.ctable.vocab_size)))
# Add the main model layers. These layers will not be trainable during
# secondary training.
for _ in range(self.config.layers):
if self.config.dropouts:
self.feature_layers.append(Dropout(self.config.dropout_ratio))
self.feature_layers.append(self._make_layer())
self.feature_layers.append(Flatten())
# Add any additional classification layers. These layers may be
# trainable during secondary training.
for _ in range(self.config.dense_layers):
self.classification_layers.append(
Dense(self.config.dense_hidden_size))
# Append the final layer which has the correct dimensions for the output
self.classification_layers.append(
Dense(self.ctable.vocab_size, activation='softmax'))
# Actually build the model
for layer in self.feature_layers + self.classification_layers:
try:
model.add(layer)
except Exception as e:
logging.error('Error when adding layer %s: %s', layer, e)
raise
return model
def build_model(self, model=None):
if self.multi_gpu >= 2:
with tf.device('/cpu:0'):
if model is None:
model = self._return_model()
self.model_to_save = model
model = keras.utils.multi_gpu_model(model, gpus=self.multi_gpu)
else:
if model is None:
model = self._return_model()
self.model_to_save = model
metrics = ['accuracy']
if self.config.tensorboard:
tensorboard_metrics = ['loss'] + metrics
self.train_log_names = ['train_' + name for name in tensorboard_metrics]
self.test_log_names = ['test_' + name for name in tensorboard_metrics]
model.compile(loss='categorical_crossentropy',
optimizer=self.config.model_optimizer,
metrics=metrics)
self.model = model
def init_layers(self):
assert self.model is not None
assert len(self.classification_layers) == 0
assert len(self.feature_layers) == 0
for layer in self.model.layers:
if isinstance(layer, (TimeDistributed, Activation, Dense)):
self.classification_layers.append(layer)
else:
self.feature_layers.append(layer)
def train_model(self, serializer):
prev_accuracy = 0
max_accuracy = 0
if self.config.tensorboard:
self.callback.set_model(self.model)
for gen in range(self.config.generations):
self.generation = gen + 1
logging.info('Generation %d', gen + 1)
accuracy, early_stop = self.train_model_generation(serializer)
logging.info('Generation accuracy: %s', accuracy)
if early_stop:
break
if not self.config.early_stopping and \
(accuracy > max_accuracy or self.config.save_always):
max_accuracy = accuracy
serializer.save_model(self.model_to_save)
if ((accuracy - prev_accuracy) <
self.config.training_accuracy_threshold):
logging.info('Accuracy diff of %s is less than threshold.',
accuracy - prev_accuracy)
break
prev_accuracy = accuracy
def test_set(self, x_all, y_all, w_all):
split_at = len(x_all) - max(
int(len(x_all) / self.config.train_test_ratio), 1)
x_train = x_all[0:split_at, :]
x_val = x_all[split_at:, :]
y_train, y_val = (y_all[:split_at], y_all[split_at:])
w_train, w_val = (w_all[:split_at], w_all[split_at:])
return x_train, x_val, y_train, y_val, w_train, w_val
def training_step(self, x_all, y_all, w_all):
x_train, x_val, y_train, y_val, w_train, w_val = self.test_set(
x_all, y_all, w_all)
train_loss, train_accuracy = self.model.train_on_batch(
x_train, y_train, sample_weight=w_train)
test_loss, test_accuracy = self.model.test_on_batch(
x_val, y_val, sample_weight=w_val)
return (train_loss, train_accuracy, test_loss, test_accuracy)
def early_stopping(self, smooth_loss, serializer):
stop = False
if self.min_loss_early_stopping > smooth_loss:
self.min_loss_early_stopping = smooth_loss
serializer.save_model(self.model_to_save)
self.poor_batches_early_stopping = 0
elif self.poor_batches_early_stopping < self.config.early_stopping_patience:
self.poor_batches_early_stopping += 1
else:
stop = True
return stop
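    # Added illustration: with early_stopping_patience = 3, a new minimum of
    # the smoothed test loss saves the model and resets the counter; up to 3
    # consecutive non-improving checks are tolerated, and the 4th one returns
    # stop = True to end training.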
def train_model_generation(self, serializer=None):
if self.config.early_stopping:
assert serializer, "Need to specify serializer with early_stopping"
self.chunk = 0
self.pwd_list.reset()
accuracy_accum = []
x_all, y_all, w_all = self.next_train_set_as_np()
chunk = 0
early_stop = False
while len(x_all) != 0:
assert len(x_all) == len(y_all)
tr_loss, tr_acc, te_loss, te_acc = self.training_step(
x_all, y_all, w_all)
accuracy_accum += [(len(x_all), te_acc)]
self.smoothened_loss.append((len(x_all), te_loss))
if self.config.tensorboard:
self.write_log(self.train_log_names, [tr_loss, tr_acc], self.cumulative_chunks)
self.write_log(self.test_log_names, [te_loss, te_acc], self.cumulative_chunks)
if chunk % self.config.chunk_print_interval == 0:
#Finding weighted average to get the right loss value over batches
# of unequal sizes
instances_smoothened = map(lambda x: x[0], self.smoothened_loss)
loss_smoothened = sum(map(lambda x: x[0] * x[1], self.smoothened_loss)
) / sum(instances_smoothened)
logging.info('Chunk %s. Each chunk is size %s',
chunk, len(x_all))
logging.info('Train loss %s. Test loss %s. Test accuracy %s. Averaged loss %s',
tr_loss, te_loss, te_acc, loss_smoothened)
if self.config.tensorboard:
self.callback.writer.flush()
if self.config.early_stopping and \
self.cumulative_chunks >= self.config.early_stopping_patience and \
self.cumulative_chunks % self.config.chunk_print_interval == 0:
                    # Second condition so that the model doesn't start saving
                    # very early in the training process
                    # Third condition to prevent evaluation of accuracy too frequently
instances_smoothened = map(lambda x: x[0], self.smoothened_loss)
loss_smoothened = sum(map(lambda x: x[0] * x[1], self.smoothened_loss)
) / sum(instances_smoothened)
early_stop = self.early_stopping(loss_smoothened, serializer)
if early_stop:
instances = map(lambda x: x[0], accuracy_accum)
return sum(map(lambda x: x[0] * x[1], accuracy_accum)
) / sum(instances), early_stop
x_all, y_all, w_all = self.next_train_set_as_np()
chunk += 1
self.cumulative_chunks += 1
instances = map(lambda x: x[0], accuracy_accum)
return sum(map(lambda x: x[0] * x[1], accuracy_accum)) / sum(instances), early_stop
def train(self, serializer):
logging.info('Building model...')
self.build_model(self.model)
logging.info('Done compiling model. Beginning training...')
self.train_model(serializer)
def freeze_feature_layers(self):
for layer in self.feature_layers:
layer.trainable = False
def retrain_classification(self, preprocessor, serializer):
assert self.model is not None
assert len(self.feature_layers) != 0
if self.config.freeze_feature_layers_during_secondary_training:
logging.info('Freezing feature layers...')
self.freeze_feature_layers()
logging.info('Retraining...')
self.pwd_list = preprocessor
self.train(serializer)
def write_log(self, names, logs, batch_no):
assert self.callback
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.callback.writer.add_summary(summary, batch_no)
class ManyToManyTrainer(Trainer):
    def
fr.write('\t name = "' + name + '"\n')
fr.write('\t virtual_network_name = "' + vnetname + '"\n')
fr.write('\t resource_group_name = "' + rgs + '"\n')
sprefix=subs[j]["properties"]["addressPrefix"]
fr.write('\t address_prefix = "' + sprefix + '"\n')
rtbid="null"
try:
seps=subs[j]["properties"]["serviceEndpoints"]
kcount=len(seps)
#print (json.dumps(seps, indent=4, separators=(',', ': ')))
#print kcount
lseps='['
for k in range(0, kcount):
x=seps[k]["service"]
lseps=lseps+'"'+x+'",'
lseps=lseps+']'
fr.write('\t service_endpoints = '+ lseps + '\n')
except KeyError:
pass
try:
snsgid=subs[j]["properties"]["networkSecurityGroup"]["id"].split("/")[8].replace(".","-")
snsgrg=subs[j]["properties"]["networkSecurityGroup"]["id"].split("/")[4].replace(".","-").lower()
fr.write('\t network_security_group_id = "${azurerm_network_security_group.' + snsgrg + '__' + snsgid +'.id}"' + '\n')
except KeyError:
pass
try:
rtbid=subs[j]["properties"]["routeTable"]["id"].split("/")[8].replace(".","-")
rtrg=subs[j]["properties"]["routeTable"]["id"].split("/")[4].replace(".","-").lower()
fr.write('\t route_table_id = "${azurerm_route_table.' + rtrg + '__' + rtbid +'.id}"' + '\n')
except KeyError:
pass
try:
delegn=subs[j]["properties"]["delegations"]
kcount=len(delegn)
for k in range(0, kcount):
delegn=subs[j]["properties"]["delegations"][k]["name"]
fr.write('delegation {\n')
fr.write('\t name = "' + delegn + '"\n')
try:
sdn=subs[j]["properties"]["delegations"][k]["properties"]["serviceName"]
sdact=str(ast.literal_eval(json.dumps(subs[j]["properties"]["delegations"][k]["properties"]["actions"])))
sdact=sdact.replace("'",'"')
fr.write('\t service_delegation {\n')
fr.write('\t name = "' + sdn + '"\n')
#fr.write('\t actions = ' + sdact + '\n')
fr.write('\t} \n')
except KeyError:
pass
fr.write('} \n')
# end k loop
except KeyError:
pass
fr.write('}' + ' \n')
# azurerm_subnet_network_security_group_association
r1="skip"
try:
snsgid=subs[j]["properties"]["networkSecurityGroup"]["id"].split("/")[8].replace(".","-")
r1="azurerm_subnet_network_security_group_association"
fr.write('resource ' + r1 + ' ' + rg + '__' + rname + '__' + snsgid + ' {\n')
fr.write('\tsubnet_id = "${azurerm_subnet.' + rg + '__' + rname + '.id}"' + '\n')
fr.write('\tnetwork_security_group_id = "${azurerm_network_security_group.' + snsgrg + '__' + snsgid +'.id}"' + '\n')
fr.write('}' + ' \n')
except KeyError:
pass
# azurerm_subnet_route_table_association
r2="skip"
try:
rtbid=subs[j]["properties"]["routeTable"]["id"].split("/")[8].replace(".","-")
r2="azurerm_subnet_route_table_association"
fr.write('resource ' + r2 + ' ' + rg + '__' + rname + '__' + rtbid + ' {\n')
fr.write('\tsubnet_id = "${azurerm_subnet.' + rg + '__' + rname + '.id}"' + '\n')
fr.write('\troute_table_id = "${azurerm_route_table.' + rtrg + '__' + rtbid +'.id}"' + '\n')
fr.write('}' + ' \n')
except KeyError:
pass
#fr.write('}\n')
fr.close() # close .tf file
# azurerm_subnet
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(j) + ' of ' + str(jcount-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# azurerm_subnet_network_security_group_association
if "skip" not in r1:
tfrm.write('terraform state rm ' + r1 + '.' + rg + '__' + rname + '__' + snsgid + '\n')
tfcomm='terraform import '+r1 +'.'+rg+'__'+rname+'__'+snsgid+' '+id+'\n'
tfim.write(tfcomm)
# azurerm_subnet_route_table_association
if "skip" not in r2:
tfrm.write('terraform state rm ' + r2 + '.' + rg + '__' + rname + '__' + rtbid + '\n')
tfcomm='terraform import '+r2 +'.'+rg+'__'+rname+'__'+rtbid+' '+id+'\n'
tfim.write(tfcomm)
# end j
###############
# specific code end
###############
# end for i loop
tfrm.close()
tfim.close()
#end subnet
#
# azurerm_virtual_network_peering
#
def azurerm_virtual_network_peering(crf,cde,crg,headers,requests,sub,json,az2tfmess):
#############
# 080 vnet peering
tfp="azurerm_virtual_network_peering"
if crf in tfp:
# peering in vnet
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Network/virtualNetworks"
params = {'api-version': '2018-07-01'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf="080-"+tfp+"-staterm.sh"
tfimf="080-"+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print "# " + tfp,
count=len(azr)
print count
for i in range(0, count):
peers=azr[i]["properties"]["virtualNetworkPeerings"]
vnetname=azr[i]["name"]
jcount=len(peers)
for j in range(0, jcount):
name=peers[j]["name"]
#loc=peers[j]["location"] peers don't have a location
id=peers[j]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(peers[j], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write("")
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t resource_group_name = "'+ rgs + '"\n')
fr.write('\t virtual_network_name = "' + vnetname + '"\n')
rvnid=peers[j]["properties"]["remoteVirtualNetwork"]["id"]
aft=str(peers[j]["properties"]["allowForwardedTraffic"]).lower()
agt=str(peers[j]["properties"]["allowGatewayTransit"]).lower()
avna=str(peers[j]["properties"]["allowVirtualNetworkAccess"]).lower()
urg=str(peers[j]["properties"]["useRemoteGateways"]).lower()
fr.write('\t remote_virtual_network_id = "' + rvnid + '"\n')
fr.write('\t allow_forwarded_traffic = ' + aft + '\n')
fr.write('\t allow_gateway_transit = ' + agt + '\n')
fr.write('\t allow_virtual_network_access = ' + avna + '\n')
fr.write('\t use_remote_gateways = ' + urg + '\n')
fr.write('}\n')
fr.close() # close .tf file
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(j) + ' of ' + str(jcount-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# end for j loop
# end for i loop
tfrm.close()
tfim.close()
#end peering
#
# azurerm_managed_disk
#
def azurerm_managed_disk(crf,cde,crg,headers,requests,sub,json,az2tfmess):
tfp="azurerm_managed_disk"
azr=""
if crf in tfp:
# REST or cli
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Compute/disks"
params = {'api-version': '2017-03-30'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf="100-"+tfp+"-staterm.sh"
tfimf="100-"+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print "# " + tfp,
count=len(azr)
print count
for i in range(0, count):
oname=azr[i]["name"]
name=oname.replace("/.vhd","/_vhd")
loc=azr[i]["location"]
id=azr[i]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write(az2tfmess)
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t location = "'+ loc + '"\n')
fr.write('\t resource_group_name = "'+ rgs + '"\n')
try:
ostyp=azr[i]["properties"]["osType"]
fr.write('\t os_type = "' + ostyp + '"\n')
except KeyError:
pass
try:
creopt=azr[i]["properties"]["creationData"]["createOption"]
fr.write('\t create_option = "' + creopt + '"\n')
except KeyError:
pass
try:
creopt=azr[i]["properties"]["creationData"]["sourceResourceId"]
fr.write('\t source_resource_id = "' + creopt + '"\n')
except KeyError:
pass
try:
imid=azr[i]["properties"]["creationData"]["imageReference"]["id"]
fr.write('\t image_reference_id = "' + imid + '"\n')
except KeyError:
pass
"""
try:
creid=azr[i]["properties"]["creationData"]["imageReference"]["id"]
fr.write('\t source_resource_id = "' + creid + '"\n')
except KeyError:
pass
"""
try:
enc=azr[i]["properties"]["encryptionSettings"]["enabled"]
fr.write('\t encryption_settings { \n')
fr.write('\t\t enabled = ' + str(enc).lower() + '\n')
try:
kekurl=azr[i]["properties"]["encryptionSettings"]["keyEncryptionKey"]["keyUrl"]
kekvltid=azr[i]["properties"]["encryptionSettings"]["keyEncryptionKey"]["sourceVault"]["id"]
fr.write('\t\t key_encryption_key { \n')
fr.write('\t\t\t key_url = "' + kekurl + '"\n')
fr.write('\t\t\t source_vault_id = "' + kekvltid + '"\n')
fr.write('\t\t } \n')
except KeyError:
pass
try:
dekurl=azr[i]["properties"]["encryptionSettings"]["diskEncryptionKey"]["secretUrl"]
dekvltid=azr[i]["properties"]["encryptionSettings"]["diskEncryptionKey"]["sourceVault"]["id"]
fr.write('\t\t disk_encryption_key { \n')
fr.write('\t\t\t secret_url = "' + dekurl + '"\n')
fr.write('\t\t\t source_vault_id = "' + dekvltid + '"\n')
fr.write('\t\t } \n')
except KeyError:
pass
fr.write('\t } \n')
except KeyError:
pass
try:
stopt=azr[i]["sku"]["name"]
fr.write('\t storage_account_type = "' + stopt + '"\n')
except KeyError:
fr.write('\t storage_account_type = "' + "StandardSSD_LRS" + '"\n')
pass
try:
dsize=str(azr[i]["properties"]["diskSizeGB"])
fr.write('\t disk_size_gb = "' + dsize + '"\n')
except KeyError:
pass
try:
zones=azr[i]["zones"]
fr.write('zones = ')
fr.write(json.dumps(zones, indent=4, separators=(',', ': ')))
fr.write('\n')
except KeyError:
pass
# tags block
try:
mtags=azr[i]["tags"]
fr.write('tags = { \n')
for key in mtags.keys():
tval=mtags[key]
fr.write('\t "' + key + '"="' + tval + '"\n')
fr.write('}\n')
except KeyError:
pass
fr.write('}\n')
fr.close() # close .tf file
if cde:
with open(rfilename) as f:
print f.read()
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# end for i loop
tfrm.close()
tfim.close()
#end managed disk
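#
# Added sketch (not called by the generated scripts above): every resource
# section in this file repeats the same ARM "list" call; a shared helper for
# that pattern would look like this.
def azure_list(sub, provider_path, api_version, headers, requests):
    # e.g. provider_path = "/providers/Microsoft.Compute/disks"
    url = "https://management.azure.com/subscriptions/" + sub + provider_path
    r = requests.get(url, headers=headers, params={'api-version': api_version})
    return r.json()["value"]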
#
# azurerm_storage_account
#
import ast
def azurerm_storage_account(crf,cde,crg,headers,requests,sub,json,az2tfmess):
# 110 storage account
tfp="azurerm_storage_account"
azr=""
if crf in tfp:
# REST or cli
# print "REST Storage Acc"
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Storage/storageAccounts"
params = {'api-version': '2019-04-01'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf="110-"+tfp+"-staterm.sh"
tfimf="110-"+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print '# '+tfp,
count=len(azr)
print count
for i in range(0, count):
name=azr[i]["name"]
loc=azr[i]["location"]
id=azr[i]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde: print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
#print prefix
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write("")
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t location = "'+ loc + '"\n')
fr.write('\t resource_group_name = "'+ rgs + '"\n')
satier=azr[i]["sku"]["tier"]
sakind=azr[i]["kind"]
sartype=azr[i]["sku"]["name"].split("_")[1]
saencrypt=str(azr[i]["properties"]["encryption"]["services"]["blob"]["enabled"]).lower()
fiencrypt=str(azr[i]["properties"]["encryption"]["services"]["file"]["enabled"]).lower()
sahttps=str(azr[i]["properties"]["supportsHttpsTrafficOnly"]).lower()
#nrs=azr[i]["properties"]["networkAcls"]
saencs=azr[i]["properties"]["encryption"]["keySource"]
fr.write('\t account_tier = "' + satier + '"\n')
fr.write('\t account_kind = "' + sakind + '"\n')
fr.write('\t account_replication_type = "' + sartype + '"\n')
fr.write('\t enable_blob_encryption = ' + saencrypt + '\n')
fr.write('\t enable_file_encryption = ' + fiencrypt + '\n')
fr.write('\t enable_https_traffic_only = ' + sahttps + '\n')
fr.write('\t account_encryption_source = "' + saencs + '"\n')
try:
ishns=str(azr[i]["properties"]["isHnsEnabled"]).lower()
fr.write('\t is_hns_enabled = ' + ishns + '\n')
except KeyError:
pass
try:
byp=str(ast.literal_eval(json.dumps(azr[i]["properties"]["networkAcls"]["bypass"])))
byp=byp.replace("'",'"')
byp=byp.replace(", ",'", "')
ipr=azr[i]["properties"]["networkAcls"]["ipRules"]
#print(json.dumps(ipr, indent=4, separators=(',', ': ')))
return _gui.GuiPushButton_set_style(self, style)
def get_content_size(self, w, h):
return _gui.GuiPushButton_get_content_size(self, w, h)
def set_roundness(self, roundness):
return _gui.GuiPushButton_set_roundness(self, roundness)
def get_roundness(self):
return _gui.GuiPushButton_get_roundness(self)
def set_ellipsis(self, value):
return _gui.GuiPushButton_set_ellipsis(self, value)
def get_ellipsis(self):
return _gui.GuiPushButton_get_ellipsis(self)
def set_action(self, action):
return _gui.GuiPushButton_set_action(self, action)
def enable_state_button_behavior(self, press):
return _gui.GuiPushButton_enable_state_button_behavior(self, press)
def release(self):
return _gui.GuiPushButton_release(self)
if _newclass:
class_info = staticmethod(_gui.GuiPushButton_class_info)
else:
class_info = _gui.GuiPushButton_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiPushButton____class_destructor__)
else:
___class_destructor__ = _gui.GuiPushButton____class_destructor__
def get_class_info(self):
return _gui.GuiPushButton_get_class_info(self)
def __gui_destroy__(self):
return _gui.GuiPushButton___gui_destroy__(self)
def __collect__(self):
return _gui.GuiPushButton___collect__(self)
def __uncollect__(self):
return _gui.GuiPushButton___uncollect__(self)
def is_created_by_python(self):
if hasattr(self, '__pycreated__'):
return self.__pycreated__
else: return False
def destroy(self):
if self.is_created_by_python():
self.__disown__()
self.__gui_destroy__()
self.__uncollect__()
def __del__(self):
if not self.is_created_by_python(): return
if self.is_shown():
self.hide()
if self.is_destroyed():
if self.thisown: self.__disown__()
else: self.destroy()
def __disown__(self):
self.this.disown()
_gui.disown_GuiPushButton(self)
return weakref_proxy(self)
GuiPushButton_swigregister = _gui.GuiPushButton_swigregister
GuiPushButton_swigregister(GuiPushButton)
EVT_ID_PUSH_BUTTON_CLICK = cvar.EVT_ID_PUSH_BUTTON_CLICK
EVT_ID_PUSH_BUTTON_UNCLICK = cvar.EVT_ID_PUSH_BUTTON_UNCLICK
def GuiPushButton_get_style_name(style):
return _gui.GuiPushButton_get_style_name(style)
GuiPushButton_get_style_name = _gui.GuiPushButton_get_style_name
def GuiPushButton_class_info():
return _gui.GuiPushButton_class_info()
GuiPushButton_class_info = _gui.GuiPushButton_class_info
def GuiPushButton____class_destructor__(instance, is_array):
return _gui.GuiPushButton____class_destructor__(instance, is_array)
GuiPushButton____class_destructor__ = _gui.GuiPushButton____class_destructor__
class GuiCheckButton(GuiWidget):
__swig_setmethods__ = {}
for _s in [GuiWidget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiCheckButton, name, value)
__swig_getmethods__ = {}
for _s in [GuiWidget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiCheckButton, name)
__repr__ = _swig_repr
STYLE_FLAT = _gui.GuiCheckButton_STYLE_FLAT
STYLE_BUTTON = _gui.GuiCheckButton_STYLE_BUTTON
STYLE_ROUND = _gui.GuiCheckButton_STYLE_ROUND
STYLE_COUNT = _gui.GuiCheckButton_STYLE_COUNT
if _newclass:
get_style_name = staticmethod(_gui.GuiCheckButton_get_style_name)
else:
get_style_name = _gui.GuiCheckButton_get_style_name
def __init__(self, *args):
if self.__class__ == GuiCheckButton:
_self = None
else:
_self = self
this = _gui.new_GuiCheckButton(_self, *args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
self.__pycreated__ = True
self.__collect__()
def draw(self, dc):
return _gui.GuiCheckButton_draw(self, dc)
def get_label(self):
return _gui.GuiCheckButton_get_label(self)
def set_label(self, label):
return _gui.GuiCheckButton_set_label(self, label)
def get_value(self):
return _gui.GuiCheckButton_get_value(self)
def set_value(self, value):
return _gui.GuiCheckButton_set_value(self, value)
def process_event(self, event_id):
return _gui.GuiCheckButton_process_event(self, event_id)
def get_justification(self):
return _gui.GuiCheckButton_get_justification(self)
def set_justification(self, justification):
return _gui.GuiCheckButton_set_justification(self, justification)
def get_icon(self):
return _gui.GuiCheckButton_get_icon(self)
def set_icon(self, *args):
return _gui.GuiCheckButton_set_icon(self, *args)
def get_pushed_icon(self):
return _gui.GuiCheckButton_get_pushed_icon(self)
def set_pushed_icon(self, *args):
return _gui.GuiCheckButton_set_pushed_icon(self, *args)
def get_icon_position(self):
return _gui.GuiCheckButton_get_icon_position(self)
def set_icon_position(self, position):
return _gui.GuiCheckButton_set_icon_position(self, position)
def get_style(self):
return _gui.GuiCheckButton_get_style(self)
def set_style(self, style):
return _gui.GuiCheckButton_set_style(self, style)
def get_content_size(self, w, h):
return _gui.GuiCheckButton_get_content_size(self, w, h)
def set_roundness(self, roundness):
return _gui.GuiCheckButton_set_roundness(self, roundness)
def get_roundness(self):
return _gui.GuiCheckButton_get_roundness(self)
def set_edge_selection(self, b):
return _gui.GuiCheckButton_set_edge_selection(self, b)
def is_use_option(self):
return _gui.GuiCheckButton_is_use_option(self)
def set_use_option(self, use_option):
return _gui.GuiCheckButton_set_use_option(self, use_option)
if _newclass:
class_info = staticmethod(_gui.GuiCheckButton_class_info)
else:
class_info = _gui.GuiCheckButton_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiCheckButton____class_destructor__)
else:
___class_destructor__ = _gui.GuiCheckButton____class_destructor__
def get_class_info(self):
return _gui.GuiCheckButton_get_class_info(self)
def __gui_destroy__(self):
return _gui.GuiCheckButton___gui_destroy__(self)
def __collect__(self):
return _gui.GuiCheckButton___collect__(self)
def __uncollect__(self):
return _gui.GuiCheckButton___uncollect__(self)
def is_created_by_python(self):
if hasattr(self, '__pycreated__'):
return self.__pycreated__
else: return False
def destroy(self):
if self.is_created_by_python():
self.__disown__()
self.__gui_destroy__()
self.__uncollect__()
def __del__(self):
if not self.is_created_by_python(): return
if self.is_shown():
self.hide()
if self.is_destroyed():
if self.thisown: self.__disown__()
else: self.destroy()
def __disown__(self):
self.this.disown()
_gui.disown_GuiCheckButton(self)
return weakref_proxy(self)
GuiCheckButton_swigregister = _gui.GuiCheckButton_swigregister
GuiCheckButton_swigregister(GuiCheckButton)
EVT_ID_CHECK_BUTTON_CLICK = cvar.EVT_ID_CHECK_BUTTON_CLICK
EVT_ID_CHECK_OPTION_BUTTON_CLICK = cvar.EVT_ID_CHECK_OPTION_BUTTON_CLICK
def GuiCheckButton_get_style_name(style):
return _gui.GuiCheckButton_get_style_name(style)
GuiCheckButton_get_style_name = _gui.GuiCheckButton_get_style_name
def GuiCheckButton_class_info():
return _gui.GuiCheckButton_class_info()
GuiCheckButton_class_info = _gui.GuiCheckButton_class_info
def GuiCheckButton____class_destructor__(instance, is_array):
return _gui.GuiCheckButton____class_destructor__(instance, is_array)
GuiCheckButton____class_destructor__ = _gui.GuiCheckButton____class_destructor__
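# Illustrative usage sketch for GuiCheckButton (the constructor arguments and the
# host application's event loop are assumptions; they are not defined in this
# generated module):
#
#   btn = GuiCheckButton(parent, x, y, w, h)          # assumed signature
#   btn.set_label("Wireframe")
#   btn.set_style(GuiCheckButton.STYLE_ROUND)
#   btn.set_value(True)
#   if btn.get_value():
#       pass  # react to EVT_ID_CHECK_BUTTON_CLICK in the host event handler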
class GuiCheckbox(GuiWidget):
__swig_setmethods__ = {}
for _s in [GuiWidget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiCheckbox, name, value)
__swig_getmethods__ = {}
for _s in [GuiWidget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiCheckbox, name)
__repr__ = _swig_repr
def __init__(self, *args):
if self.__class__ == GuiCheckbox:
_self = None
else:
_self = self
this = _gui.new_GuiCheckbox(_self, *args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
self.__pycreated__ = True
self.__collect__()
def draw(self, dc):
return _gui.GuiCheckbox_draw(self, dc)
def get_label(self):
return _gui.GuiCheckbox_get_label(self)
def set_label(self, label):
return _gui.GuiCheckbox_set_label(self, label)
def get_value(self):
return _gui.GuiCheckbox_get_value(self)
def set_value(self, value):
return _gui.GuiCheckbox_set_value(self, value)
def process_event(self, event_id):
return _gui.GuiCheckbox_process_event(self, event_id)
if _newclass:
class_info = staticmethod(_gui.GuiCheckbox_class_info)
else:
class_info = _gui.GuiCheckbox_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiCheckbox____class_destructor__)
else:
___class_destructor__ = _gui.GuiCheckbox____class_destructor__
def get_class_info(self):
return _gui.GuiCheckbox_get_class_info(self)
def __gui_destroy__(self):
return _gui.GuiCheckbox___gui_destroy__(self)
def __collect__(self):
return _gui.GuiCheckbox___collect__(self)
def __uncollect__(self):
return _gui.GuiCheckbox___uncollect__(self)
def is_created_by_python(self):
if hasattr(self, '__pycreated__'):
return self.__pycreated__
else: return False
def destroy(self):
if self.is_created_by_python():
self.__disown__()
self.__gui_destroy__()
self.__uncollect__()
def __del__(self):
if not self.is_created_by_python(): return
if self.is_shown():
self.hide()
if self.is_destroyed():
if self.thisown: self.__disown__()
else: self.destroy()
def __disown__(self):
self.this.disown()
_gui.disown_GuiCheckbox(self)
return weakref_proxy(self)
GuiCheckbox_swigregister = _gui.GuiCheckbox_swigregister
GuiCheckbox_swigregister(GuiCheckbox)
EVT_ID_CHECKBOX_CLICK = cvar.EVT_ID_CHECKBOX_CLICK
def GuiCheckbox_class_info():
return _gui.GuiCheckbox_class_info()
GuiCheckbox_class_info = _gui.GuiCheckbox_class_info
def GuiCheckbox____class_destructor__(instance, is_array):
return _gui.GuiCheckbox____class_destructor__(instance, is_array)
GuiCheckbox____class_destructor__ = _gui.GuiCheckbox____class_destructor__
class GuiListView(GuiWidget):
__swig_setmethods__ = {}
for _s in [GuiWidget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiListView, name, value)
__swig_getmethods__ = {}
for _s in [GuiWidget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiListView, name)
__repr__ = _swig_repr
def __init__(self, parent, x, y, w, h):
if self.__class__ == GuiListView:
_self = None
else:
_self = self
this = _gui.new_GuiListView(_self, parent, x, y, w, h)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
self.__pycreated__ = True
self.__collect__()
def draw(self, dc):
return _gui.GuiListView_draw(self, dc)
def show(self):
return _gui.GuiListView_show(self)
def hide(self):
return _gui.GuiListView_hide(self)
def resize(self, x, y, w, h):
return _gui.GuiListView_resize(self, x, y, w, h)
def process_event(self, event_id):
return _gui.GuiListView_process_event(self, event_id)
def get_selected_index(self):
return _gui.GuiListView_get_selected_index(self)
def set_selected_index(self, index):
return _gui.GuiListView_set_selected_index(self, index)
def add_item(self, *args):
return _gui.GuiListView_add_item(self, *args)
def get_item_name(self, index):
return _gui.GuiListView_get_item_name(self, index)
def set_item_name(self, index, name):
return _gui.GuiListView_set_item_name(self, index, name)
def get_item_data(self, index):
return _gui.GuiListView_get_item_data(self, index)
def set_item_data(self, index, data):
return _gui.GuiListView_set_item_data(self, index, data)
def set_item_enabled(self, *args):
return _gui.GuiListView_set_item_enabled(self, *args)
def make_item_title(self, name):
return _gui.GuiListView_make_item_title(self, name)
def is_item_enabled(self, *args):
return _gui.GuiListView_is_item_enabled(self, *args)
def is_item_disabled(self, *args):
return _gui.GuiListView_is_item_disabled(self, *args)
def get_item_count(self):
return _gui.GuiListView_get_item_count(self)
def get_item_icon(self, index):
return _gui.GuiListView_get_item_icon(self, index)
def get_item_icons(self, index):
return _gui.GuiListView_get_item_icons(self, index)
def remove_all_items(self):
return _gui.GuiListView_remove_all_items(self)
def remove_item(self, index):
return _gui.GuiListView_remove_item(self, index)
def find_item_by_name(self, name):
return _gui.GuiListView_find_item_by_name(self, name)
def set_mouse_over_selection(self, over):
return _gui.GuiListView_set_mouse_over_selection(self, over)
def get_mouse_over_selection(self):
return _gui.GuiListView_get_mouse_over_selection(self)
def scroll(self):
return _gui.GuiListView_scroll(self)
def set_cell_height(self, height):
return _gui.GuiListView_set_cell_height(self, height)
def set_icons_width(self, width):
return _gui.GuiListView_set_icons_width(self, width)
def get_item_y_position(self, index, y):
return _gui.GuiListView_get_item_y_position(self, index, y)
def set_highlight_case_sensitive(self, case_sensitive):
return _gui.GuiListView_set_highlight_case_sensitive(self, case_sensitive)
def set_highlight_words(self, keyword_list):
return _gui.GuiListView_set_highlight_words(self, keyword_list)
def is_item_readonly(self, name, readonly):
return _gui.GuiListView_is_item_readonly(self, name, readonly)
def set_item_readonly(self, name, enabled):
return _gui.GuiListView_set_item_readonly(self, name, enabled)
def get_scrollbar_width(self):
return _gui.GuiListView_get_scrollbar_width(self)
if _newclass:
class_info = staticmethod(_gui.GuiListView_class_info)
else:
class_info = _gui.GuiListView_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiListView____class_destructor__)
else:
___class_destructor__ = _gui.GuiListView____class_destructor__
def get_class_info(self):
return _gui.GuiListView_get_class_info(self)
def __gui_destroy__(self):
return _gui.GuiListView___gui_destroy__(self)
def __collect__(self):
return _gui.GuiListView___collect__(self)
def __uncollect__(self):
return _gui.GuiListView___uncollect__(self)
def is_created_by_python(self):
if hasattr(self, '__pycreated__'):
return self.__pycreated__
else: return False
def destroy(self):
if self.is_created_by_python():
self.__disown__()
self.__gui_destroy__()
self.__uncollect__()
def __del__(self):
if not self.is_created_by_python(): return
if self.is_shown():
self.hide()
if self.is_destroyed():
if self.thisown: self.__disown__()
else: self.destroy()
def __disown__(self):
self.this.disown()
_gui.disown_GuiListView(self)
return weakref_proxy(self)
GuiListView_swigregister = _gui.GuiListView_swigregister
GuiListView_swigregister(GuiListView)
EVT_ID_LIST_VIEW_SELECT = cvar.EVT_ID_LIST_VIEW_SELECT
EVT_ID_LIST_VIEW_VALIDATE = cvar.EVT_ID_LIST_VIEW_VALIDATE
def GuiListView_class_info():
return _gui.GuiListView_class_info()
GuiListView_class_info = _gui.GuiListView_class_info
def GuiListView____class_destructor__(instance, is_array):
return _gui.GuiListView____class_destructor__(instance, is_array)
GuiListView____class_destructor__ = _gui.GuiListView____class_destructor__
class GuiListViewItem(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiListViewItem, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GuiListViewItem, name)
__repr__ = _swig_repr
def __init__(self):
this = _gui.new_GuiListViewItem()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_setmethods__["data"] = _gui.GuiListViewItem_data_set
__swig_getmethods__["data"] = _gui.GuiListViewItem_data_get
if _newclass:
data = _swig_property(_gui.GuiListViewItem_data_get, _gui.GuiListViewItem_data_set)
__swig_setmethods__["enabled"] = _gui.GuiListViewItem_enabled_set
__swig_getmethods__["enabled"] = _gui.GuiListViewItem_enabled_get
if _newclass:
enabled = _swig_property(_gui.GuiListViewItem_enabled_get, _gui.GuiListViewItem_enabled_set)
__swig_setmethods__["readonly"] = _gui.GuiListViewItem_readonly_set
__swig_getmethods__["readonly"] = _gui.GuiListViewItem_readonly_get
if _newclass:
readonly = _swig_property(_gui.GuiListViewItem_readonly_get, _gui.GuiListViewItem_readonly_set)
__swig_setmethods__["bold"] = _gui.GuiListViewItem_bold_set
__swig_getmethods__["bold"] = _gui.GuiListViewItem_bold_get
if _newclass:
bold = _swig_property(_gui.GuiListViewItem_bold_get, _gui.GuiListViewItem_bold_set)
__swig_setmethods__["name"] = _gui.GuiListViewItem_name_set
__swig_getmethods__["name"] = _gui.GuiListViewItem_name_get
if _newclass:
name = _swig_property(_gui.GuiListViewItem_name_get, _gui.GuiListViewItem_name_set)
__swig_setmethods__["icons"] = _gui.GuiListViewItem_icons_set
__swig_getmethods__["icons"] = _gui.GuiListViewItem_icons_get
if _newclass:
icons = _swig_property(_gui.GuiListViewItem_icons_get, _gui.GuiListViewItem_icons_set)
__swig_destroy__ = _gui.delete_GuiListViewItem
__del__ = lambda self: None
GuiListViewItem_swigregister = _gui.GuiListViewItem_swigregister
GuiListViewItem_swigregister(GuiListViewItem)
class GuiPanel(GuiWidget):
__swig_setmethods__ = {}
for _s in [GuiWidget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiPanel, name, value)
__swig_getmethods__ = {}
for _s in [GuiWidget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiPanel, name)
__repr__ = _swig_repr
def __init__(self, parent, x, y, w, h):
if self.__class__ == GuiPanel:
_self = None
else:
_self = self
this = _gui.new_GuiPanel(_self, parent, x, y, w, h)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
self.__pycreated__ = True
self.__collect__()
def draw(self, dc):
return _gui.GuiPanel_draw(self, dc)
def show(self):
return _gui.GuiPanel_show(self)
def hide(self):
return _gui.GuiPanel_hide(self)
def get_background_color(self):
return _gui.GuiPanel_get_background_color(self)
def set_background_color(self, *args):
return _gui.GuiPanel_set_background_color(self, *args)
def set_top_toolbar_visible(self, visible):
return _gui.GuiPanel_set_top_toolbar_visible(self, visible)
def is_top_toolbar_visible(self):
return _gui.GuiPanel_is_top_toolbar_visible(self)
def set_bottom_toolbar_visible(self, visible):
return _gui.GuiPanel_set_bottom_toolbar_visible(self, visible)
def is_bottom_toolbar_visible(self):
return _gui.GuiPanel_is_bottom_toolbar_visible(self)
def get_top_toolbar(self):
return _gui.GuiPanel_get_top_toolbar(self)
def get_bottom_toolbar(self):
return _gui.GuiPanel_get_bottom_toolbar(self)
def is_scroll_enabled(self):
return _gui.GuiPanel_is_scroll_enabled(self)
def set_scroll_enabled(self, enable):
return _gui.GuiPanel_set_scroll_enabled(self, enable)
def is_horizontal_bar_visible(self):
return _gui.GuiPanel_is_horizontal_bar_visible(self)
def is_vertical_bar_visible(self):
return _gui.GuiPanel_is_vertical_bar_visible(self)
    def reinit_scrollbars_offset(self):
        return _gui.GuiPanel_reinit_scrollbars_offset(self)
# ----- Source: Stienvdh/statrick -----
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.0.0
# Copyright (C) 2019-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ome_firmware
short_description: Firmware update of PowerEdge devices and its components through OpenManage Enterprise
version_added: "2.0.0"
description: "This module updates the firmware of PowerEdge devices and all its components through
OpenManage Enterprise."
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
options:
device_service_tag:
description:
- List of targeted device service tags.
- Either I(device_id) or I(device_service_tag) can be used individually or together.
- I(device_service_tag) is mutually exclusive with I(device_group_names).
type: list
elements: str
device_id:
description:
- List of targeted device ids.
- Either I(device_id) or I(device_service_tag) can be used individually or together.
- I(device_id) is mutually exclusive with I(device_group_names).
type: list
elements: int
device_group_names:
description:
- Enter the name of the group to update the firmware of all the devices within the group.
- I(device_group_names) is mutually exclusive with I(device_id) and I(device_service_tag).
type: list
elements: str
baseline_name:
description:
- Enter the baseline name to update the firmware of all the devices or groups of
devices against the available compliance report.
- The firmware update can also be done by providing the baseline name and the path to
the single DUP file. To update multiple baselines at once, provide the baseline
names separated by commas.
      - I(baseline_name) is mutually exclusive with I(device_group_names), I(device_id)
and I(device_service_tag).
type: str
dup_file:
description: "Executable file to apply on the targets."
type: str
requirements:
- "python >= 2.7.5"
author:
- "<NAME> (@felixs88)"
notes:
- Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
- This module does not support C(check_mode).
'''
EXAMPLES = r'''
---
- name: Update firmware from DUP file using device ids
dellemc.openmanage.ome_firmware:
hostname: "192.168.0.1"
username: "username"
password: "password"
device_id:
- 11111
- 22222
dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE"
- name: Update firmware from a DUP file using device service tags
dellemc.openmanage.ome_firmware:
hostname: "192.168.0.1"
username: "username"
password: "password"
device_service_tag:
- KLBR111
- KLBR222
dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE"
- name: Update firmware from a DUP file using device group names
dellemc.openmanage.ome_firmware:
hostname: "192.168.0.1"
username: "username"
password: "password"
device_group_names:
- servers
dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
- name: Update firmware using baseline name
dellemc.openmanage.ome_firmware:
hostname: "192.168.0.1"
username: "username"
password: "password"
baseline_name: baseline_devices
- name: Update firmware from a DUP file using baseline names
dellemc.openmanage.ome_firmware:
hostname: "192.168.0.1"
username: "username"
password: "password"
baseline_name: "baseline_devices, baseline_groups"
dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
'''
RETURN = r'''
---
msg:
type: str
description: "Overall firmware update status."
returned: always
sample: "Successfully submitted the firmware update job."
update_status:
type: dict
description: "Firmware Update job and progress details from the OME."
returned: success
sample: {
'LastRun': None,
'CreatedBy': 'user',
'Schedule': 'startnow',
'LastRunStatus': {
'Id': 1111,
'Name': 'NotRun'
},
'Builtin': False,
'Editable': True,
'NextRun': None,
'JobStatus': {
'Id': 1111,
'Name': 'New'
},
'JobName': 'Firmware Update Task',
'Visible': True,
'State': 'Enabled',
'JobDescription': 'dup test',
'Params': [{
'Value': 'true',
'Key': 'signVerify',
'JobId': 11111}, {
'Value': 'false',
'Key': 'stagingValue',
'JobId': 11112}, {
'Value': 'false',
'Key': 'complianceUpdate',
'JobId': 11113}, {
'Value': 'INSTALL_FIRMWARE',
'Key': 'operationName',
'JobId': 11114}],
'Targets': [{
'TargetType': {
'Id': 1000,
'Name': 'DEVICE'},
'Data': 'DCIM:INSTALLED#701__NIC.Mezzanine.1A-1-1=1111111111111',
'Id': 11115,
'JobId': 11116}],
'StartTime': None,
'UpdatedBy': None,
'EndTime': None,
'Id': 11117,
'JobType': {
'Internal': False,
'Id': 5,
'Name': 'Update_Task'}
}
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
type: dict
sample: {
"error": {
"code": "Base.1.0.GeneralError",
"message": "A general error has occurred. See ExtendedInfo for more information.",
"@Message.ExtendedInfo": [
{
"MessageId": "GEN1234",
"RelatedProperties": [],
"Message": "Unable to process the request because an error occurred.",
"MessageArgs": [],
"Severity": "Critical",
"Resolution": "Retry the operation. If the issue persists, contact your system administrator."
}
]
}
}
'''
import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
def spawn_update_job(rest_obj, job_payload):
    """Spawn a firmware update job and return the created job's details."""
job_uri, job_details = "JobService/Jobs", {}
job_resp = rest_obj.invoke_request("POST", job_uri, data=job_payload)
if job_resp.status_code == 201:
job_details = job_resp.json_data
return job_details
def job_payload_for_update(rest_obj, module, target_data, baseline=None):
"""Formulate the payload to initiate a firmware update job."""
resp = rest_obj.get_job_type_id("Update_Task")
if resp is None:
module.fail_json(msg="Unable to fetch the job type Id.")
payload = {
"Id": 0, "JobName": "Firmware Update Task",
"JobDescription": "Firmware Update Task", "Schedule": "startnow",
"State": "Enabled", "JobType": {"Id": resp, "Name": "Update_Task"},
"Targets": target_data,
"Params": [{"Key": "operationName", "Value": "INSTALL_FIRMWARE"},
{"Key": "stagingValue", "Value": "false"},
{"Key": "signVerify", "Value": "true"}]
}
if baseline is not None:
payload["Params"].append({"Key": "complianceReportId", "Value": "{0}".format(baseline["baseline_id"])})
payload["Params"].append({"Key": "repositoryId", "Value": "{0}".format(baseline["repo_id"])})
payload["Params"].append({"Key": "catalogId", "Value": "{0}".format(baseline["catalog_id"])})
payload["Params"].append({"Key": "complianceUpdate", "Value": "true"})
else:
payload["Params"].append({"JobId": 0, "Key": "complianceUpdate", "Value": "false"})
return payload
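# job_payload_for_update() always requests operationName=INSTALL_FIRMWARE with
# signVerify=true and stagingValue=false; when a baseline dict is passed it also
# attaches the baseline/repository/catalog ids and sets complianceUpdate=true,
# otherwise complianceUpdate is false (plain single-DUP update).  The Targets
# list is typically the one built by get_applicable_components() below.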
def get_applicable_components(rest_obj, dup_payload, module):
"""Get the target array to be used in spawning jobs for update."""
target_data = []
dup_url = "UpdateService/Actions/UpdateService.GetSingleDupReport"
headers = {"Content-Type": "application/json", "Accept": "application/json"}
dup_resp = rest_obj.invoke_request("POST", dup_url, data=dup_payload,
headers=headers, api_timeout=60)
if dup_resp.status_code == 200:
dup_data = dup_resp.json_data
file_token = str(dup_payload['SingleUpdateReportFileToken'])
for device in dup_data:
for component in device['DeviceReport']['Components']:
temp_map = {}
temp_map['Id'] = device['DeviceId']
temp_map['Data'] = "{0}={1}".format(component['ComponentSourceName'], file_token)
temp_map['TargetType'] = {}
temp_map['TargetType']['Id'] = int(device['DeviceReport']['DeviceTypeId'])
temp_map['TargetType']['Name'] = str(device['DeviceReport']['DeviceTypeName'])
target_data.append(temp_map)
else:
        module.fail_json(msg="Unable to get the components to which the DUP applies.")
return target_data
def get_dup_applicability_payload(file_token, device_ids=None, group_ids=None, baseline_ids=None):
"""Returns the DUP applicability JSON payload."""
dup_applicability_payload = {'SingleUpdateReportBaseline': [],
'SingleUpdateReportGroup': [],
'SingleUpdateReportTargets': [],
'SingleUpdateReportFileToken': file_token}
if device_ids is not None:
dup_applicability_payload.update(
{"SingleUpdateReportTargets": list(map(int, device_ids))}
)
elif group_ids is not None:
dup_applicability_payload.update(
{"SingleUpdateReportGroup": list(map(int, group_ids))}
)
elif baseline_ids is not None:
dup_applicability_payload.update(
{"SingleUpdateReportBaseline": list(map(int, baseline_ids))}
)
return dup_applicability_payload
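# For illustration (hypothetical file token and device ids):
#   get_dup_applicability_payload("123456", device_ids=[10079, 10080])
# returns
#   {'SingleUpdateReportBaseline': [], 'SingleUpdateReportGroup': [],
#    'SingleUpdateReportTargets': [10079, 10080],
#    'SingleUpdateReportFileToken': '123456'}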
def upload_dup_file(rest_obj, module):
"""Upload DUP file to OME and get a file token."""
upload_uri = "UpdateService/Actions/UpdateService.UploadFile"
headers = {"Content-Type": "application/octet-stream",
"Accept": "application/octet-stream"}
upload_success, token = False, None
dup_file = module.params['dup_file']
if not isinstance(dup_file, str):
module.fail_json(
            msg="argument {0} is of type {1} and could not be converted to a string".format(
                "dup_file", type(dup_file)))
with open(module.params['dup_file'], 'rb') as payload:
payload = payload.read()
response = rest_obj.invoke_request("POST", upload_uri, data=payload, headers=headers,
api_timeout=100, dump=False)
if response.status_code == 200:
upload_success = True
token = str(response.json_data)
else:
module.fail_json(msg="Unable to upload {0} to {1}".format(module.params['dup_file'],
module.params['hostname']))
return upload_success, token
def get_device_ids(rest_obj, module, device_id_tags):
"""Getting the list of device ids filtered from the device inventory."""
device_id = []
resp = rest_obj.get_all_report_details("DeviceService/Devices")
if resp["report_list"]:
device_resp = dict([(str(device['Id']), device['DeviceServiceTag']) for device in resp["report_list"]])
device_tags = map(str, device_id_tags)
invalid_tags = []
for tag in device_tags:
if tag in device_resp.keys() or tag.isdigit():
device_id.append(tag)
elif tag in device_resp.values():
ids = list(device_resp.keys())[list(device_resp.values()).index(tag)]
device_id.append(ids)
else:
invalid_tags.append(tag)
if invalid_tags:
module.fail_json(
msg="Unable to complete the operation because the entered target device service"
" tag(s) or device id(s) '{0}' are invalid.".format(",".join(set(invalid_tags))))
else:
module.fail_json(msg="Failed to fetch the device facts.")
return device_id
def get_dup_baseline(rest_obj, module):
"""Getting the list of baseline ids filtered from the baselines."""
resp = rest_obj.get_all_report_details("UpdateService/Baselines")
baseline = module.params.get('baseline_name').split(",")
if resp["report_list"]:
baseline_ids = [bse['Id'] for bse in resp["report_list"] for name in baseline if bse['Name'] == name]
if len(set(baseline)) != len(set(baseline_ids)):
module.fail_json(
msg="Unable to complete the operation because the entered target baseline name(s)"
" '{0}' are invalid.".format(",".join(set(baseline))))
else:
        module.fail_json(msg="Unable to complete the operation because the entered "
                             "target baseline name(s) do not exist.")
return baseline_ids
def get_group_ids(rest_obj, module):
"""Getting the list of group ids filtered from the groups."""
resp = rest_obj.get_all_report_details("GroupService/Groups")
group_name = module.params.get('device_group_names')
if resp["report_list"]:
grp_ids = [grp['Id'] for grp in resp["report_list"] for grpname in group_name if grp['Name'] == grpname]
if len(set(group_name)) != len(set(grp_ids)):
module.fail_json(
msg="Unable to complete the operation because the entered target device group name(s)"
" '{0}' are invalid.".format(",".join(set(group_name))))
return grp_ids
def get_baseline_ids(rest_obj, module):
"""Getting the list of group ids filtered from the groups."""
resp = rest_obj.get_all_report_details("UpdateService/Baselines")
baseline, baseline_details = module.params.get('baseline_name'), {}
if resp["report_list"]:
for bse in resp["report_list"]:
if bse['Name'] == baseline:
baseline_details["baseline_id"] = bse["Id"]
baseline_details["repo_id"] = bse["RepositoryId"]
baseline_details["catalog_id"] = bse["CatalogId"]
if not baseline_details:
module.fail_json(
msg="Unable to complete the operation because the entered target baseline name"
" '{0}' is invalid.".format(baseline))
else:
        module.fail_json(msg="Unable to complete the operation because the entered "
"target baseline name does not exist.")
return baseline_details
def single_dup_update(rest_obj, module):
target_data, device_ids, group_ids, baseline_ids = None, None, None, None
if module.params.get("device_group_names") is not None:
group_ids = get_group_ids(rest_obj, module)
elif module.params.get("baseline_name") is not None \
and module.params.get("dup_file") is not None:
        baseline_ids = get_dup_baseline(rest_obj, module)
# ----- File: src/sage/schemes/elliptic_curves/ell_number_field.py -----
# -*- coding: utf-8 -*-
r"""
Elliptic curves over number fields
An elliptic curve `E` over a number field `K` can be given
by a Weierstrass equation whose coefficients lie in `K` or by
using ``base_extend`` on an elliptic curve defined over a subfield.
One major difference to elliptic curves over `\QQ` is that there
might not exist a global minimal equation over `K`, when `K` does
not have class number one.
Another difference is the lack of understanding of modularity for
general elliptic curves over general number fields.
Currently Sage can obtain local information about `E/K_v` for finite places
`v`, it has an interface to Denis Simon's script for 2-descent, it can compute
the torsion subgroup of the Mordell-Weil group `E(K)`, and it can work with
isogenies defined over `K`.
EXAMPLE::
sage: K.<i> = NumberField(x^2+1)
sage: E = EllipticCurve([0,4+i])
sage: E.discriminant()
-3456*i - 6480
sage: P= E([i,2])
sage: P+P
(-2*i + 9/16 : -9/4*i - 101/64 : 1)
::
sage: E.has_good_reduction(2+i)
True
sage: E.local_data(4+i)
Local data at Fractional ideal (i + 4):
Reduction type: bad additive
Local minimal model: Elliptic Curve defined by y^2 = x^3 + (i+4) over Number Field in i with defining polynomial x^2 + 1
Minimal discriminant valuation: 2
Conductor exponent: 2
Kodaira Symbol: II
Tamagawa Number: 1
sage: E.tamagawa_product_bsd()
1
::
sage: E.simon_two_descent()
(1, 1, [(i : 2 : 1)])
::
sage: E.torsion_order()
1
::
sage: E.isogenies_prime_degree(3)
[Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + (i+4) over Number Field in i with defining polynomial x^2 + 1 to Elliptic Curve defined by y^2 = x^3 + (-27*i-108) over Number Field in i with defining polynomial x^2 + 1]
AUTHORS:
- <NAME> 2007
- <NAME>
- <NAME>
REFERENCE:
- [Sil] Silverman, <NAME>. The arithmetic of elliptic curves. Second edition. Graduate Texts in
Mathematics, 106. Springer, 2009.
- [Sil2] Silverman, <NAME>. Advanced topics in the arithmetic of elliptic curves. Graduate Texts in
Mathematics, 151. Springer, 1994.
"""
#*****************************************************************************
# Copyright (C) 2007 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from ell_field import EllipticCurve_field
import ell_point
import sage.matrix.all as matrix
from sage.rings.ring import Ring
from sage.rings.arith import gcd, prime_divisors
from sage.misc.all import prod
import ell_torsion
from ell_generic import is_EllipticCurve
from gp_simon import simon_two_descent
from constructor import EllipticCurve
from sage.rings.all import PolynomialRing, ZZ, QQ, RealField
import sage.misc.misc
from sage.misc.misc import verbose, forall
from sage.rings.integer import Integer
from sage.rings.arith import valuation
import gal_reps_number_field
class EllipticCurve_number_field(EllipticCurve_field):
r"""
Elliptic curve over a number field.
EXAMPLES::
sage: K.<i>=NumberField(x^2+1)
sage: EllipticCurve([i, i - 1, i + 1, 24*i + 15, 14*i + 35])
Elliptic Curve defined by y^2 + i*x*y + (i+1)*y = x^3 + (i-1)*x^2 + (24*i+15)*x + (14*i+35) over Number Field in i with defining polynomial x^2 + 1
"""
def __init__(self, K, ainvs):
r"""
EXAMPLES:
A curve from the database of curves over `\QQ`, but over a larger field:
sage: K.<i>=NumberField(x^2+1)
sage: EllipticCurve(K,'389a1')
Elliptic Curve defined by y^2 + y = x^3 + x^2 + (-2)*x over Number Field in i with defining polynomial x^2 + 1
Making the field of definition explicitly larger::
sage: EllipticCurve(K,[0,-1,1,0,0])
Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 over Number Field in i with defining polynomial x^2 + 1
"""
self._known_points = []
EllipticCurve_field.__init__(self, K, ainvs)
_point = ell_point.EllipticCurvePoint_number_field
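    # _point names the point class used for points on these curves; the generic
    # elliptic-curve call/point machinery is expected to pick it up when E(...)
    # constructs a point over the number field.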
def base_extend(self, R):
"""
Return the base extension of ``self`` to `R`.
EXAMPLES::
sage: E = EllipticCurve('11a3')
sage: K = QuadraticField(-5, 'a')
sage: E.base_extend(K)
Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 over Number Field in a with defining polynomial x^2 + 5
Check that non-torsion points are remembered when extending
the base field (see :trac:`16034`)::
sage: E = EllipticCurve([1, 0, 1, -1751, -31352])
sage: K.<d> = QuadraticField(5)
sage: E.gens()
[(52 : 111 : 1)]
sage: EK = E.base_extend(K)
sage: EK.gens()
[(52 : 111 : 1)]
"""
E = super(EllipticCurve_number_field, self).base_extend(R)
if isinstance(E, EllipticCurve_number_field):
E._known_points = [E([R(_) for _ in P.xy()]) for P in self._known_points if not P.is_zero()]
return E
def simon_two_descent(self, verbose=0, lim1=2, lim3=4, limtriv=2,
maxprob=20, limbigprime=30, known_points=None):
r"""
Return lower and upper bounds on the rank of the Mordell-Weil
group `E(K)` and a list of points.
This method is used internally by the :meth:`~rank`,
:meth:`~rank_bounds` and :meth:`~gens` methods.
INPUT:
- ``self`` -- an elliptic curve `E` over a number field `K`
- ``verbose`` -- 0, 1, 2, or 3 (default: 0), the verbosity level
- ``lim1`` -- (default: 2) limit on trivial points on quartics
- ``lim3`` -- (default: 4) limit on points on ELS quartics
- ``limtriv`` -- (default: 2) limit on trivial points on `E`
- ``maxprob`` -- (default: 20)
- ``limbigprime`` -- (default: 30) to distinguish between
small and large prime numbers. Use probabilistic tests for
large primes. If 0, don't use probabilistic tests.
- ``known_points`` -- (default: None) list of known points on
the curve
OUTPUT: a triple ``(lower, upper, list)`` consisting of
- ``lower`` (integer) -- lower bound on the rank
- ``upper`` (integer) -- upper bound on the rank
- ``list`` -- list of points in `E(K)`
The integer ``upper`` is in fact an upper bound on the
dimension of the 2-Selmer group, hence on the dimension of
`E(K)/2E(K)`. It is equal to the dimension of the 2-Selmer
group except possibly if `E(K)[2]` has dimension 1. In that
case, ``upper`` may exceed the dimension of the 2-Selmer group
by an even number, due to the fact that the algorithm does not
perform a second descent.
.. note::
For non-quadratic number fields, this code does return, but
it takes a long time.
ALGORITHM:
Uses <NAME>'s PARI/GP scripts from
http://www.math.unicaen.fr/~simon/.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 23, 'a')
sage: E = EllipticCurve(K, '37')
sage: E == loads(dumps(E))
True
sage: E.simon_two_descent()
(2, 2, [(0 : 0 : 1), (1/8*a + 5/8 : -3/16*a - 7/16 : 1)])
sage: E.simon_two_descent(lim1=3, lim3=20, limtriv=5, maxprob=7, limbigprime=10)
(2, 2, [(-1 : 0 : 1), (-1/8*a + 5/8 : -3/16*a - 9/16 : 1)])
::
sage: K.<a> = NumberField(x^2 + 7, 'a')
sage: E = EllipticCurve(K, [0,0,0,1,a]); E
Elliptic Curve defined by y^2 = x^3 + x + a over Number Field in a with defining polynomial x^2 + 7
sage: v = E.simon_two_descent(verbose=1); v
elliptic curve: Y^2 = x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)
Trivial points on the curve = [[1, 1, 0], [Mod(1/2*y + 3/2, y^2 + 7), Mod(-y - 2, y^2 + 7), 1]]
#S(E/K)[2] = 2
#E(K)/2E(K) = 2
#III(E/K)[2] = 1
rank(E/K) = 1
listpoints = [[Mod(1/2*y + 3/2, y^2 + 7), Mod(-y - 2, y^2 + 7), 1]]
(1, 1, [(1/2*a + 3/2 : -a - 2 : 1)])
sage: v = E.simon_two_descent(verbose=2)
K = bnfinit(y^2 + 7);
a = Mod(y,K.pol);
bnfellrank(K, [0, 0, 0, 1, a], [[Mod(1/2*y + 3/2, y^2 + 7), Mod(-y - 2, y^2 + 7)]]);
elliptic curve: Y^2 = x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)
A = Mod(0, y^2 + 7)
B = Mod(1, y^2 + 7)
C = Mod(y, y^2 + 7)
<BLANKLINE>
Computing L(S,2)
L(S,2) = [Mod(Mod(-1, y^2 + 7)*x^2 + Mod(-1/2*y + 1/2, y^2 + 7)*x + 1, x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)), Mod(Mod(-1, y^2 + 7)*x^2 + Mod(-1/2*y - 1/2, y^2 + 7)*x + 1, x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)), Mod(-1, x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)), Mod(x^2 + 2, x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)), Mod(Mod(1, y^2 + 7)*x + Mod(1/2*y + 3/2, y^2 + 7), x^3 + Mod(1, y^2 + 7)*x + Mod(y, y^2 + 7)), Mod(Mod(1, y^2 + 7)*x + Mod(1/2*y - 3/2, y^2 + | |
# ----- (next source file) -----
                construct_txt += '        %s = new %s[pInStruct->%s];\n' % (m_name, m_type, self.struct_dict[s][m]['array_size'])
#construct_txt += ' std::copy (pInStruct->%s, pInStruct->%s+pInStruct->%s, %s);\n' % (m_name, m_name, self.struct_dict[s][m]['array_size'], m_name)
construct_txt += ' memcpy ((void *)%s, (void *)pInStruct->%s, sizeof(%s)*pInStruct->%s);\n' % (m_name, m_name, m_type, self.struct_dict[s][m]['array_size'])
construct_txt += ' }\n'
destruct_txt += ' if (%s)\n' % (m_name)
destruct_txt += ' delete[] %s;\n' % (m_name)
elif self.struct_dict[s][m]['array']:
# Init array ptr to NULL
init_list += '\n\t%s(NULL),' % (m_name)
init_func_txt += ' %s = NULL;\n' % (m_name)
array_element = 'pInStruct->%s[i]' % (m_name)
if is_type(self.struct_dict[s][m]['type'], 'struct') and self._hasSafeStruct(self.struct_dict[s][m]['type']):
array_element = '%s(&pInStruct->%s[i])' % (self._getSafeStructName(self.struct_dict[s][m]['type']), m_name)
construct_txt += ' if (%s && pInStruct->%s) {\n' % (self.struct_dict[s][m]['array_size'], m_name)
construct_txt += ' %s = new %s[%s];\n' % (m_name, m_type, self.struct_dict[s][m]['array_size'])
destruct_txt += ' if (%s)\n' % (m_name)
destruct_txt += ' delete[] %s;\n' % (m_name)
construct_txt += ' for (uint32_t i=0; i<%s; ++i) {\n' % (self.struct_dict[s][m]['array_size'])
if 'safe_' in m_type:
construct_txt += ' %s[i].initialize(&pInStruct->%s[i]);\n' % (m_name, m_name)
else:
construct_txt += ' %s[i] = %s;\n' % (m_name, array_element)
construct_txt += ' }\n'
construct_txt += ' }\n'
elif self.struct_dict[s][m]['ptr']:
construct_txt += ' if (pInStruct->%s)\n' % (m_name)
construct_txt += ' %s = new %s(pInStruct->%s);\n' % (m_name, m_type, m_name)
construct_txt += ' else\n'
construct_txt += ' %s = NULL;\n' % (m_name)
destruct_txt += ' if (%s)\n' % (m_name)
destruct_txt += ' delete %s;\n' % (m_name)
elif 'safe_' in m_type: # inline struct, need to pass in reference for constructor
init_list += '\n\t%s(&pInStruct->%s),' % (m_name, m_name)
init_func_txt += ' %s.initialize(&pInStruct->%s);\n' % (m_name, m_name)
else:
init_list += '\n\t%s(pInStruct->%s),' % (m_name, m_name)
init_func_txt += ' %s = pInStruct->%s;\n' % (m_name, m_name)
if '' != init_list:
init_list = init_list[:-1] # hack off final comma
if s in custom_construct_txt:
construct_txt = custom_construct_txt[s]
ss_src.append("\n%s::%s(const %s* pInStruct) : %s\n{\n%s}" % (ss_name, ss_name, s, init_list, construct_txt))
ss_src.append("\n%s::%s() {}" % (ss_name, ss_name))
# Create slight variation of init and construct txt for copy constructor that takes a src object reference vs. struct ptr
copy_construct_init = init_func_txt.replace('pInStruct->', 'src.')
copy_construct_txt = construct_txt.replace(' (pInStruct->', ' (src.') # Exclude 'if' blocks from next line
copy_construct_txt = copy_construct_txt.replace('(pInStruct->', '(*src.') # Pass object to copy constructors
copy_construct_txt = copy_construct_txt.replace('pInStruct->', 'src.') # Modify remaining struct refs for src object
ss_src.append("\n%s::%s(const %s& src)\n{\n%s%s}" % (ss_name, ss_name, ss_name, copy_construct_init, copy_construct_txt)) # Copy constructor
ss_src.append("\n%s::~%s()\n{\n%s}" % (ss_name, ss_name, destruct_txt))
ss_src.append("\nvoid %s::initialize(const %s* pInStruct)\n{\n%s%s}" % (ss_name, s, init_func_txt, construct_txt))
# Copy initializer uses same txt as copy constructor but has a ptr and not a reference
init_copy = copy_construct_init.replace('src.', 'src->')
init_construct = copy_construct_txt.replace('src.', 'src->')
ss_src.append("\nvoid %s::initialize(const %s* src)\n{\n%s%s}" % (ss_name, ss_name, init_copy, init_construct))
if s in ifdef_dict:
ss_src.append('#endif')
return "\n".join(ss_src)
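# Rough shape of the C++ emitted by the safe-struct generation above for a struct
# "Foo" with a counted array member (illustrative only; the real names and members
# depend on the parsed headers):
#
#   safe_Foo::safe_Foo(const Foo* pInStruct) : count(pInStruct->count)
#   {
#       if (count && pInStruct->pItems) {
#           pItems = new Item[count];
#           memcpy((void *)pItems, (void *)pInStruct->pItems, sizeof(Item)*count);
#       }
#   }
#   safe_Foo::~safe_Foo() { if (pItems) delete[] pItems; }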
class EnumCodeGen:
def __init__(self, enum_type_dict=None, enum_val_dict=None, typedef_fwd_dict=None, in_file=None, out_sh_file=None, out_vh_file=None):
self.et_dict = enum_type_dict
self.ev_dict = enum_val_dict
self.tf_dict = typedef_fwd_dict
self.in_file = in_file
self.out_sh_file = out_sh_file
self.eshfg = CommonFileGen(self.out_sh_file)
self.out_vh_file = out_vh_file
self.evhfg = CommonFileGen(self.out_vh_file)
def generateStringHelper(self):
self.eshfg.setHeader(self._generateSHHeader())
self.eshfg.setBody(self._generateSHBody())
self.eshfg.generate()
def generateEnumValidate(self):
self.evhfg.setHeader(self._generateSHHeader())
self.evhfg.setBody(self._generateVHBody())
self.evhfg.generate()
def _generateVHBody(self):
body = []
for bet in sorted(self.et_dict):
fet = self.tf_dict[bet]
body.append("static inline uint32_t validate_%s(%s input_value)\n{" % (fet, fet))
# TODO : This is not ideal, but allows for flag combinations. Need more rigorous validation of realistic flag combinations
if 'flagbits' in bet.lower():
body.append(' if (input_value > (%s))' % (' | '.join(self.et_dict[bet])))
body.append(' return 0;')
body.append(' return 1;')
body.append('}\n\n')
else:
body.append(' switch ((%s)input_value)\n {' % (fet))
for e in sorted(self.et_dict[bet]):
if (self.ev_dict[e]['unique']):
body.append(' case %s:' % (e))
body.append(' return 1;\n default:\n return 0;\n }\n}\n\n')
return "\n".join(body)
def _generateSHBody(self):
body = []
# with open(self.out_file, "a") as hf:
# bet == base_enum_type, fet == final_enum_type
for bet in sorted(self.et_dict):
fet = self.tf_dict[bet]
body.append("static inline const char* string_%s(%s input_value)\n{\n switch ((%s)input_value)\n {" % (fet, fet, fet))
for e in sorted(self.et_dict[bet]):
if (self.ev_dict[e]['unique']):
body.append(' case %s:\n return "%s";' % (e, e))
body.append(' default:\n return "Unhandled %s";\n }\n}\n\n' % (fet))
return "\n".join(body)
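    # The generated string helper has this general form (illustrative enum name
    # and value; the real ones come from the parsed header):
    #   static inline const char* string_VkFoo(VkFoo input_value)
    #   {
    #       switch ((VkFoo)input_value)
    #       {
    #           case VK_FOO_BAR:
    #               return "VK_FOO_BAR";
    #           default:
    #               return "Unhandled VkFoo";
    #       }
    #   }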
def _generateSHHeader(self):
header = []
header.append('#pragma once\n')
header.append('#ifdef _WIN32\n')
header.append('#pragma warning( disable : 4065 )\n')
header.append('#endif\n')
header.append('#include <vulkan/%s>\n\n\n' % self.in_file)
return "\n".join(header)
class CMakeGen:
def __init__(self, struct_wrapper=None, out_dir=None):
self.sw = struct_wrapper
self.include_headers = []
self.add_lib_file_list = self.sw.get_file_list()
self.out_dir = out_dir
self.out_file = os.path.join(self.out_dir, "CMakeLists.txt")
self.cmg = CommonFileGen(self.out_file)
def generate(self):
self.cmg.setBody(self._generateBody())
self.cmg.generate()
def _generateBody(self):
body = []
body.append("project(%s)" % os.path.basename(self.out_dir))
body.append("cmake_minimum_required(VERSION 2.8)\n")
body.append("add_library(${PROJECT_NAME} %s)\n" % " ".join(self.add_lib_file_list))
body.append('set(COMPILE_FLAGS "-fpermissive")')
body.append('set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILE_FLAGS}")\n')
body.append("include_directories(${SRC_DIR}/thirdparty/${GEN_API}/inc/)\n")
body.append("target_include_directories (%s PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})\n" % os.path.basename(self.out_dir))
return "\n".join(body)
class GraphVizGen:
def __init__(self, struct_dict, prefix, out_dir):
self.struct_dict = struct_dict
self.api = prefix
if prefix == "vulkan":
self.api_prefix = "vk"
else:
self.api_prefix = prefix
self.out_file = os.path.join(out_dir, self.api_prefix+"_struct_graphviz_helper.h")
self.gvg = CommonFileGen(self.out_file)
def generate(self):
self.gvg.setCopyright("//This is the copyright\n")
self.gvg.setHeader(self._generateHeader())
self.gvg.setBody(self._generateBody())
#self.gvg.setFooter('}')
self.gvg.generate()
def set_include_headers(self, include_headers):
self.include_headers = include_headers
def _generateHeader(self):
header = []
header.append("//#includes, #defines, globals and such...\n")
for f in self.include_headers:
if 'vk_enum_string_helper' not in f:
header.append("#include <%s>\n" % f)
#header.append('#include "vk_enum_string_helper.h"\n\n// Function Prototypes\n')
header.append("\nchar* dynamic_gv_display(const void* pStruct, const char* prefix);\n")
return "".join(header)
def _get_gv_func_name(self, struct):
return "%s_gv_print_%s" % (self.api_prefix, struct.lower().strip("_"))
# Return elements to create formatted string for given struct member
def _get_struct_gv_print_formatted(self, struct_member, pre_var_name="", postfix = "\\n", struct_var_name="pStruct", struct_ptr=True, print_array=False, port_label=""):
struct_op = "->"
pre_var_name = '"%s "' % struct_member['full_type']
if not struct_ptr:
struct_op = "."
member_name = struct_member['name']
print_type = "p"
cast_type = ""
member_post = ""
array_index = ""
member_print_post = ""
print_delimiter = "%"
if struct_member['array'] and 'char' in struct_member['type'].lower(): # just print char array as string
print_type = "p"
print_array = False
elif struct_member['array'] and not print_array:
# Just print base address of array when not full print_array
cast_type = "(void*)"
elif is_type(struct_member['type'], 'enum'):
if struct_member['ptr']:
struct_var_name = "*" + struct_var_name
cast_type = "string_%s" % struct_member['type']
print_type = "s"
elif is_type(struct_member['type'], 'struct'): # print struct address for now
cast_type = "(void*)"
if not struct_member['ptr']:
cast_type = "(void*)&"
elif 'bool' in struct_member['type'].lower():
print_type = "s"
member_post = ' ? "TRUE" : "FALSE"'
elif 'float' in struct_member['type']:
print_type = "f"
elif 'uint64' in struct_member['type'] or 'gpusize' in struct_member['type'].lower():
print_type = '" PRId64 "'
elif 'uint8' in struct_member['type']:
print_type = "hu"
elif 'size' in struct_member['type'].lower():
print_type = '" PRINTF_SIZE_T_SPECIFIER "'
print_delimiter = ""
elif True in [ui_str.lower() in struct_member['type'].lower() for ui_str in ['uint', 'flags', 'samplemask']]:
print_type = "u"
elif 'int' in struct_member['type']:
print_type = "i"
elif struct_member['ptr']:
pass
else:
#print("Unhandled struct type: %s" % struct_member['type'])
cast_type = "(void*)"
if print_array and struct_member['array']:
member_print_post = "[%u]"
array_index = " i,"
member_post = "[i]"
print_out = "<TR><TD>%%s%s%s</TD><TD%s>%s%s%s</TD></TR>" % (member_name, member_print_post, port_label, print_delimiter, print_type, postfix) # section of print that goes inside of quotes
print_arg = ", %s,%s %s(%s%s%s)%s\n" % (pre_var_name, array_index, cast_type, struct_var_name, struct_op, member_name, member_post) # section of print passed to portion in quotes
return (print_out, print_arg)
def _generateBody(self):
gv_funcs = []
array_func_list = [] # structs for which we'll generate an array version of their print function
array_func_list.append('vkbufferviewattachinfo')
array_func_list.append('vkimageviewattachinfo')
array_func_list.append('vksamplerimageviewinfo')
array_func_list.append('vkdescriptortypecount')
# For first pass, generate prototype
for s in sorted(self.struct_dict):
gv_funcs.append('char* %s(const %s* pStruct, const char* myNodeName);\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
if s.lower().strip("_") in array_func_list:
if s.lower().strip("_") in ['vkbufferviewattachinfo', 'vkimageviewattachinfo']:
gv_funcs.append('char* %s_array(uint32_t count, const %s* const* pStruct, const char* myNodeName);\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
else:
gv_funcs.append('char* %s_array(uint32_t count, const %s* pStruct, const char* myNodeName);\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
gv_funcs.append('\n')
for s in sorted(self.struct_dict):
p_out = ""
p_args = ""
stp_list = [] # stp == "struct to print" a list of structs for this API call that should be printed as structs
# the fields below are a super-hacky way for now to get port labels into GV output, TODO : Clean this up!
pl_dict = {}
struct_num = 0
# This isn't great but this pre-pass flags structs w/ pNext and other struct ptrs
for m in sorted(self.struct_dict[s]):
if 'pNext' == self.struct_dict[s][m]['name'] or is_type(self.struct_dict[s][m]['type'], 'struct'):
stp_list.append(self.struct_dict[s][m])
if 'pNext' == self.struct_dict[s][m]['name']:
pl_dict[m] = ' PORT=\\"pNext\\"'
else:
pl_dict[m] = ' PORT=\\"struct%i\\"' % struct_num
struct_num += 1
gv_funcs.append('char* %s(const %s* pStruct, const char* myNodeName)\n{\n char* str;\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
num_stps = len(stp_list);
total_strlen_str = ''
if 0 != num_stps:
gv_funcs.append(" char* tmpStr;\n")
gv_funcs.append(" char nodeName[100];\n")
gv_funcs.append(' char* stp_strs[%i];\n' % num_stps)
for index in range(num_stps):
if (stp_list[index]['ptr']):
# ----- (next source file) -----
ignore_missing:
warnings.warn(
"[GDSPY] Cell {0} not found; operations on this CellArray may not work.".format(
self.ref_cell
),
stacklevel=2,
)
def __str__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellArray ("{0}", {1} x {2}, at ({3[0]}, {3[1]}), spacing {4[0]} x {4[1]}, rotation {5}, magnification {6}, reflection {7})'.format(
name,
self.columns,
self.rows,
self.origin,
self.spacing,
self.rotation,
self.magnification,
self.x_reflection,
)
def __repr__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellArray("{0}", {1}, {2}, ({4[0]}, {4[1]}), ({3[0]}, {3[1]}), {5}, {6}, {7})'.format(
name,
self.columns,
self.rows,
self.origin,
self.spacing,
self.rotation,
self.magnification,
self.x_reflection,
)
def to_gds(self, multiplier):
"""
Convert this object to a GDSII element.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
element.
Returns
-------
out : string
The GDSII binary string that represents this object.
"""
name = self.ref_cell.name
if len(name) % 2 != 0:
name = name + "\0"
data = struct.pack(">4H", 4, 0x0B00, 4 + len(name), 0x1206) + name.encode(
"ascii"
)
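        # GDSII AREF elements describe the array with three points: the origin,
        # the origin displaced by columns*spacing along the column axis, and the
        # origin displaced by rows*spacing along the row axis.  x2/y2 and x3/y3
        # below are those two displaced points, later adjusted for reflection
        # and rotation about the origin.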
x2 = self.origin[0] + self.columns * self.spacing[0]
y2 = self.origin[1]
x3 = self.origin[0]
y3 = self.origin[1] + self.rows * self.spacing[1]
if (
(self.rotation is not None)
or (self.magnification is not None)
or self.x_reflection
):
word = 0
values = b""
if self.x_reflection:
word += 0x8000
y3 = 2 * self.origin[1] - y3
if not (self.magnification is None):
# This flag indicates that the magnification is absolute, not
# relative (not supported).
# word += 0x0004
values += struct.pack(">2H", 12, 0x1B05) + _eight_byte_real(
self.magnification
)
if not (self.rotation is None):
# This flag indicates that the rotation is absolute, not
# relative (not supported).
# word += 0x0002
sa = numpy.sin(self.rotation * numpy.pi / 180.0)
ca = numpy.cos(self.rotation * numpy.pi / 180.0)
tmp = (
(x2 - self.origin[0]) * ca
- (y2 - self.origin[1]) * sa
+ self.origin[0]
)
y2 = (
(x2 - self.origin[0]) * sa
+ (y2 - self.origin[1]) * ca
+ self.origin[1]
)
x2 = tmp
tmp = (
(x3 - self.origin[0]) * ca
- (y3 - self.origin[1]) * sa
+ self.origin[0]
)
y3 = (
(x3 - self.origin[0]) * sa
+ (y3 - self.origin[1]) * ca
+ self.origin[1]
)
x3 = tmp
values += struct.pack(">2H", 12, 0x1C05) + _eight_byte_real(
self.rotation
)
data += struct.pack(">3H", 6, 0x1A01, word) + values
return data + struct.pack(
">2H2h2H6l2H",
8,
0x1302,
self.columns,
self.rows,
28,
0x1003,
int(round(self.origin[0] * multiplier)),
int(round(self.origin[1] * multiplier)),
int(round(x2 * multiplier)),
int(round(y2 * multiplier)),
int(round(x3 * multiplier)),
int(round(y3 * multiplier)),
4,
0x1100,
)
def area(self, by_spec=False):
"""
Calculate the total area of the cell array with the
magnification factor included.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the areas
of each individual pair (layer, datatype).
Returns
-------
out : number, dictionary
Area of this cell.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else 0
if self.magnification is None:
factor = self.columns * self.rows
else:
factor = self.columns * self.rows * self.magnification ** 2
if by_spec:
cell_area = self.ref_cell.area(True)
for kk in cell_area.keys():
cell_area[kk] *= factor
return cell_area
else:
return self.ref_cell.area() * factor
def get_polygons(self, by_spec=False, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with the
polygons of each individual pair (layer, datatype).
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons. References below this level will result
in a bounding box. If `by_spec` is True the key will be
name of the referenced cell.
Returns
-------
out : list of array-like[N][2] or dictionary
List containing the coordinates of the vertices of each
polygon, or dictionary with the list of polygons (if
`by_spec` is True).
Note
----
Instances of `FlexPath` and `RobustPath` are also included in
the result by computing their polygonal boundary.
"""
if not isinstance(self.ref_cell, Cell):
return dict() if by_spec else []
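        # The transforms below are vectorized with component-wise products:
        # assuming the module constants are _mpone = (-1, 1), _one = (1, 1) and
        # _pmone_int = (1, -1), "points * ct + points[:, ::-1] * st" rotates each
        # (x, y) pair by self.rotation, and "points * xrefl" mirrors it across
        # the x axis before the rotation and translation are applied.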
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.magnification is not None:
mag = self.magnification * _one
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = _pmone_int
if by_spec:
cell_polygons = self.ref_cell.get_polygons(True, depth)
polygons = {}
for kk in cell_polygons.keys():
polygons[kk] = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons[kk]:
if self.magnification:
polygons[kk].append(points * mag + spc)
else:
polygons[kk].append(points + spc)
if self.x_reflection:
polygons[kk][-1] = polygons[kk][-1] * xrefl
if self.rotation is not None:
polygons[kk][-1] = (
polygons[kk][-1] * ct
+ polygons[kk][-1][:, ::-1] * st
)
if self.origin is not None:
polygons[kk][-1] = polygons[kk][-1] + orgn
else:
cell_polygons = self.ref_cell.get_polygons(depth=depth)
polygons = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for points in cell_polygons:
if self.magnification is not None:
polygons.append(points * mag + spc)
else:
polygons.append(points + spc)
if self.x_reflection:
polygons[-1] = polygons[-1] * xrefl
if self.rotation is not None:
polygons[-1] = (
polygons[-1] * ct + polygons[-1][:, ::-1] * st
)
if self.origin is not None:
polygons[-1] = polygons[-1] + orgn
return polygons
def get_polygonsets(self, depth=None):
"""
Return the list of polygons created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve polygons from.
Returns
-------
out : list of `PolygonSet`
List containing the polygons in this cell and its
references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.x_reflection:
xrefl = _pmone_int
if self.magnification is not None:
mag = self.magnification * _one
if self.origin is not None:
orgn = numpy.array(self.origin)
polygonsets = self.ref_cell.get_polygonsets(depth=depth)
array = []
for i in range(self.columns):
for j in range(self.rows):
spc = numpy.array([self.spacing[0] * i, self.spacing[1] * j])
for polygonset in polygonsets:
ps = libcopy.deepcopy(polygonset)
for ii in range(len(ps.polygons)):
if self.magnification is not None:
ps.polygons[ii] = ps.polygons[ii] * mag + spc
else:
ps.polygons[ii] = ps.polygons[ii] + spc
if self.x_reflection:
ps.polygons[ii] = ps.polygons[ii] * xrefl
if self.rotation is not None:
ps.polygons[ii] = (
ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st
)
if self.origin is not None:
ps.polygons[ii] = ps.polygons[ii] + orgn
array.append(ps)
return array
def get_paths(self, depth=None):
"""
Return the list of paths created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve paths from.
Returns
-------
out : list of `FlexPath` or `RobustPath`
List containing the paths in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.origin is not None:
trans = numpy.array(self.origin)
else:
trans = None
if self.rotation is not None:
rot = self.rotation * numpy.pi / 180.0
else:
rot = None
paths = self.ref_cell.get_paths(depth=depth)
array = []
for i in range(self.columns):
for j in range(self.rows):
spc = numpy.array([self.spacing[0] * i, self.spacing[1] * j])
for path in paths:
array.append(
libcopy.deepcopy(path).transform(
trans, rot, self.magnification, self.x_reflection, spc
)
)
return array
def get_labels(self, depth=None):
"""
Return the list of labels created by this reference.
Parameters
----------
depth : integer or None
If not None, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of `Label`
List containing the labels in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone
if self.magnification is not None:
mag = self.magnification * _one
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = _pmone_int
cell_labels = self.ref_cell.get_labels(depth=depth)
labels = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for clbl in cell_labels:
lbl = libcopy.deepcopy(clbl)
# ----- (next source file) -----
the hw config 'rngi' and 'rngw' in " + str(self) + " class can't adopt temporal coding simultaneously."
assert self.hwcfg["quantilei"] > 0 and self.hwcfg["quantilei"] <= 1, \
"Error: the hw config 'quantilei' of " + str(self) + " class needs to be within (0, 1]."
assert self.hwcfg["quantilew"] > 0 and self.hwcfg["quantilew"] <= 1, \
"Error: the hw config 'quantilew' of " + str(self) + " class needs to be within (0, 1]."
assert self.hwcfg["rounding"] in ["round", "ceil", "floor"], \
"Error: the hw config 'rounding' of " + str(self) + " class requires one of ['round', 'ceil', 'floor']."
assert self.hwcfg["signmag"] is True, \
"Error: the hw config 'signmag' of " + str(self) + " class requires to be True, i.e., always computing on sign-magnitue data, for diverse architectures."
# maximum possible run cycle
self.cycle_max = 2**(max(hwcfg["widthi"], hwcfg["widthw"]) - hwcfg["signmag"])
# actual run cycle
if self.hwcfg["cycle"] is None:
self.cycle_act = self.cycle_max
else:
self.cycle_act = min(self.hwcfg["cycle"], self.cycle_max)
self.hwcfg["cycle"] = self.cycle_act
# weight and bias
if weight_ext is not None:
assert (weight_ext.size()[0], weight_ext.size()[1]) == (out_features, in_features), \
"Error: the hw config 'out_features, in_features' in " + str(self) + " class unmatches the binary weight shape."
self.weight.data = weight_ext
if bias and (bias_ext is not None):
assert bias_ext.size()[0] == out_features, \
"Error: the hw config 'out_features' in " + str(self) + " class unmatches the binary bias shape."
self.bias.data = bias_ext
swcfg={
"btype" : torch.float,
"rtype" : torch.float,
"stype" : torch.float
}
# random_sequence from RNG
hwcfg_irng = {
"width" : self.hwcfg["widthi"] - self.hwcfg["signmag"],
"dimr" : 1,
"rng" : self.hwcfg["rngi"]
}
self.irng = RNG(hwcfg_irng, swcfg)()
hwcfg_wrng = {
"width" : self.hwcfg["widthw"] - self.hwcfg["signmag"],
"dimr" : 1,
"rng" : self.hwcfg["rngw"]
}
self.wrng = RNG(hwcfg_wrng, swcfg)()
if (self.itc) and (not self.wtc):
# cbsg controller is input
self.rngctler = self.irng
self.rngctlee = self.wrng
elif (not self.itc) and (self.wtc):
# cbsg controller is weight
self.rngctler = self.wrng
self.rngctlee = self.irng
elif (not self.itc) and (not self.wtc):
# when rate coding is applied to both input and weight, always control weight with input
# the hardware cost of doing this is similar to the opposite
self.rngctler = self.irng
self.rngctlee = self.wrng
# generate the value map for mul using current rng
# dim 0 is input index
# the tensor input value is the actual value produced by the rngctler
self.mapctler = torch.nn.Parameter(torch.empty(self.cycle_max), requires_grad=False)
cycle_ctlerval = torch.empty(0)
torch.cat(self.cycle_max*[torch.arange(self.cycle_max, dtype=torch.float).unsqueeze(1)], 1, out=cycle_ctlerval)
cycle_ctlerbit = torch.empty(0)
torch.gt(cycle_ctlerval, self.rngctler.unsqueeze(0), out=cycle_ctlerbit)
self.mapctler.data = torch.sum(cycle_ctlerbit, 1).squeeze_().type(torch.long)
# dim 0 is input index, dim 1 is weight index
# the tensor value is the actual weight value produced by the rngctlee, under a specific input and weight
self.mapctlee = torch.nn.Parameter(torch.empty(self.cycle_max, self.cycle_max), requires_grad=False)
cycle_ctleebit = torch.empty(0)
torch.gt(cycle_ctlerval, self.rngctlee.unsqueeze(0), out=cycle_ctleebit)
for c in range(self.cycle_max):
self.mapctlee.data[c] = torch.sum(cycle_ctleebit[:, 0:self.mapctler.data[c]], 1).squeeze_()
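        # Explanatory note (added, based on the construction above): mapctler and
        # mapctlee act as lookup tables for unary multiplication. mapctler[v] is
        # the number of 1s a rate-coded bitstream for value v contains over
        # cycle_max cycles of the controller RNG; mapctlee[c, v] counts the 1s
        # value v produces within the first mapctler[c] cycles of the controlled
        # RNG's stream, which approximates the scaled product c * v / cycle_max.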
self.rshift_i = None
self.rshift_w = None
self.rshift_o = None
@autocast()
def forward(self, input):
# See the autograd section for explanation of what happens here.
self.rshift_i, self.rshift_w, self.rshift_o = \
rshift_offset(input, self.weight, self.hwcfg["widthi"] - self.hwcfg["signmag"], self.hwcfg["widthw"] - self.hwcfg["signmag"], self.hwcfg["rounding"], self.hwcfg["quantilei"], self.hwcfg["quantilew"])
return HUBLinearFunction.apply(input, self.weight, self.bias,
self.rshift_i, self.rshift_w, self.rshift_o,
self.cycle_act, self.mapctlee)
# Inherit from Function
class HUBLinearFunction(torch.autograd.Function):
"""
This code is for rate coding for both input and weight.
"""
# Note that both forward and backward are @staticmethods
@staticmethod
# bias is an optional argument
def forward(ctx, input, weight, bias=None,
rshift_i=3, rshift_w=3, rshift_o=3,
cycle=128, mapcbsg=None):
ctx.save_for_backward(input, weight, bias)
assert len(input.size()) == 2, \
"Error: the input of HUBLinearFunction class needs 2 dimensions."
# first dim should always be batch
batch = input.size()[0]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# input preparation
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# scale input to range 0~2^widthi-1
buf_i = torch.empty(0, dtype=torch.long, device=input.device)
torch.abs((input >> rshift_i).unsqueeze_(1).round().type(torch.long), out=buf_i)
torch.clamp(buf_i, 0, cycle-1, out=buf_i)
# actual input: its sign
act_input = torch.empty(0, device=input.device)
torch.sign(input, out=act_input)
act_input.unsqueeze_(1)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# weight preparation
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# scale weight with batch to range 0~2^widthw-1
buf_w_no_batch = torch.empty(0, dtype=torch.long, device=weight.device)
torch.abs((weight >> rshift_w).unsqueeze_(0).round().type(torch.long), out=buf_w_no_batch)
torch.clamp(buf_w_no_batch, 0, cycle-1, out=buf_w_no_batch)
buf_w = torch.empty(0, dtype=torch.long, device=weight.device)
torch.cat(batch*[buf_w_no_batch], 0, out=buf_w)
# get actual weight for calculation
sign_wght_no_batch = torch.empty(0, device=weight.device)
torch.sign(weight, out=sign_wght_no_batch)
sign_wght_no_batch.unsqueeze_(0)
act_wght = torch.empty(0, device=weight.device)
torch.cat(batch*[sign_wght_no_batch], 0, out=act_wght)
torch.mul(mapcbsg[buf_i, buf_w], act_wght, out=act_wght)
output = torch.empty(0, device=weight.device)
torch.matmul(act_input, act_wght.transpose(1, 2), out=output)
output = (output >> rshift_o).squeeze_(1)
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
return output
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
input, weight, bias = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
grad_input = grad_output.matmul(weight)
if ctx.needs_input_grad[1]:
grad_weight = grad_output.t().matmul(input)
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0)
return grad_input, grad_weight, grad_bias, None, None, None, None, None
class FXPLinear(torch.nn.Linear):
"""
This module is the fully connected layer for binary signed data in fxp format using binary computing.
The hardware configuration specifies
    1) the data width in bits for input and weight/bias
2) the quantile to quantize input and weight/bias
3) the rounding mode for both input and weight/bias
"""
def __init__(
self,
in_features,
out_features,
bias=True,
weight_ext=None,
bias_ext=None,
hwcfg={
"widthi" : 8,
"quantilei" : 1,
"widthw" : 8,
"quantilew" : 1,
"rounding" : "round"
}):
super(FXPLinear, self).__init__(in_features, out_features, bias)
self.hwcfg = {}
self.hwcfg["widthi"] = hwcfg["widthi"]
self.hwcfg["quantilei"] = hwcfg["quantilei"]
self.hwcfg["widthw"] = hwcfg["widthw"]
self.hwcfg["quantilew"] = hwcfg["quantilew"]
self.hwcfg["rounding"] = hwcfg["rounding"].lower()
# weight and bias
if weight_ext is not None:
assert (weight_ext.size()[0], weight_ext.size()[1]) == (out_features, in_features), \
"Error: the hw config 'out_features, in_features' in " + str(self) + " class unmatches the binary weight shape."
self.weight.data = weight_ext
if bias and (bias_ext is not None):
assert bias_ext.size()[0] == out_features, \
"Error: the hw config 'out_features' in " + str(self) + " class unmatches the binary bias shape."
self.bias.data = bias_ext
# max abs value
self.max_abs_i = 2**self.hwcfg["widthi"]
self.max_abs_w = 2**self.hwcfg["widthw"]
self.rshift_i = None
self.rshift_w = None
self.rshift_o = None
@autocast()
def forward(self, input):
# See the autograd section for explanation of what happens here.
self.rshift_i, self.rshift_w, _ = \
rshift_offset(input, self.weight, self.hwcfg["widthi"] - 1, self.hwcfg["widthw"] - 1, self.hwcfg["rounding"], self.hwcfg["quantilei"], self.hwcfg["quantilew"])
self.rshift_o = 0 - self.rshift_i - self.rshift_w
return FXPLinearFunction.apply(input, self.weight, self.bias, self.rshift_i, self.rshift_w, self.rshift_o, self.max_abs_i, self.max_abs_w)
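# Illustrative usage sketch (added, not part of the original module). It shows how
# the FXPLinear layer defined above might be constructed and called; the hwcfg keys
# mirror the defaults in __init__. It assumes torch plus the module-level helpers
# referenced above (autocast, rshift_offset) and the truncated parts of
# FXPLinearFunction behave as in the full source; it is not executed on import.
def _fxp_linear_demo():
    import torch
    layer = FXPLinear(
        in_features=4,
        out_features=2,
        bias=True,
        hwcfg={
            "widthi": 8,
            "quantilei": 1,
            "widthw": 8,
            "quantilew": 1,
            "rounding": "round",
        },
    )
    x = torch.randn(3, 4)   # batch of 3 input vectors
    y = layer(x)            # fixed-point (fxp) linear transform
    return y.shape          # expected: torch.Size([3, 2])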
# Inherit from Function
class FXPLinearFunction(torch.autograd.Function):
# Note that both forward and backward are @staticmethods
@staticmethod
# bias is an optional argument
def forward(ctx, input, weight, bias=None,
rshift_i=3,
rshift_w=3,
rshift_o=3,
max_abs_i=128,
max_abs_w=128):
ctx.save_for_backward(input, weight, bias)
        # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Source repository: YileC928/finm-portfolio-2021
"""
Mean models to use with ARCH processes. All mean models must inherit from
:class:`ARCHModel` and provide the same methods with the same inputs.
"""
from __future__ import annotations
import copy
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import numpy as np
from pandas import DataFrame, Index
from scipy.optimize import OptimizeResult
from statsmodels.tsa.tsatools import lagmat
from arch.__future__._utility import check_reindex
from arch.typing import ArrayLike, ArrayLike1D, DateLike, Label, NDArray
from arch.univariate.base import (
ARCHModel,
ARCHModelForecast,
ARCHModelResult,
implicit_constant,
)
from arch.univariate.distribution import (
Distribution,
GeneralizedError,
Normal,
SkewStudent,
StudentsT,
)
from arch.univariate.volatility import (
ARCH,
EGARCH,
FIGARCH,
GARCH,
HARCH,
ConstantVariance,
VolatilityProcess,
)
from arch.utility.array import (
AbstractDocStringInheritor,
cutoff_to_index,
ensure1d,
parse_dataframe,
)
from arch.vendor import cached_property
if sys.version_info >= (3, 8):
from typing import Literal
elif TYPE_CHECKING:
from typing_extensions import Literal
__all__ = ["HARX", "ConstantMean", "ZeroMean", "ARX", "arch_model", "LS"]
COV_TYPES = {
"white": "White's Heteroskedasticity Consistent Estimator",
"classic_ols": "Homoskedastic (Classic)",
"robust": "Bollerslev-Wooldridge (Robust) Estimator",
"mle": "ML Estimator",
"classic": "ML Estimator",
}
def _forecast_pad(count: int, forecasts: NDArray) -> NDArray:
shape = list(forecasts.shape)
shape[0] = count
fill = np.full(tuple(shape), np.nan)
return np.concatenate((fill, forecasts))
def _ar_forecast(
y: NDArray,
horizon: int,
start_index: int,
constant: float,
arp: NDArray,
x: NDArray,
exogp: NDArray,
) -> NDArray:
"""
Generate mean forecasts from an AR-X model
Parameters
----------
y : ndarray
horizon : int
start_index : int
constant : float
arp : ndarray
    x : ndarray
    exogp : ndarray
Returns
-------
forecasts : ndarray
"""
t = y.shape[0]
p = arp.shape[0]
fcasts = np.empty((t - start_index, p + horizon))
for i in range(p):
first = start_index - p + i + 1
last = t - p + i + 1
fcasts[:, i] = y[first:last]
arp_rev = arp[::-1]
for i in range(p, horizon + p):
fcasts[:, i] = constant + fcasts[:, i - p : i].dot(arp_rev)
if x.shape[0] > 0:
fcasts[:, i] += x[:, :, i - p].T @ exogp
fcasts = fcasts[:, p:]
return fcasts
def _ar_to_impulse(steps: int, params: NDArray) -> NDArray:
p = params.shape[0]
impulse = np.zeros(steps)
impulse[0] = 1
if p == 0:
return impulse
for i in range(1, steps):
k = min(p - 1, i - 1)
st = max(i - p, 0)
impulse[i] = impulse[st:i].dot(params[k::-1])
return impulse
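# Worked example (added): for an AR(1) with coefficient 0.5 the impulse response
# is simply 0.5**i, so _ar_to_impulse(4, np.array([0.5])) returns
# array([1.0, 0.5, 0.25, 0.125]). Higher-order models accumulate the dot product
# of the previous p impulse values with the reversed AR coefficients.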
class HARX(ARCHModel, metaclass=AbstractDocStringInheritor):
r"""
Heterogeneous Autoregression (HAR), with optional exogenous regressors,
model estimation and simulation
Parameters
----------
y : {ndarray, Series}
nobs element vector containing the dependent variable
x : {ndarray, DataFrame}, optional
nobs by k element array containing exogenous regressors
lags : {scalar, ndarray}, optional
Description of lag structure of the HAR.
        * A scalar includes all lags between 1 and the value.
* A 1-d n-element array includes the HAR lags 1:lags[0]+1,
1:lags[1]+1, ... 1:lags[n]+1.
* A 2-d (2,n)-element array that includes the HAR lags of the form
lags[0,j]:lags[1,j]+1 for all columns of lags.
constant : bool, optional
Flag whether the model should include a constant
use_rotated : bool, optional
Flag indicating to use the alternative rotated form of the HAR where
HAR lags do not overlap
hold_back : int
Number of observations at the start of the sample to exclude when
estimating model parameters. Used when comparing models with different
lag lengths to estimate on the common sample.
volatility : VolatilityProcess, optional
Volatility process to use in the model
distribution : Distribution, optional
Error distribution to use in the model
rescale : bool, optional
Flag indicating whether to automatically rescale data if the scale of the
data is likely to produce convergence issues when estimating model parameters.
If False, the model is estimated on the data without transformation. If True,
        then y is rescaled and the new scale is reported in the estimation results.
Examples
--------
Standard HAR with average lags 1, 5 and 22
>>> import numpy as np
>>> from arch.univariate import HARX
>>> y = np.random.RandomState(1234).randn(100)
>>> harx = HARX(y, lags=[1, 5, 22])
>>> res = harx.fit()
A standard HAR with average lags 1 and 6 but holding back 10 observations
>>> from pandas import Series, date_range
>>> index = date_range('2000-01-01', freq='M', periods=y.shape[0])
>>> y = Series(y, name='y', index=index)
>>> har = HARX(y, lags=[1, 6], hold_back=10)
Models with equivalent parametrizations of lags. The first uses
overlapping lags.
>>> harx_1 = HARX(y, lags=[1,5,22])
The next uses rotated lags so that they do not overlap.
>>> harx_2 = HARX(y, lags=[1,5,22], use_rotated=True)
The third manually specified overlapping lags.
>>> harx_3 = HARX(y, lags=[[1, 1, 1], [1, 5, 22]])
The final manually specified non-overlapping lags
>>> harx_4 = HARX(y, lags=[[1, 2, 6], [1, 5, 22]])
It is simple to verify that these are the equivalent by inspecting the R2.
>>> models = [harx_1, harx_2, harx_3, harx_4]
>>> print([mod.fit().rsquared for mod in models])
0.085, 0.085, 0.085, 0.085
Notes
-----
The HAR-X model is described by
.. math::
y_t = \mu + \sum_{i=1}^p \phi_{L_{i}} \bar{y}_{t-L_{i,0}:L_{i,1}}
+ \gamma' x_t + \epsilon_t
where :math:`\bar{y}_{t-L_{i,0}:L_{i,1}}` is the average value of
:math:`y_t` between :math:`t-L_{i,0}` and :math:`t - L_{i,1}`.
"""
def __init__(
self,
y: Optional[ArrayLike] = None,
x: Optional[ArrayLike] = None,
lags: Optional[
Union[int, Sequence[int], Sequence[Sequence[int]], NDArray]
] = None,
constant: bool = True,
use_rotated: bool = False,
hold_back: Optional[int] = None,
volatility: Optional[VolatilityProcess] = None,
distribution: Optional[Distribution] = None,
rescale: Optional[bool] = None,
) -> None:
super().__init__(
y,
hold_back=hold_back,
volatility=volatility,
distribution=distribution,
rescale=rescale,
)
self._x = x
self._x_names: List[str] = []
self._x_index: Optional[Union[NDArray, Index]] = None
self.lags: Optional[
Union[int, Sequence[int], Sequence[Sequence[int]], NDArray]
] = lags
self._lags = np.empty(0)
self.constant: bool = constant
self.use_rotated: bool = use_rotated
self.regressors: np.ndarray[Any, np.dtype[np.float64]] = np.empty(
(0, 0), dtype=np.float64
)
self._name = "HAR"
if self._x is not None:
self._name += "-X"
if lags is not None:
max_lags = int(np.max(np.asarray(lags, dtype=np.int32)))
else:
max_lags = 0
self._max_lags = max_lags
self._hold_back = max_lags if hold_back is None else hold_back
if self._hold_back < max_lags:
from warnings import warn
warn(
"hold_back is less then the minimum number given the lags selected",
RuntimeWarning,
)
self._hold_back = max_lags
self._init_model()
@property
def x(self) -> ArrayLike:
"""Gets the value of the exogenous regressors in the model"""
return self._x
def parameter_names(self) -> List[str]:
return self._generate_variable_names()
def _model_description(self, include_lags: bool = True) -> Dict[str, str]:
"""Generates the model description for use by __str__ and related
functions"""
lagstr = "none"
if include_lags and self.lags is not None:
assert self._lags is not None
lagstr_comp = [f"[{lag[0]}:{lag[1]}]" for lag in self._lags.T]
lagstr = ", ".join(lagstr_comp)
xstr = str(self._x.shape[1]) if self._x is not None else "0"
conststr = "yes" if self.constant else "no"
od = {"constant": conststr}
if include_lags:
od["lags"] = lagstr
od["no. of exog"] = xstr
od["volatility"] = self.volatility.__str__()
od["distribution"] = self.distribution.__str__()
return od
def __str__(self) -> str:
descr = self._model_description()
descr_str = self.name + "("
for key, val in descr.items():
if val and key:
descr_str += key + ": " + val + ", "
descr_str = descr_str[:-2] # Strip final ', '
descr_str += ")"
return descr_str
def __repr__(self) -> str:
txt = self.__str__()
        txt = txt.replace("\n", "")
return txt + ", id: " + hex(id(self))
def _repr_html_(self) -> str:
"""HTML representation for IPython Notebook"""
descr = self._model_description()
html = "<strong>" + self.name + "</strong>("
for key, val in descr.items():
html += "<strong>" + key + ": </strong>" + val + ",\n"
html += "<strong>ID: </strong> " + hex(id(self)) + ")"
return html
def resids(
self,
params: NDArray,
y: Optional[ArrayLike] = None,
regressors: Optional[ArrayLike] = None,
) -> ArrayLike:
regressors = self._fit_regressors if y is None else regressors
y = self._fit_y if y is None else y
assert regressors is not None
return y - regressors.dot(params)
@cached_property
def num_params(self) -> int:
"""
Returns the number of parameters
"""
assert self.regressors is not None
return int(self.regressors.shape[1])
def simulate(
self,
params: Union[NDArray, Sequence[float]],
nobs: int,
burn: int = 500,
initial_value: Optional[Union[float, NDArray]] = None,
x: Optional[ArrayLike] = None,
initial_value_vol: Optional[Union[float, NDArray]] = None,
) -> DataFrame:
"""
Simulates data from a linear regression, AR or HAR models
Parameters
----------
params : ndarray
Parameters to use when simulating the model. Parameter order is
            [mean volatility distribution] where the parameters of the mean
            model appear first, followed by those of the volatility model and
            then the distribution.
# Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Aggregation RSSAC plots
import textwrap
import grafanalib.core as GCore
import grafanacommon as GCommon
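# Usage note (added): judging from the references below, dash() expects agginfo
# to provide at least the keys 'graph_tag', 'database', 'table_suffix', 'round'
# and 'interval_divisor', and nodesel to be a string that can be embedded in a
# ClickHouse "NodeID IN {nodesel}" clause, e.g. "(1, 2, 3)". A hypothetical call
# might look like:
#   dash("rssac", {"graph_tag": "agg", "database": "dsv", "table_suffix": "5m",
#                  "round": "1m", "interval_divisor": 300}, "(1, 2, 3)")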
def dash(myuid, agginfo, nodesel, **kwargs):
return GCommon.Dashboard(
title = "RSSAC Reporting",
tags = [
agginfo['graph_tag']
],
uid = myuid,
rows = [
GCore.Row(
panels = [
GCommon.QPMGraph(
title = 'RCODE volume',
targets = [
GCommon.ClickHouseTarget(
database = agginfo['database'],
table = 'Responses' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT t, groupArray((ResponseRcode, qc))
FROM
(
SELECT
t,ResponseRcode,60*cnt/{interval_divisor} AS qc
FROM
(
SELECT
$timeSeries AS t,
ResponseRcodeMap.ResponseRcode AS ResponseRcode,
sum(toUInt64(ResponseRcodeMap.Count)) AS cnt
FROM $table
ARRAY JOIN ResponseRcodeMap
WHERE $timeFilter AND NodeID IN {nodesel}
GROUP BY t, ResponseRcode
ORDER BY t, ResponseRcode
)
GROUP BY t, ResponseRcode, cnt
ORDER BY t, ResponseRcode
)
GROUP BY t
ORDER BY t""".format(
interval_divisor=agginfo['interval_divisor'],
nodesel=nodesel)),
refId = 'A'
),
],
),
],
),
GCore.Row(
panels = [
GCommon.BarChart(
title = 'DNS UDP Query size for sizes below 1000 bytes',
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = False,
xaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
axrange = [0, 1000],
tick0 = 16,
dtick = 16,
tickangle = -45,
tickmargin = 40,
title = 'Message size (bytes)',
),
yaxis = GCommon.BarChartAxis(
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
dtick = 16,
rangemode = GCommon.BAR_CHART_AXIS_RANGEMODE_TOZERO,
title = 'Messages per minute',
),
),
traces = [
GCommon.BarChartTrace(
name = 'UDP Query',
color = '#1F60C4',
x = 'UDPQueryLen',
y = 'UDPQueryCnt',
text = 'UDPQueryCnt',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryResponseLength' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Len + intDiv({bucketsize}, 2) AS UDPQueryLen,
60 * sum(Cnt)/($to - $from) AS UDPQueryCnt
FROM
(
SELECT
intDiv(QueryLengthMap.Length, {bucketsize})*{bucketsize} AS Len,
sum(toUInt64(QueryLengthMap.Count)) AS Cnt
FROM $table
ARRAY JOIN QueryLengthMap
WHERE $timeFilter
AND TransportTCP=0
AND NodeID IN {nodesel}
AND Len < 1000
GROUP BY Len
UNION ALL
(
SELECT
CAST(number*{bucketsize} AS UInt16) AS Len,
CAST(0 AS UInt64) AS Cnt
FROM system.numbers
WHERE number > 0 LIMIT {bucketlen}
)
)
GROUP BY Len""".format(
nodesel=nodesel,
bucketsize=16, bucketlen=1000//16)),
refId = 'A'
),
],
),
],
),
GCore.Row(
panels = [
GCommon.BarChart(
title = 'DNS TCP Query size for sizes below 1000 bytes',
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = False,
xaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
axrange = [0, 1000],
tick0 = 16,
dtick = 16,
tickangle = -45,
tickmargin = 40,
title = 'Message size (bytes)',
),
yaxis = GCommon.BarChartAxis(
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
dtick = 16,
rangemode = GCommon.BAR_CHART_AXIS_RANGEMODE_TOZERO,
title = 'Messages per minute',
),
),
traces = [
GCommon.BarChartTrace(
name = 'TCP Query',
color = '#1F60C4',
x = 'TCPQueryLen',
y = 'TCPQueryCnt',
text = 'TCPQueryCnt',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryResponseLength' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Len + intDiv({bucketsize}, 2) AS TCPQueryLen,
60 * sum(Cnt)/($to - $from) AS TCPQueryCnt
FROM
(
SELECT
intDiv(QueryLengthMap.Length, {bucketsize})*{bucketsize} AS Len,
sum(toUInt64(QueryLengthMap.Count)) AS Cnt
FROM $table
ARRAY JOIN QueryLengthMap
WHERE $timeFilter
AND TransportTCP=1
AND NodeID IN {nodesel}
AND Len < 1000
GROUP BY Len
UNION ALL
(
SELECT
CAST(number*{bucketsize} AS UInt16) AS Len,
CAST(0 AS UInt64) AS Cnt
FROM system.numbers
WHERE number > 0 LIMIT {bucketlen}
)
)
GROUP BY Len""".format(
nodesel=nodesel,
bucketsize=16, bucketlen=1000//16)),
refId = 'A'
),
],
),
],
),
GCore.Row(
panels = [
GCommon.BarChart(
title = 'DNS UDP Response size for sizes below 1000 bytes',
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = False,
xaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
axrange = [0, 1000],
tick0 = 16,
dtick = 16,
tickangle = -45,
tickmargin = 40,
title = 'Message size (bytes)',
),
yaxis = GCommon.BarChartAxis(
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
dtick = 16,
rangemode = GCommon.BAR_CHART_AXIS_RANGEMODE_TOZERO,
title = 'Messages per minute',
),
),
traces = [
GCommon.BarChartTrace(
name = 'UDP Response',
color = '#1F60C4',
x = 'UDPResponseLen',
y = 'UDPResponseCnt',
text = 'UDPResponseCnt',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryResponseLength' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Len + intDiv({bucketsize}, 2) AS UDPResponseLen,
60 * sum(Cnt)/($to - $from) AS UDPResponseCnt
FROM
(
SELECT
intDiv(ResponseLengthMap.Length, {bucketsize})*{bucketsize} AS Len,
sum(toUInt64(ResponseLengthMap.Count)) AS Cnt
FROM $table
ARRAY JOIN ResponseLengthMap
WHERE $timeFilter
AND TransportTCP=0
AND NodeID IN {nodesel}
AND Len < 1000
GROUP BY Len
UNION ALL
(
SELECT
CAST(number*{bucketsize} AS UInt16) AS Len,
CAST(0 AS UInt64) AS Cnt
FROM system.numbers
WHERE number > 0 LIMIT {bucketlen}
)
)
GROUP BY Len""".format(
nodesel=nodesel,
bucketsize=16, bucketlen=1000//16)),
refId = 'A'
),
],
),
],
),
GCore.Row(
panels = [
GCommon.BarChart(
title = 'DNS TCP Response size for sizes below 1000 bytes',
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = False,
xaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
axrange = [0, 1000],
tick0 = 16,
dtick = 16,
tickangle = -45,
tickmargin = 40,
title = 'Message size (bytes)',
),
yaxis = GCommon.BarChartAxis(
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
dtick = 16,
rangemode = GCommon.BAR_CHART_AXIS_RANGEMODE_TOZERO,
title = 'Messages per minute',
),
),
traces = [
GCommon.BarChartTrace(
name = 'TCP Response',
color = '#1F60C4',
x = 'TCPResponseLen',
y = 'TCPResponseCnt',
text = 'TCPResponseCnt',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryResponseLength' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Len + intDiv({bucketsize}, 2) AS TCPResponseLen,
60 * sum(Cnt)/($to - $from) AS TCPResponseCnt
FROM
(
SELECT
intDiv(ResponseLengthMap.Length, {bucketsize})*{bucketsize} AS Len,
sum(toUInt64(ResponseLengthMap.Count)) AS Cnt
FROM $table
ARRAY JOIN ResponseLengthMap
WHERE $timeFilter
AND TransportTCP=1
AND NodeID IN {nodesel}
AND Len < 1000
GROUP BY Len
UNION ALL
(
SELECT
CAST(number*{bucketsize} AS UInt16) AS Len,
CAST(0 AS UInt64) AS Cnt
FROM system.numbers
WHERE number > 0 LIMIT {bucketlen}
)
)
GROUP BY Len""".format(
nodesel=nodesel,
bucketsize=16, bucketlen=1000//16)),
refId = 'A'
),
],
),
],
),
GCore.Row(
panels = [
GCommon.BarChart(
title = 'DNS UDP Query size for sizes above 1000 bytes',
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = False,
xaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
axrange = [1000, 3000],
tick0 = 1008,
dtick = 32,
tickangle = -90,
tickmargin = 45,
title = 'Message size (bytes)',
),
yaxis = GCommon.BarChartAxis(
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
dtick = 16,
rangemode = GCommon.BAR_CHART_AXIS_RANGEMODE_TOZERO,
title = 'Messages per minute',
),
),
traces = [
GCommon.BarChartTrace(
name = 'UDP Query',
color = '#FFB357',
x = 'UDPQueryLen',
y = 'UDPQueryCnt',
text = 'UDPQueryCnt',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryResponseLength' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Len + intDiv({bucketsize}, 2) AS UDPQueryLen,
60 * sum(Cnt)/($to - $from) AS UDPQueryCnt
FROM
(
SELECT
intDiv(QueryLengthMap.Length, {bucketsize})*{bucketsize} AS Len,
sum(toUInt64(QueryLengthMap.Count)) AS Cnt
FROM $table
ARRAY JOIN QueryLengthMap
WHERE $timeFilter
AND TransportTCP=0
AND NodeID IN {nodesel}
AND Len >= 1000
GROUP BY Len
UNION ALL
(
SELECT
CAST((number + intDiv(1000, {bucketsize}))*{bucketsize} AS UInt16) AS Len,
CAST(0 AS UInt64) AS Cnt
FROM system.numbers
WHERE number > 0 LIMIT {bucketlen}
)
)
GROUP BY Len""".format(
nodesel=nodesel,
bucketsize=16, bucketlen=2000//16)),
refId = 'A'
),
],
),
],
),
GCore.Row(
panels = [
GCommon.BarChart(
title = 'DNS TCP Query size for sizes above 1000 bytes',
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = False,
xaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
axrange = [1000, 3000],
tick0 = 1008,
dtick = 32,
tickangle = -90,
tickmargin = 45,
title = 'Message size (bytes)',
),
yaxis = GCommon.BarChartAxis(
axtype = GCommon.BAR_CHART_AXIS_TYPE_LINEAR,
dtick = 16,
rangemode = GCommon.BAR_CHART_AXIS_RANGEMODE_TOZERO,
title = 'Messages per minute',
),
),
traces = [
GCommon.BarChartTrace(
name = 'TCP Query',
color = '#FFB357',
x = 'TCPQueryLen',
y = 'TCPQueryCnt',
text = 'TCPQueryCnt',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryResponseLength' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
                                    Len + intDiv({bucketsize}, 2) AS TCPQueryLen,
proofctx):
# Note that if var is a proof dummy variable, then
# proofctx.fvvarmap.get(var, None) is just None and
# check_free_in() will return False unless var occurs _explicitly_
# free in term.
return self.check_free_in(var, term, proofctx.fvvarmap.get(var, None))
# match templ, which is an expression in the variable space of the
# assertion being applied, against exp, an expression in the variable
# space of the current proof, extending dictionary env, which maps from
# the variables in the template space to expressions in the current proof
def match(self, templ, exp, env):
if type(templ) == type('var'):
if env.has_key(templ):
if exp != env[templ]:
# todo: more debug detail
raise VerifyError('Unification error')
else:
# Note, we check elsewhere if a binding variable is matched
# against a non-binding-variable term.
env[templ] = exp
elif type(templ) == type([]):
if type(exp) != type([]):
raise VerifyError('Unification error, expected ' + sexp_to_string(templ) + ' got ' + exp)
if templ[0] != exp[0]:
raise VerifyError('Unification error, expected ' + templ[0] + ' got ' + exp[0])
# todo: next check should never trigger, I think all terms
# given to match are well-formed.
if len(exp) != len(templ):
raise VerifyError('Term ' + templ[0] + ' expects arity ' +
str(len(templ)) + ' got ' + str(len(exp)))
for i in range(1, len(templ)):
self.match(templ[i], exp[i], env)
def apply_subst(self, templ, env):
if type(templ) == type('var'):
return env[templ]
elif type(templ) == type([]):
return [templ[0]] + [self.apply_subst(el, env) for el in templ[1:]]
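# Illustrative example (added): with template ['->', 'A', 'B'] and expression
# ['->', 'p', ['-.', 'q']], match() extends env to {'A': 'p', 'B': ['-.', 'q']},
# and apply_subst(['->', 'A', 'B'], env) rebuilds ['->', 'p', ['-.', 'q']].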
def term_common(kindname, sig, freespecs, kinds, terms, vars):
""" term parsing support for 'term' and 'defthm' commands """
if type(sig) != type([]) or len(sig) < 1 or type(sig[0]) != type('term'):
raise SyntaxError(\
'A term signature must be a list starting with a term symbol.')
try:
kind = kinds[kindname]
except KeyError:
raise VerifyError('Unknown kind ' + kindname)
except TypeError:
raise SyntaxError('A term kind name must be an atom.')
termname = sig[0]
if termname in terms:
raise VerifyError('A term ' + termname + ' already exists.')
argkinds = []
# freemap will be a list whose length is the number of arguments
# of the term. If freemap[i] < 0, then argument i is a term variable
# argument. If freemap[i] >= 0, then argument i is a binding variable
# and freemap[i] is a bitmap value. For 0 <= j < len(freemap), the jth
# bit in freemap[i] is 1 if, in a term expression built from the term,
# the actual binding variable argument i occurs free in the term expression
# if it occurs free in the actual argument expression substituted for
# argument j.
invmap = {}
args = sig[1:]
nargs = len(args)
freemap = [-1]*nargs
for i in xrange(nargs):
v = args[i]
try:
var = vars[v]
except KeyError:
raise VerifyError('Unknown variable ' + v)
except TypeError:
raise SyntaxError('Term formal argument must be variable')
if v in invmap:
raise VerifyError('Formal argument ' + v + ' reused')
invmap[v] = i
argkinds.append(var[1]) # kinds[var[1]] ?
if var[0] == 'var':
freemap[i] = 0 # empty bitmap
elif var[0] != 'tvar': # might be 'stmt' or 'thm' in defthm case
raise VerifyError('Term formal argument must be a variable.')
if freespecs is None:
return (kind, argkinds, freemap)
for freespec in freespecs:
if type(freespec) != type([]) or len(freespec) < 2:
raise SyntaxError('Each free variable map must be a list of at least two variables.')
try:
bvix = invmap[freespec[0]]
except KeyError:
raise VerifyError(freespec[0] +
' is not a formal argument variable')
except (IndexError, TypeError):
raise SyntaxError('A free variable specification must be a list of formal argument variables, the first of which is a binding variable')
bmap = freemap[bvix]
if bmap < 0:
raise VerifyError(freespec[0] +
' is not a binding variable argument')
if bmap != 0:
raise VerifyError('More than one freespec for ' + freespec[0])
for x in freespec[1:]:
try:
ix = invmap[x]
except TypeError:
raise SyntaxError('Expected a variable, found ' +
sexp_to_string(x) +
' in free variable specification list')
except KeyError:
raise VerifyError('Expected an argument variable, found ' +
x + ' in free variable specification list ')
po2 = (1 << ix)
if (bmap & po2) != 0:
# Might as well be strict here...
raise VerifyError(\
'Duplicate argument variable listed in freespec for ' +
freespec[0])
bmap = bmap | po2
freemap[bvix] = bmap
return (kind, argkinds, freemap)
def invertible_match(newexp, origexp, env, inv):
if type(newexp) == type('var'):
if type(origexp) != type('var'):
return False
v = env.get(newexp, None)
if v != None:
return (v == origexp)
env[newexp] = origexp
if origexp in inv:
return False
inv[origexp] = newexp
return True
if type(origexp) != type([]):
return False
# Note, we know that the arities are equal since this function is
# called only with well-formed expressions. However, for robustness:
n = len(newexp)
if n != len(origexp) or n == 0:
return False
if newexp[0] != origexp[0]:
return False
i = 1
for ne in newexp[1:]:
if not invertible_match(ne, origexp[i], env, inv):
return False
i = i + 1
return True
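# Illustrative example (added): invertible_match(['f', 'x', 'y'], ['f', 'a', 'b'], {}, {})
# returns True and fills env={'x': 'a', 'y': 'b'}, inv={'a': 'x', 'b': 'y'}, while
# invertible_match(['f', 'x', 'x'], ['f', 'a', 'b'], {}, {}) returns False because
# 'x' cannot map to both 'a' and 'b'.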
class InterfaceCtx:
def __init__(self, name, verify, prefix, params, sort="import"):
self.name = name
self.verify = verify
self.prefix = prefix
self.params = params
self.sort = sort
self.used_params = {}
self.vars = {}
# self.myterm holds the terms introduced by this import context.
# self.terms is the larger collection of terms visible at the
# current point in this import context. It contains also terms
# made available via param commands.
self.terms = {}
self.myterms = {}
# self.mykinds is the collection of kinds introduced by the import
# context itself. self.kinds is the larger collection of kinds
# available to the import context(at the current point). It includes
# also kinds made available via param commands.
self.kinds = {}
self.mykinds = {}
self.error_handler = None
def get_kind(self, rawkind):
try:
return self.kinds[rawkind]
except KeyError:
raise VerifyError('Kind ' + rawkind + ' is not known in ' + \
self.sort + ' context.')
except TypeError:
raise SyntaxError ('A kind name must be a string.')
def kind_cmd_common(self, arg):
if type(arg) != type([]) or len(arg) != 1:
raise VerifyError('kind command takes one arg')
kname = arg[0]
if type(kname) != type('str'):
raise VerifyError('kind argument must be string')
if kname in self.kinds:
raise VerifyError('A kind ' + kname +
' is already visible in the current ' + self.sort +
' export context.')
kprefixed = self.prefix + kname
self.kinds[kname] = kprefixed
self.mykinds[kname] = kprefixed
return kprefixed
def var_cmd(self, cmd, arg):
if not isinstance(arg, list) or len(arg) < 1:
raise SyntaxError('Expected ' + cmd + ' (KIND VAR ...)')
kind = self.get_kind(arg[0])
for v in arg[1:]:
if type(v) != type('var'):
raise SyntaxError('Variable names must be atoms.')
if v in self.vars:
raise VerifyError('Variable ' + v + ' already defined')
self.vars[v] = (cmd, kind, v)
def param_cmd(self, arg):
# param (IFACE IGNORED_URL (PARAM ...) PREFIX)
if type(arg) != type([]) or len(arg) != 4:
raise SyntaxError( \
'Expected param (IFACE IGNORED_URL (PARAM ...) PREFIX)')
ifname = arg[0]
url = arg[1] # Unused except to check it is an atom
paramnames = arg[2]
prefix = arg[3]
if type(ifname) != type('ifname') or \
type(url) != type('url') or \
type(paramnames) != type([]) or \
type(prefix) != type('prefix'):
raise SyntaxError( \
'Expected param (IFACE IGNORED_URL (PARAM ...) PREFIX)')
if len(prefix) < 2 or prefix[0] != '"' or prefix[-1] != '"':
raise SyntaxError('Namespace prefix must be enclosed in quotes')
prefix = prefix[1:-1]
if ifname in self.used_params:
raise VerifyError('Interface parameter ' + ifname + \
' was already used.')
n = len(self.used_params)
try:
p = self.params[n]
except IndexError:
raise VerifyError(\
"More param commands than provided interface parameters")
subparams = []
for pn in paramnames:
try:
subparams.append(self.used_params[pn])
except KeyError:
raise VerifyError('Unknown interface parameter name ' + pn)
except TypeError:
raise SyntaxError('param parameter must be interface name')
# note, this check also checks distinctness of subparams
if subparams != p.params:
raise VerifyError('Context ' + self.name + \
                ' changes the parameters of the interface.')
#repcap.py
#version 1.0
#Python 3
#Requires scapy - tested/developed with 2.4.3
#Project to allow users to modify packet captures and replay them
#Author: <NAME> - <EMAIL>
# Command Line Tool that takes in arguments
# Current version supports 5 modes: passed to tool via -m command
# 0 - the ability to identify IP and MAC addresses in a packet capture - default when no mode provided
# 1 - the ability to replay a packet capture unaltered - must include interface to send it
# 2 - the ability to read in a packet capture and rewrite the ips and create a new packet capture file (.pcap)
# 3 - the ability to read and rewrite ( same as 2 ) and then replay the capture ( with interface presented )
# 4 - a walk through to create a configuration file that will be used by modes 1 - 3
import logging, sys, getopt, os.path
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
infile = 'none'
interface = 'none'
outputfile = 'none'
configfile = 'none'
mode = 0
#Supported file types - pcap and pcapng today
ALLOWED_EXTENSIONS = {'pcap', 'pcapng'}
def allowed_file(filename):
# Quick function to validate file type / extension
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def mainargs(argv):
# Take arguments from command and assign to runtime parameters
global cwd, interface, mode, configfile, outputfile
try:
opts, args = getopt.getopt(argv, "hm:i:c:o:", ["mode=","interface=","config=","outfile="])
except getopt.GetoptError:
printhelp()
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt in ("-m", "--mode"):
mode = arg
elif opt in ("-i", "--interface"):
interface = arg
elif opt in ("-c", "--config"):
configfile = arg
elif opt in ("-o", "--outfile"):
outputfile = arg
#print("Selected Mode is ",mode)
#print(argv[len(argv)-1])
def printhelp():
# Function to print the help information when -h is invoked or incorrect commands provided
print("_________________________________________________")
print("usage: repcap.py <options> inputfile")
print("inputfile must be .pcap or .pcapng")
print("example: repcap.py example1.pcap - will list unique ip and mac addresses in example1.pcap")
print("example: repcap.py -m 1 -i en0 example1.pcap - will replay example1.pcap unaltered out interface en0")
print("example: repcap.py -m 4 example1.pcap - will guide through creating a config file and output example1.config")
print("_________________________________________________")
print("options:")
print("-m mode see mode section below")
print("-i target interface - used for packet replay - name of interface")
print("-c configuration file - if not provided will look for default which is based on name of provided pcap <name of sourcefile>.config")
print("-o output file - if not provided will use name of <name of sourcefile>new.pcap")
print("_________________________________________________")
print("modes:")
print("1 = replay packet capture unaltered - must include -i option for interface")
print("2 = read config file and rewrite IPs from source packet capture and create new file")
print("3 = read config file and rewrite IPs from source packet capture, create new file and replay - must include -i option for interface")
print("4 = guided mode for packet rewrite - will create .config text file")
def findUniqueIP(pcap):
# Function to identify all the unique source / destination IP and MAC addresses in a packet capture ( and sort )
print("Loading Packet Capture")
uniqueip = {}
uniquesortedip = {}
packets = rdpcap(pcap)
for p in packets:
if p.haslayer(IP):
if p[IP].src not in uniqueip:
uniqueip[str(p[IP].src)] = str(p[Ether].src)
elif p[IP].dst not in uniqueip:
uniqueip[str(p[IP].dst)] = str(p[Ether].dst)
for ip in sorted(uniqueip.keys(), key = lambda ip: (int(ip.split(".")[0]), int(ip.split(".")[1]), int(ip.split(".")[2]), int(ip.split(".")[3]))):
uniquesortedip[ip] = uniqueip[ip]
return uniquesortedip
def replaypackets(pcap, inter):
# Function to replay a packet capture - used for mode 1 and 3
print("Reading Packet Capture ")
packets = rdpcap(pcap)
print("Starting Packet Replay")
sendp(packets, iface=str(inter), verbose=False)
print("Packet Replay Complete")
def validateipformat(ip):
# Function to validate IP address is in the right format
myregex = "^((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])(\.(?!$)|$)){4}$"
return re.fullmatch(myregex, ip, flags=0)
def validatemacformat(mac):
# Function to validate MAC address is in the right format
myregex = "^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})$"
return re.fullmatch(myregex, mac, flags=0)
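# Quick sanity examples (added): validateipformat("192.168.1.10") returns a match
# object (truthy) while validateipformat("256.1.1.1") returns None; likewise
# validatemacformat("aa:bb:cc:dd:ee:ff") is truthy and
# validatemacformat("aa-bb-cc-dd-ee-ff") is None, since the regex only accepts
# colon-separated pairs.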
def configbreakdown(configurationfile):
# Function to read in a configuration file and pull out the IP and MAC changes in the file
macdict = {}
ipdict = {}
cf = open(configurationfile, 'r')
for line in cf:
nowhite = line.strip()
if nowhite.split(',')[0] == "ip":
if nowhite.split(',')[2] == "Del":
print("Deleting " + nowhite.split(',')[1])
elif validateipformat(nowhite.split(',')[2]) and validateipformat(nowhite.split(',')[1]):
ipdict[(nowhite.split(',')[1])] = nowhite.split(',')[2]
else:
print("Configuration File Formatting Error, please go through mode 4 to create a new file")
print(nowhite)
sys.exit()
elif nowhite.split(',')[0] == "mac":
if validatemacformat(nowhite.split(',')[1]) and validatemacformat(nowhite.split(',')[2]):
macdict[(nowhite.split(',')[1])] = nowhite.split(',')[2]
else:
print("Configuration File Formatting Error, please go through mode 4 to create new file")
print(nowhite)
sys.exit()
else:
print("Configuration File Formatting Error, please go through mode 4 to create new file")
print(nowhite)
sys.exit()
return macdict, ipdict
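# Example configuration lines (added, based on the parsing above): each line is
# "<type>,<old>,<new>", e.g.
#   ip,10.0.0.5,192.168.1.50
#   ip,10.0.0.9,Del
#   mac,00:11:22:33:44:55,66:77:88:99:aa:bb
# A "Del" in the new-IP field is only reported by configbreakdown; the rewrite
# step in packetrewrite leaves those packets untouched as currently written.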
def namebreakdown(inputpcapname):
# Function to break down a file and get the name and extension as a list
pcapfilenamepath = inputpcapname.strip()
pcapfilenamepath = pcapfilenamepath.split("/")
pcapfilenamepath = pcapfilenamepath[len(pcapfilenamepath) - 1]
pcapfilenamepath = pcapfilenamepath.split(".")
return pcapfilenamepath
def packetrewrite(macdict, ipdict, pcap):
# Function that takes in MAC address and IP Dictionary to rewrite existing IP and MAC to new ones
# Checks if the output file already exists and checks prior to over writing the file
# output file name may be user provided or auto generated based on name of input file
global outputfile
loop1 = False
if outputfile == 'none':
tempname = namebreakdown(pcap)
outputfile = tempname[0] + "new." + tempname[1]
if os.path.isfile(outputfile):
print("_________________________________________________")
loop1 = True
while loop1 == True:
docontinue = input(outputfile + " already exists! This action will overwrite existing file. enter Y to proceed or N to cancel:")
if str(docontinue) == "N":
loop1 = False
sys.exit()
elif str(docontinue) == "Y":
loop1 = False
else:
print(outputfile + " not found, creating new file")
print("Reading in Packets from " + pcap)
packets = rdpcap(pcap)
print("Re-writing Packets")
for p in packets:
if p.haslayer(IP):
del p[IP].chksum
if p[IP].src in ipdict:
p[IP].src = ipdict[p[IP].src]
if p[IP].dst in ipdict:
p[IP].dst = ipdict[p[IP].dst]
if p[Ether].src in macdict:
p[Ether].src = macdict[p[Ether].src]
if p[Ether].dst in macdict:
p[Ether].dst = macdict[p[Ether].dst]
if p.haslayer(TCP):
del p[TCP].chksum
wrpcap(outputfile, packets)
print("Packets rewritten to " + outputfile)
def modezero():
# Mode 0 Function - default if no mode provided in command - print list of unique IP and MAC addresses
uniqueip = findUniqueIP(infile)
print("Unique IP and MAC addresses in Source PCAP: ")
for ip, mac in uniqueip.items():
print(ip + " " + mac)
sys.exit()
def modeone():
# Mode 1 Function - replay provided packet capture without any changes
global cwd, interface, mode, configfile, outputfile
if interface == 'none':
print("Error: Interface must be provided with Mode 1 command. See 'repcap.py -h' for command help")
sys.exit()
else:
replaypackets(infile, interface)
def modetwo():
# Mode 2 Function - does a packet capture rewrite and outputs to a new file
# Takes in a configuration file and a source packet capture. User can provide a name for the new packet capture
# or will create a packet capture based on the source file name ( with new )
global cwd, interface, mode, configfile, outputfile, infile
cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
resultdict = {}
if configfile == 'none':
tempname = namebreakdown(infile)
cfname = cwd + '/' + tempname[0] + ".config"
else:
cfname = configfile
if os.path.isfile(cfname):
print(cfname + " found - using for configuration")
resultdict = configbreakdown(cfname)
else:
print(cfname + " not found and no configuration file provided. See 'repcap.py -h' for command help")
sys.exit()
packetrewrite(resultdict[0], resultdict[1], infile)
def modethree():
# Mode 3 Function - does a packet capture rewrite and outputs to a new file and replays that new file
# Takes in a configuration file and a source packet capture. User can provide configuration file or looks for default
# Default Configuration file name is source file name with .config extension
# User can provide a name for the new packet capture
# or will create a packet capture based on the source file name ( with new )
global cwd, interface, mode, configfile, outputfile, infile
cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
if interface == 'none':
print("Error: Interface must be provided with Mode 3 command. See 'repcap.py -h' for command help")
sys.exit()
elif configfile == 'none':
tempname = namebreakdown(infile)
cfname = cwd + '/' + tempname[0] + ".config"
else:
cfname = configfile
if os.path.isfile(cfname):
print(cfname + " found - using for configuration")
resultdict = configbreakdown(cfname)
else:
print(cfname + " not found and no configuration file provided. See 'repcap.py -h' for command help")
sys.exit()
packetrewrite(resultdict[0], resultdict[1], infile)
    # replay the rewritten capture produced above rather than the original input
    replaypackets(outputfile, interface)
def modefour():
# Mode 4 Function - walks users through existing IP Addresses and MAC addresses in provided packet capture
# User can choose to maintain the same IP/MAC, Change the IP and/or Change the MAC Address or delete the host from the capture
# User can provide configuration file name or will use the default
# If file exists, check user wants to overwrite existing file
# If user chooses to delete an IP Address, the paired MAC is removed as well - so doesn't prompt for details of that
# User can't choose to delete a MAC address
global cwd, infile, configfile
loop1 = False
skipmac = False
cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
if configfile == 'none':
tempname = namebreakdown(infile)
cfname = cwd + '/' + tempname[0] + ".config"
else:
cfname = configfile
if os.path.isfile(cfname):
print("_________________________________________________")
loop1 = True
while loop1 == True:
docontinue = input(cfname + " already exists! This action will overwrite existing file. enter Y to proceed or N to cancel:")
if str(docontinue) == "N":
                loop1 = False
# src/python/live_plotting.py
#!/usr/bin/env python
DEF_RES_PATH = "/home/dimitar/projects/STT_theories/results/APR4"
PREVIUS_FILE_BETA = 0
PREVIUS_FILE_M = 0
PREVIUS_FILE_LAMBDA = 0
#~ DEF_RES_PATH_PREV = "/home/dimitar/projects/STT_theories/results/" \
#~ + "Results_Statia_1/" \
#~ + "beta{:.0f}/".format(PREVIUS_FILE_BETA) \
#~ + "lambda{:.0e}".format(PREVIUS_FILE_LAMBDA)
#~ DEF_RES_FNAME_PREV = "STT_phiScal_J_AkmalPR_" \
#~ + "beta{:.3e}_".format(PREVIUS_FILE_BETA) \
#~ + "m{:.3e}_".format(PREVIUS_FILE_M) \
#~ + "lambda{:.3e}".format(PREVIUS_FILE_LAMBDA)
DEF_RES_PATH_PREV = "/home/dimitar/projects/STT_theories/results/APR4" \
+ "" \
+ "" \
+ ""
DEF_RES_FNAME_PREV = "STT_phiScal_J_APR4_" \
+ "beta{:.3e}_".format(PREVIUS_FILE_BETA) \
+ "m{:.3e}_".format(PREVIUS_FILE_M) \
+ "lambda{:.3e}".format(PREVIUS_FILE_LAMBDA)
_kalin_beta = "beta-6"
DEF_RES_KALIN = "/home/dimitar/Documents/Teaching_Materials/University/" \
+ "Uvod.Fizika.Cherni.Dupki/Doktorant/Moi_Statii_Prezentacii/Statia_3/Kalin_data/" \
+ _kalin_beta + "/"
KALIN_BETA = _kalin_beta
KALIN_LAMBDA = "lambda{}_".format(PREVIUS_FILE_LAMBDA)
KALIN_M = "m{}".format(PREVIUS_FILE_M)
def _check_if_StrNotBlank(string):
"""
    check if a string is blank/empty
Parameters
----------
Returns
-------
: boolean
True if string is not blank/empty
False if string is blank/empty
"""
return bool(string and string.strip())
def _get_latest_file(
path = DEF_RES_PATH,
model = "STT_phiScal_J_"
):
"""
return the name of the latest modified file in path
by creating wildcard of the type model + "*"
the default values are where the results are plus presume that we will
plot the result file only
Parameters
----------
path: string
the path to the directory of interest
model: string
how the file name looks like, it serves as wildcard
of the type model + "*"
Returns
-------
: string
the name of latest modified file
"""
import glob
import os
if (_check_if_StrNotBlank(path) and
_check_if_StrNotBlank(model)):
filename = max(
glob.glob(path + "/" + model + "*"),
key=os.path.getctime
)
filename = filename.split("/")[-1]
#~ print(
#~ "\n latest file in \n\t {} is: {} \n".format(
#~ path, filename
#~ )
#~ )
else:
filename = ""
print("\n path and name not set, some filename var!!! \n")
return filename
def _units_coef_clac():
"""
Calculates the unit coefficients
Parameters
----------
Returns
-------
: dictionary
"density" in (double) g cm^-3
"pressure" in (double) dyns cm^-3
"r" in (double) km
"j" in (double) m^2 kg
"""
# mas of sun in kg
const_msun = 1.9891e30
# gravitational const in m^3kg^-1s^-2
const_g = 6.67384e-11
# speed of light in ms^-1
const_c = 299792458
units = {}
# units of density in g cm^-3
units["density"] = 1e-3 * const_c**6 / (const_g**3 * const_msun**2)
    # units of pressure in dyn cm^-2
units["pressure"] = const_c**8 / (const_g**3 * const_msun**2) * 10
# units of rad coordinate in km
units["rad"] = 1e-3 * const_g * const_msun / const_c**2
# units of moment of inertia
units["j"] = 1e7 * const_g**2 * const_msun**3 / const_c**4
return units
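# Sanity check (added): with these constants units["rad"] = 1e-3 * G * Msun / c**2
# evaluates to roughly 1.48, i.e. one geometric length unit corresponds to about
# 1.48 km (GM_sun / c^2), the usual scale for solar-mass compact stars.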
def _load_kalin_file(
data=[], headline=[], label=[],
path=DEF_RES_KALIN,
filename=KALIN_BETA+KALIN_LAMBDA+KALIN_M+".txt"
):
"""
load kalin result file with name <filename> by appending the data
after taking care of coefficients to <data> and saving the <headlines>(
what is the name of each column) and label - the filename itself
Parameters
----------
path: string
the full path of the directory where the file is
filename: string
the name of the file itself
data: list
where to append the data of interest
headline: list
what each column is
label: list
the name the given data, expect it will be the filename
Returns
-------
"""
import os.path
all_data = []
if os.path.isfile(path+filename):
with open(path + filename, "r") as f:
all_data = f.readlines()
else:
print(
"from where to load a kalin file!!! {}{} pass ".format(
path, filename
)
)
pass
#~ get rid of inline text
all_data = [
line.strip() for line in all_data
if "lambda" not in line and "f_rot" not in line and line.strip()
]
#~ i am not interested in all data, so only the indexes here
#~ will be saved
#~ since in some files the central pressure is in different column
#~ we check it and take into account
indexes = [3, 5, 7, 10, 6, 2]
_units = _units_coef_clac()
units = [
_units["density"], _units["rad"],
1, 1, 1, 1
]
data.append([[] for i in indexes])
for cnt, line in enumerate(all_data):
for i, u, d in zip(indexes, units, data[-1]):
try:
d.append(float(line.split(" ")[i]) / u)
except ValueError:
print(
"\n ValueError: line {}: {} \n".format(
cnt, line
)
)
break
for i, val in enumerate(data[-1][4]):
data[-1][4][i] *= (-1) if val > 0 else 1
label.append(filename)
return { "data": data[-1], "label": label[-1] }
def _load_GR(
data = [],headline = [], label = [], path = DEF_RES_PATH, filename = ""
):
"""
load the data from <filename> in <path> by appending them to
<data>, append the headline (the name of each column) in
<headline> and the name of the file in <label>
Parameters
----------
path: string
the path to the directory containing the file with data
filename: string
the name of the file which will be loaded
data: list
the loaded data will be appended here
headline: list
the name of each column will be appended here as a list
label: list
the name of the file will be apended here
Returns
-------
: dictionary
dictionary with entries
{ "data" : data, "headline": headline, "label": label }
"""
data.clear()
headline.clear()
label.clear()
if not(_check_if_StrNotBlank(path) and
_check_if_StrNotBlank(filename)):
print(
"\n from where to load a file!!! \n\t {} /{} \n".format(
path, filename
)
)
with open(path + "/" + filename, "r") as f:
all_data = f.readlines()
headline.append(
[
i.strip() for i in
all_data.pop(0).strip().split(" ")
if
"#" not in i and
len(i.strip())
]
)
label.append(filename.split("/")[-1][-41:])
data.append(
[
[] for i in all_data[0].strip().split(" ") if len(i.strip())
]
)
for cnt, line in enumerate(all_data):
for apnd, num in zip(
data[-1], [
float(i) for i in line.strip().split(" ") if len(i.strip())
]
):
apnd.append(num)
return { "data": data[-1], "headline": headline[-1], "label": label[-1] }
def _load_previus_data(
data = [],headline = [], label = [],
path = DEF_RES_PATH_PREV,
filename = DEF_RES_FNAME_PREV
):
"""
load the data from <filename> in <path> by appending them to
<data>, append the headline (the name of each column) in
<headline> and the name of the file in <label>
Parameters
----------
path: string
the path to the directory containing the file with data
filename: string
the name of the file which will be loaded
data: list
the loaded data will be appended here
headline: list
the name of each column will be appended here as a list
label: list
the name of the file will be apended here
Returns
-------
: dictionary
dictionary with entries
{ "data" : data, "headline": headline, "label": label }
"""
data.clear()
headline.clear()
label.clear()
if not(_check_if_StrNotBlank(path) and
_check_if_StrNotBlank(filename)):
print(
"\n from where to load a file!!! \n\t {} /{} \n".format(
path, filename
)
)
try:
with open(path + "/" + filename, "r") as f:
all_data = f.readlines()
except FileNotFoundError:
print(path + "/" + filename," MISSING !!! ")
return { }
headline.append(
[
i.strip() for i in
all_data.pop(0).strip().split(" ")
if
"#" not in i and
len(i.strip())
]
)
label.append(filename.split("/")[-1][-41:])
data.append(
[
[] for i in all_data[0].strip().split(" ") if len(i.strip())
]
)
for cnt, line in enumerate(all_data):
for apnd, num in zip(
data[-1], [
float(i) for i in line.strip().split(" ") if len(i.strip())
]
):
apnd.append(num)
return { "data": data[-1], "headline": headline[-1], "label": label[-1] }
def _load_file(
filename = "", data = [], headline = [], label = [], path = DEF_RES_PATH
):
"""
load the data from <filename> in <path> by appending them to
<data>, append the headline (the name of each column) in
<headline> and the name of the file in <label>
Parameters
----------
path: string
the path to the directory containing the file with data
filename: string
the name of the file which will be loaded
data: list
the loaded data will be appended here
headline: list
the name of each column will be appended here as a list
label: list
the name of the file will be apended here
Returns
-------
: dictionary
dictionary with entries
{ "data" : data, "headline": headline, "label": label }
"""
data.clear()
headline.clear()
label.clear()
if not(_check_if_StrNotBlank(path)):
print(
"\n from where to load a file!!! \n\t {} /{} \n".format(
path, filename
)
)
filename = _get_latest_file()
with open(path + "/" + filename, "r") as f:
all_data = f.readlines()
headline.append(
[
i.strip() for i in
all_data.pop(0).strip().split(" ")
if
"#" not in i and
len(i.strip())
]
)
| |
# ShapeStatistics.py
#Purpose: To implement a suite of 3D shape statistics and to use them for point
#cloud classification
#TODO: Fill in all of this code for group assignment 2
import sys
sys.path.append("S3DGLPy")
from Primitives3D import *
from PolyMesh import *
import numpy as np
import matplotlib.pyplot as plt
POINTCLOUD_CLASSES = ['biplane', 'desk_chair', 'dining_chair', 'fighter_jet', 'fish', 'flying_bird', 'guitar', 'handgun', 'head', 'helicopter', 'human', 'human_arms_out', 'potted_plant', 'race_car', 'sedan', 'shelves', 'ship', 'sword', 'table', 'vase']
NUM_PER_CLASS = 10
#########################################################
## UTILITY FUNCTIONS ##
#########################################################
#Purpose: Export a sampled point cloud into the JS interactive point cloud viewer
#Inputs: Ps (3 x N array of points), Ns (3 x N array of estimated normals),
#filename: Output filename
def exportPointCloud(Ps, Ns, filename):
N = Ps.shape[1]
fout = open(filename, "w")
fmtstr = "%g" + " %g"*5 + "\n"
for i in range(N):
fields = np.zeros(6)
fields[0:3] = Ps[:, i]
fields[3:] = Ns[:, i]
fout.write(fmtstr%tuple(fields.flatten().tolist()))
fout.close()
#Purpose: To sample a point cloud, center it on its centroid, and
#then scale all of the points so that the RMS distance to the origin is 1
def samplePointCloud(mesh, N):
(Ps, Ns) = mesh.randomlySamplePoints(N)
##TODO: Center the point cloud on its centroid and normalize
#by its root mean square distance to the origin. Note that this
#does not change the normals at all, only the points, since it's a
#uniform scale
centroid = np.mean(Ps, 1)[:, None] #return 3 by 1
    Ps -= centroid
scale = np.sqrt(np.sum(np.square(Ps))/N)
    Ps /= scale
return (Ps, Ns)
#Purpose: To sample the unit sphere as evenly as possible. The higher
#res is, the more samples are taken on the sphere (in an exponential
#relationship with res). By default, samples 66 points
def getSphereSamples(res = 2):
m = getSphereMesh(1, res)
return m.VPos.T
#Purpose: To compute PCA on a point cloud
#Inputs: X (3 x N array representing a point cloud)
def doPCA(X):
return np.linalg.eigh(X.dot(X.T))
#########################################################
## SHAPE DESCRIPTORS ##
#########################################################
#Purpose: To compute a shape histogram, counting points
#distributed in concentric spherical shells centered at the origin
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency)
#NShells (number of shells), RMax (maximum radius)
#Returns: hist (histogram of length NShells)
def getShapeHistogram(Ps, Ns, NShells, RMax):
    #distance of every point to the origin minus each shell's inner radius (NShells x N)
    H = np.sqrt(np.sum(Ps**2, 0))[None, :] - np.linspace(0, RMax, NShells, False)[:, None]
    #S[i]: number of points at distance >= the i-th shell's inner radius
    S = np.sum((H >= 0).reshape(NShells, Ps.shape[1]), 1)
    #N[i]: number of points beyond shell i (for the last shell: points farther than RMax)
    N = np.resize(S[1:], NShells)
    N[-1] = np.sum(np.sqrt(np.sum(Ps**2, 0)) > RMax)
    #points falling inside shell i but not in any farther shell
    return S - N
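#Equivalent formulation (a sketch kept alongside the original, not a replacement):
#np.histogram with NShells equal-width bins on [0, RMax] yields the same counts,
#with points farther than RMax discarded.
def getShapeHistogramAlt(Ps, Ns, NShells, RMax):
    dists = np.sqrt(np.sum(Ps**2, 0)) #distance of each point to the origin
    hist, edges = np.histogram(dists, bins=NShells, range=(0, RMax))
    return hist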
#Purpose: To create shape histogram with concentric spherical shells and
#sectors within each shell, sorted in decreasing order of number of points
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), NShells (number of shells),
#RMax (maximum radius), SPoints: A 3 x S array of points sampled evenly on
#the unit sphere (get these with the function "getSphereSamples")
def getShapeShellHistogram(Ps, Ns, NShells, RMax, SPoints):
NSectors = SPoints.shape[1] #A number of sectors equal to the number of
#points sampled on the sphere
#Create a 2D histogram that is NShells x NSectors
hist = np.zeros((NShells, NSectors))
bins = np.linspace(0, RMax, NShells, False)
    indx = np.digitize(np.sqrt(np.sum(np.square(Ps), axis=0)), bins) - 1 #shift so shell i covers [bins[i], bins[i+1])
for i in range(NShells):
subList = Ps[:, indx == i]
dirList = np.argmax(subList.T.dot(SPoints), axis=1) #across row, size=#SizeOfShell
count = np.bincount(dirList)
hist[i, :count.shape[0]] = np.sort(count)[::-1] #using double slicing to reverse the sort order
return hist.flatten() #Flatten the 2D histogram to a 1D array
#alternative approach
# raw = SPoints.T.dot(Ps)
# sector = np.argmax(raw, axis=0)
# dist = np.sqrt(np.sum(np.square(Ps)))
# combined = zip(sector, dist) #combine two list into tuples
# sorted(combined, key=lambda x: x[0]) #sort the list according to dist value
#Purpose: To create shape histogram with concentric spherical shells and to
#compute the PCA eigenvalues in each shell
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), NShells (number of shells),
#RMax (maximum radius)
def getShapeHistogramPCA(Ps, Ns, NShells, RMax):
#Create a 2D histogram, with 3 eigenvalues for each shell
hist = np.zeros((NShells, 3))
bins = np.linspace(0, RMax, NShells, False)
    indx = np.digitize(np.sqrt(np.sum(np.square(Ps), axis=0)), bins) - 1 #shift so shell i covers [bins[i], bins[i+1])
for i in range(NShells):
sub = Ps[:, indx == i]
(eigVs, eigVecs) = doPCA(sub)
        hist[i, :] = np.sort(eigVs)[::-1] #the three eigenvalues of this shell, largest first
return hist.flatten() #Flatten the 2D histogram to a 1D array
#Purpose: To create shape histogram of the pairwise Euclidean distances between
#randomly sampled points in the point cloud
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), DMax (Maximum distance to consider),
#NBins (number of histogram bins), NSamples (number of pairs of points sample
#to compute distances)
def getD2Histogram(Ps, Ns, DMax, NBins, NSamples):
N = Ps.shape[1]
S1 = Ps[:, np.random.random_integers(0, N-1, NSamples)]
S2 = Ps[:, np.random.random_integers(0, N-1, NSamples)]
D2 = np.sqrt(np.sum((S1-S2)**2, 0))
hist, be = np.histogram(D2, NBins, (0, DMax))
return hist
#Purpose: To create shape histogram of the angles between randomly sampled
#triples of points
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals) (not needed here
#but passed along for consistency), NBins (number of histogram bins),
#NSamples (number of triples of points sample to compute angles)
def getA3Histogram(Ps, Ns, NBins, NSamples):
N = Ps.shape[1]
S1 = Ps[:, np.random.random_integers(0, N-1, NSamples)]
S2 = Ps[:, np.random.random_integers(0, N-1, NSamples)]
S3 = Ps[:, np.random.random_integers(0, N-1, NSamples)]
V1 = S1 - S2
L1 = np.sqrt(np.sum(V1**2, 0))
V2 = S1 - S3
L2 = np.sqrt(np.sum(V2**2, 0))
valid = (L1 > 0) * (L2 > 0)
V1 = V1[:, valid] / L1[valid]
V2 = V2[:, valid] / L2[valid]
C = np.sum(V1*V2, 0)
D2S = np.sum((V1-V2)**2, 0)
    C[D2S == 0] = 1
    C = np.clip(C, -1, 1) #guard against rounding pushing |cos(angle)| just past 1
    A3 = np.arccos(C)
hist, be = np.histogram(A3, NBins, (0, np.pi))
return hist
#Purpose: To create the Extended Gaussian Image by binning normals to
#sphere directions after rotating the point cloud to align with its principal axes
#Inputs: Ps (3 x N point cloud) (use to compute PCA), Ns (3 x N array of normals),
#SPoints: A 3 x S array of points sampled evenly on the unit sphere used to
#bin the normals
def getEGIHistogram(Ps, Ns, SPoints):
S = SPoints.shape[1]
hist = np.zeros(S)
    ##TODO: Finish this; fill in hist
return hist
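#A possible sketch for the EGI (an illustration under stated assumptions, not the
#assignment solution): rotate the normals into the principal-axis frame of Ps and
#bin each normal into the closest sampled sphere direction, where "closest" is the
#largest dot product (assumes the normals and the columns of SPoints are unit length).
def getEGIHistogramSketch(Ps, Ns, SPoints):
    S = SPoints.shape[1]
    (eigs, V) = doPCA(Ps) #columns of V are principal axes, eigenvalues ascending
    NsRot = V.T.dot(Ns) #normals expressed in the principal-axis frame
    sectors = np.argmax(SPoints.T.dot(NsRot), axis=0) #nearest sphere direction per normal
    hist = np.bincount(sectors, minlength=S).astype(np.float64)
    return hist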
#Purpose: To create an image which stores the amalgamation of rotating
#a bunch of planes around the largest principal axis of a point cloud and
#projecting the points on the minor axes onto the image.
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals, not needed here),
#NAngles: The number of angles between 0 and 2*pi through which to rotate
#the plane, Extent: The extent of each axis, Dim: The number of pixels along
#each minor axis
def getSpinImage(Ps, Ns, NAngles, Extent, Dim):
#Create an image
eigs, V = doPCA(Ps)
P = V[:, :2].T.dot(Ps)
As = np.linspace(0, 2*np.pi, NAngles, False)
C, S = np.cos(As), np.sin(As)
A = np.zeros((NAngles, 2, 2))
A[:, 0, 0], A[:, 0, 1], A[:, 1, 0], A[:, 1, 1] = C, -S, S, C
P = A.dot(P)
x = P[:, 0, :].flatten()
y = P[:, 1, :].flatten()
hist, xe, ye = np.histogram2d(x, y, Dim, [[-Extent, Extent], [-Extent, Extent]])
return hist.flatten()
#Purpose: To create a histogram of spherical harmonic magnitudes in concentric
#spheres after rasterizing the point cloud to a voxel grid
#Inputs: Ps (3 x N point cloud), Ns (3 x N array of normals, not used here),
#VoxelRes: The number of voxels along each axis (for instance, if 30, then rasterize
#to 30x30x30 voxels), Extent: The number of units along each axis (if 2, then
#rasterize in the box [-1, 1] x [-1, 1] x [-1, 1]), NHarmonics: The number of spherical
#harmonics, NSpheres, the number of concentric spheres to take
def getSphericalHarmonicMagnitudes(Ps, Ns, VoxelRes, Extent, NHarmonics, NSpheres):
hist = np.zeros((NSpheres, NHarmonics))
#TODO: Finish this
return hist.flatten()
#Purpose: Utility function for wrapping around the statistics functions.
#Inputs: PointClouds (a python list of N point clouds), Normals (a python
#list of the N corresponding normals), histFunction (a function
#handle for one of the above functions), *args (addditional arguments
#that the descriptor function needs)
#Returns: AllHists (A KxN matrix of all descriptors, where K is the length
#of each | |
<filename>script/geometry.py
import collections, os, sys
import numpy as np
import scipy.integrate
from scipy.special import erf, erfc
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
import noise
matplotlib.rcParams['hatch.linewidth'] = 0.7
fontsize = 11/1.4
latex_preamble = r'''
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{siunitx}
\usepackage{slantsc}
\usepackage{graphicx}
\usepackage{abraces}
'''
matplotlib.rcParams.update({
'text.usetex' : True,
'font.family' : 'serif',
'font.serif' : 'cmr10',
'font.size' : fontsize,
'mathtext.fontset' : 'cm',
'axes.formatter.use_mathtext': True,
'text.latex.preamble': latex_preamble,
})
# Specs
Mpc = 1
boxsize = 512*Mpc
domain_subdivisions = (2, 3)
gridsize = 54
shortrange_scale = 1.25*boxsize/gridsize
shortrange_range = 4.5*shortrange_scale
shortrange_tilesize = shortrange_range
tiling = np.asarray(
(boxsize/np.asarray(domain_subdivisions))/shortrange_tilesize*(1 + 1e-16),
dtype=int,
)
subtilings = [(2, 1), (1, 1), (3, 2), (1, 1), (2, 2), (4, 3)]
# Initialize figure
fig, ax = plt.subplots()
# Draw box
lw_box = 1.5
plt.plot([0, boxsize, boxsize, 0, 0], [0, 0, boxsize, boxsize, 0],
'k-', lw=lw_box, zorder=300)
# Draw domains
for dim in range(2):
for i in range(1, domain_subdivisions[dim]):
x, y = [0, boxsize], [boxsize/domain_subdivisions[dim]*i]*2
if dim == 1:
x, y = y, x
plt.plot(x, y, 'k-', lw=1.3, zorder=90)
# Draw PM grid
for i in range(1, gridsize):
plt.plot([0, boxsize], [boxsize/gridsize*i]*2, '-', color=[0.89]*3, lw=0.5, zorder=-100)
plt.plot([boxsize/gridsize*i]*2, [0, boxsize], '-', color=[0.89]*3, lw=0.5, zorder=-100)
# Draw tiling
for dim in range(2):
for i in range(1, tiling[dim]*domain_subdivisions[dim]):
x, y = [0, boxsize], [boxsize/domain_subdivisions[dim]/tiling[dim]*i]*2
if dim == 1:
x, y = y, x
plt.plot(x, y, 'C5-', lw=0.95, zorder=70)
# Draw subtilings
Subtile = collections.namedtuple('Subtile', ['x', 'y', 'width', 'height'])
subtiles = []
domain_size_x = boxsize/domain_subdivisions[1]
domain_size_y = boxsize/domain_subdivisions[0]
tile_size_x = domain_size_x/tiling[1]
tile_size_y = domain_size_y/tiling[0]
n = -1
for i in range(domain_subdivisions[0]):
for j in range(domain_subdivisions[1]):
n += 1
subtiling = subtilings[n]
domain_start_x = domain_size_x*j
domain_start_y = domain_size_y*i
for ii in range(tiling[0]):
for jj in range(tiling[1]):
tile_start_x = domain_start_x + tile_size_x*jj
tile_start_y = domain_start_y + tile_size_y*ii
for iii in range(subtiling[0]):
for jjj in range(subtiling[1]):
subtile = Subtile(
tile_start_x + tile_size_x/subtiling[1]*jjj,
tile_start_y + tile_size_y/subtiling[0]*iii,
tile_size_x/subtiling[1],
tile_size_y/subtiling[0],
)
subtiles.append(subtile)
for iii in range(1, subtiling[0]):
plt.plot(
[tile_start_x, tile_start_x + tile_size_x],
[tile_start_y + tile_size_y/subtiling[0]*iii]*2,
'C1-', lw=0.6, zorder=60,
)
for jjj in range(1, subtiling[1]):
plt.plot(
[tile_start_x + tile_size_x/subtiling[1]*jjj]*2,
[tile_start_y, tile_start_y + tile_size_y],
'C1-', lw=0.6, zorder=60,
)
def generate_subtiles(n):
subtiling = subtilings[n]
subtiles = []
for i in range(domain_subdivisions[0]):
for j in range(domain_subdivisions[1]):
domain_start_x = domain_size_x*j
domain_start_y = domain_size_y*i
for ii in range(tiling[0]):
for jj in range(tiling[1]):
tile_start_x = domain_start_x + tile_size_x*jj
tile_start_y = domain_start_y + tile_size_y*ii
for iii in range(subtiling[0]):
for jjj in range(subtiling[1]):
subtile = Subtile(
tile_start_x + tile_size_x/subtiling[1]*jjj,
tile_start_y + tile_size_y/subtiling[0]*iii,
tile_size_x/subtiling[1],
tile_size_y/subtiling[0],
)
subtiles.append(subtile)
return subtiles
ghostly_subtiles = [generate_subtiles(n) for n in range(len(subtilings))]
# Draw particles
def get_subtile_dist(subtile1, subtile2):
x1 = np.array((subtile1.x, subtile1.x + subtile1.width))
y1 = np.array((subtile1.y, subtile1.y + subtile1.height))
x2 = np.array((subtile2.x, subtile2.x + subtile2.width))
y2 = np.array((subtile2.y, subtile2.y + subtile2.height))
if subtile1.x - subtile2.x > 0.5*boxsize:
x2 += boxsize
elif subtile1.x - subtile2.x < -0.5*boxsize:
x2 -= boxsize
if subtile1.y - subtile2.y > 0.5*boxsize:
y2 += boxsize
elif subtile1.y - subtile2.y < -0.5*boxsize:
y2 -= boxsize
if max(y1) < min(y2):
# 1 fully below 2
dy = max(y1) - min(y2)
elif min(y1) > max(y2):
# 1 fully above 2
dy = min(y1) - max(y2)
else:
# overlap in y-direction
dy = 0
if max(x1) < min(x2):
# 1 fully to the left of 2
dx = max(x1) - min(x2)
elif min(x1) > max(x2):
# 1 fully to the right of 2
dx = min(x1) - max(x2)
else:
# overlap in x-direction
dx = 0
return np.sqrt(dx**2 + dy**2)
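# Quick sanity check (hypothetical subtiles, boxsize = 512 Mpc): two 10 Mpc wide
# subtiles hugging opposite x-edges of the box are neighbours through the periodic
# boundary:
#   get_subtile_dist(Subtile(1, 1, 10, 10), Subtile(boxsize - 11, 1, 10, 10))
#   -> 2.0 (the periodic image closes the x-gap to 2, and the y-ranges overlap)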
theta = np.linspace(0, 2*np.pi, 200, endpoint=False)
N = 8
eps_soft = 0.030*boxsize/np.cbrt(N)
def place_particle(x, y, color='r', hatch='/'*8, place_dot=True):
    # Draw particle with radius equal to the softening length
if place_dot:
plt.fill(x + eps_soft*np.cos(theta), y + eps_soft*np.sin(theta), color,
zorder=304, edgecolor='none')
X = x + shortrange_range*np.cos(theta)
Y = y + shortrange_range*np.sin(theta)
def plot(x, y):
if len(x) == 0:
return
diff_avg = np.mean(np.abs(np.diff(x)))
index = -1
for i in range(1, len(x)):
diffx = abs(x[i] - x[i - 1])
if diffx > 10*diff_avg:
index = i
break
diffy = abs(y[i] - y[i - 1])
if diffy > 10*diff_avg:
index = i
break
if index != -1:
x = np.roll(x, -index)
y = np.roll(y, -index)
plt.plot(x, y, '-', color=color, lw=1.3, alpha=0.85, zorder=301)
mask_ok_x = (0 <= X) & (X < boxsize)
mask_ok_y = (0 <= Y) & (Y < boxsize)
mask_left_x = (X < 0)
mask_right_x = (boxsize <= X)
mask_down_y = (Y < 0)
mask_up_y = (boxsize <= Y)
mask = mask_ok_x & mask_ok_y
plot(X[mask], Y[mask])
mask = mask_ok_x & mask_down_y
plot(X[mask], Y[mask] + boxsize)
mask = mask_ok_x & mask_up_y
plot(X[mask], Y[mask] - boxsize)
mask = mask_left_x & mask_ok_y
plot(X[mask] + boxsize, Y[mask])
mask = mask_right_x & mask_ok_y
plot(X[mask] - boxsize, Y[mask])
mask = mask_left_x & mask_down_y
plot(X[mask] + boxsize, Y[mask] + boxsize)
mask = mask_left_x & mask_up_y
plot(X[mask] + boxsize, Y[mask] - boxsize)
mask = mask_right_x & mask_down_y
plot(X[mask] - boxsize, Y[mask] + boxsize)
mask = mask_right_x & mask_up_y
plot(X[mask] - boxsize, Y[mask] - boxsize)
# Hatch subtiles within which to search
n = int(x/domain_size_x) + domain_subdivisions[1]*int(y/domain_size_y)
for subtile in ghostly_subtiles[n]:
if (
(subtile.x <= x < subtile.x + subtile.width)
and (subtile.y <= y < subtile.y + subtile.height)
):
break
else:
print(f'Failed to find subtile of particle at ({x}, {y})!', file=sys.stderr)
sys.exit(1)
Xs = []
Ys = []
for other_subtile in ghostly_subtiles[n]:
dist = get_subtile_dist(subtile, other_subtile)
if dist < shortrange_range:
X = np.array([
other_subtile.x,
other_subtile.x + other_subtile.width,
other_subtile.x + other_subtile.width,
other_subtile.x,
other_subtile.x,
])
Y = np.array([
other_subtile.y,
other_subtile.y,
other_subtile.y + other_subtile.height,
other_subtile.y + other_subtile.height,
other_subtile.y,
])
if hatch is not None:
plt.fill(X, Y,
color='none', edgecolor=color, zorder=-90, hatch=hatch,
fill=False, lw=0, alpha=0.50,
)
Xs.append(X)
Ys.append(Y)
# Draw boundary of hatched region
X = np.concatenate(Xs)
Y = np.concatenate(Ys)
def draw_hatched_boundary(x, y, X, Y, no=None):
indices = []
fac = 1/np.prod(subtilings[n])**1.2
for q in theta:
L = 0.5*boxsize
while L > 0:
L -= 0.2*boxsize/gridsize
dist2 = (X - (x + L*np.cos(q)))**2 + (Y - (y + L*np.sin(q)))**2
if np.min(dist2) < (19.8*boxsize/gridsize*fac)**2:
index = np.argmin(dist2)
if index not in indices:
indices.append(index)
break
indices.append(indices[0])
indices = np.array(indices)
X = X[indices]
Y = Y[indices]
if no is None:
no = []
if isinstance(no, str):
no = [no]
no = list(no)
for no in no:
nans_to_be_inserted = []
if no == 'bottom':
Z = Y
extrema = np.nanmin(Z)
elif no == 'top':
Z = Y
extrema = np.nanmax(Z)
elif no == 'left':
Z = X
extrema = np.nanmin(Z)
elif no == 'right':
Z = X
extrema = np.nanmax(Z)
if no is not None:
reached = False
for i, zi in enumerate(Z):
if not reached:
if zi == extrema:
reached = True
continue
if zi == extrema:
nans_to_be_inserted.append(i)
else:
reached = False
count = 0
for index in nans_to_be_inserted:
X = np.insert(X, index + count, np.nan)
Y = np.insert(Y, index + count, np.nan)
count += 1
if hatch is not None:
plt.plot(X, Y, '--', color=color, lw=0.9, zorder=301, alpha=0.80)
i = int(x/tile_size_x)
j = int(y/tile_size_y)
if 0 < j < domain_subdivisions[0]*tiling[0] - 1 and 0 < i < domain_subdivisions[1]*tiling[1] - 1:
draw_hatched_boundary(x, y, X, Y)
elif j == 0 and 0 < i < domain_subdivisions[1]*tiling[1] - 1:
draw_hatched_boundary(x, y, X, Y, no='bottom')
draw_hatched_boundary(x, y + boxsize, X, Y, no='top')
elif i == 0 and 0 < j < domain_subdivisions[0]*tiling[0] - 1:
draw_hatched_boundary(x, y, X, Y, no='left')
draw_hatched_boundary(x + boxsize, y, X, Y, no='right')
elif j == domain_subdivisions[0]*tiling[0] - 1 and 0 < i < domain_subdivisions[1]*tiling[1] - 1:
draw_hatched_boundary(x, y, X, Y, no='top')
draw_hatched_boundary(x, y - boxsize, X, Y, no='bottom')
elif i == domain_subdivisions[1]*tiling[1] - 1 and 0 < j < domain_subdivisions[0]*tiling[0] - 1:
draw_hatched_boundary(x, y, X, Y, no='right')
draw_hatched_boundary(x - boxsize, y, X, Y, no='left')
elif j == 0 and i == 0:
draw_hatched_boundary(x, y, X, Y, no=('left', 'bottom'))
draw_hatched_boundary(x + boxsize, y + boxsize, X, Y, no=('right', 'top'))
draw_hatched_boundary(x + boxsize, y, X, Y, no=('right', 'bottom'))
draw_hatched_boundary(x, y + boxsize, X, Y, no=('left', 'top'))
elif j == 0 and i == domain_subdivisions[1]*tiling[1] - 1:
draw_hatched_boundary(x, y, X, Y, no=('right', 'bottom'))
draw_hatched_boundary(x - boxsize, y, X, Y, no=('left', 'bottom'))
draw_hatched_boundary(x, y + boxsize, X, Y, no=('right', 'top'))
draw_hatched_boundary(x - boxsize, y + boxsize, X, Y, no=('left', 'top'))
from io import BytesIO
from sys import version_info
from unittest import TestCase
from xml.etree import ElementTree
import datetime
import pytest
from pyclarity_lims.constants import nsmap
from pyclarity_lims.descriptors import StringDescriptor, StringAttributeDescriptor, StringListDescriptor, \
StringDictionaryDescriptor, IntegerDescriptor, BooleanDescriptor, UdfDictionary, EntityDescriptor, \
InputOutputMapList, EntityListDescriptor, PlacementDictionary, EntityList, SubTagDictionary, ExternalidList,\
XmlElementAttributeDict, XmlAttributeList, XmlReagentLabelList, XmlPooledInputDict, XmlAction, QueuedArtifactList
from pyclarity_lims.entities import Artifact, ProtocolStep, Container, Process, Step
from pyclarity_lims.lims import Lims
from tests import elements_equal
if version_info[0] == 2:
from mock import Mock
else:
from unittest.mock import Mock
def _tostring(e):
outfile = BytesIO()
ElementTree.ElementTree(e).write(outfile, encoding='utf-8', xml_declaration=True)
return outfile.getvalue().decode("utf-8")
class TestDescriptor(TestCase):
def _make_desc(self, klass, *args, **kwargs):
return klass(*args, **kwargs)
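# Orientation note (hypothetical entity, not part of the test suite): in the library
# these descriptors are attached as class attributes, e.g.
#
#     class MySample(object):
#         name = StringDescriptor('name')
#
# so reading or assigning `instance.name` reads/writes the <name> sub-element of
# `instance.root`, which is exactly what the Mock(root=...) objects below emulate.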
class TestStringDescriptor(TestDescriptor):
def setUp(self):
self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="utf-8"?>
<test-entry>
<name>test name</name>
</test-entry>
""")
self.instance = Mock(root=self.et)
def test__get__(self):
sd = self._make_desc(StringDescriptor, 'name')
assert sd.__get__(self.instance, None) == "test name"
def test__set__(self):
sd = self._make_desc(StringDescriptor, 'name')
sd.__set__(self.instance, "new test name")
assert self.et.find('name').text == "new test name"
def test_create(self):
instance_new = Mock(root=ElementTree.Element('test-entry'))
sd = self._make_desc(StringDescriptor, 'name')
sd.__set__(instance_new, "test name")
assert instance_new.root.find('name').text == 'test name'
class TestIntegerDescriptor(TestDescriptor):
def setUp(self):
self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<count>32</count>
</test-entry>
""")
self.instance = Mock(root=self.et)
def test__get__(self):
sd = self._make_desc(IntegerDescriptor, 'count')
assert sd.__get__(self.instance, None) == 32
def test__set__(self):
sd = self._make_desc(IntegerDescriptor, 'count')
sd.__set__(self.instance, 23)
assert self.et.find('count').text == '23'
sd.__set__(self.instance, '23')
assert self.et.find('count').text == '23'
def test_create(self):
instance_new = Mock(root=ElementTree.Element('test-entry'))
sd = self._make_desc(IntegerDescriptor, 'count')
sd.__set__(instance_new, 23)
assert instance_new.root.find('count').text == '23'
class TestBooleanDescriptor(TestDescriptor):
def setUp(self):
self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<istest>true</istest>
</test-entry>
""")
self.instance = Mock(root=self.et)
def test__get__(self):
bd = self._make_desc(BooleanDescriptor, 'istest')
assert bd.__get__(self.instance, None)
def test__set__(self):
bd = self._make_desc(BooleanDescriptor, 'istest')
bd.__set__(self.instance, False)
assert self.et.find('istest').text == 'false'
bd.__set__(self.instance, 'true')
assert self.et.find('istest').text == 'true'
def test_create(self):
instance_new = Mock(root=ElementTree.Element('test-entry'))
bd = self._make_desc(BooleanDescriptor, 'istest')
bd.__set__(instance_new, True)
assert instance_new.root.find('istest').text == 'true'
class TestEntityDescriptor(TestDescriptor):
def setUp(self):
self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
</test-entry>
""")
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.a1 = Artifact(self.lims, id='a1')
self.a2 = Artifact(self.lims, id='a2')
self.instance = Mock(root=self.et, lims=self.lims)
def test__get__(self):
ed = self._make_desc(EntityDescriptor, 'artifact', Artifact)
assert ed.__get__(self.instance, None) == self.a1
def test__set__(self):
ed = self._make_desc(EntityDescriptor, 'artifact', Artifact)
ed.__set__(self.instance, self.a2)
assert self.et.find('artifact').attrib['uri'] == 'http://testgenologics.com:4040/api/v2/artifacts/a2'
def test_create(self):
instance_new = Mock(root=ElementTree.Element('test-entry'))
ed = self._make_desc(EntityDescriptor, 'artifact', Artifact)
ed.__set__(instance_new, self.a1)
assert instance_new.root.find('artifact').attrib['uri'] == 'http://testgenologics.com:4040/api/v2/artifacts/a1'
class TestEntityListDescriptor(TestDescriptor):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a2"></artifact>
</test-entry>
""")
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.a1 = Artifact(self.lims, id='a1')
self.a2 = Artifact(self.lims, id='a2')
self.instance1 = Mock(root=et, lims=self.lims)
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<nesting>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a2"></artifact>
</nesting>
</test-entry>
""")
self.instance2 = Mock(root=et, lims=self.lims)
def test__get__(self):
ed = self._make_desc(EntityListDescriptor, 'artifact', Artifact)
assert ed.__get__(self.instance1, None) == [self.a1, self.a2]
ed = self._make_desc(EntityListDescriptor, 'artifact', Artifact, nesting=['nesting'])
assert ed.__get__(self.instance2, None) == [self.a1, self.a2]
class TestStringAttributeDescriptor(TestDescriptor):
def setUp(self):
self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry name="test name">
</test-entry>""")
self.instance = Mock(root=self.et)
def test__get__(self):
sd = self._make_desc(StringAttributeDescriptor, 'name')
assert sd.__get__(self.instance, None) == "test name"
def test__set__(self):
sd = self._make_desc(StringAttributeDescriptor, 'name')
sd.__set__(self.instance, "test name2")
assert self.et.attrib['name'] == "test name2"
def test_create(self):
instance_new = Mock(root=ElementTree.Element('test-entry'))
bd = self._make_desc(StringAttributeDescriptor, 'name')
bd.__set__(instance_new, "test name2")
assert instance_new.root.attrib['name'] == "test name2"
class TestStringListDescriptor(TestDescriptor):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<test-subentry>A01</test-subentry>
<test-subentry>B01</test-subentry>
</test-entry>""")
self.instance1 = Mock(root=et)
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<nesting>
<test-subentry>A01</test-subentry>
<test-subentry>B01</test-subentry>
</nesting>
</test-entry>""")
self.instance2 = Mock(root=et)
def test__get__(self):
sd = self._make_desc(StringListDescriptor, 'test-subentry')
assert sd.__get__(self.instance1, None) == ['A01', 'B01']
sd = self._make_desc(StringListDescriptor, 'test-subentry', nesting=['nesting'])
assert sd.__get__(self.instance2, None) == ['A01', 'B01']
def test__set__(self):
sd = self._make_desc(StringListDescriptor, 'test-subentry')
sd.__set__(self.instance1, ['A02', 'B02'])
res = sd.__get__(self.instance1, None)
assert isinstance(res, list)
assert res == ['A02', 'B02']
class TestStringDictionaryDescriptor(TestDescriptor):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<test-subentry>
<test-firstkey/>
<test-secondkey>second value</test-secondkey>
</test-subentry>
</test-entry>""")
self.instance = Mock(root=et)
def test__get__(self):
sd = self._make_desc(StringDictionaryDescriptor, 'test-subentry')
res = sd.__get__(self.instance, None)
assert isinstance(res, dict)
assert res['test-firstkey'] is None
assert res['test-secondkey'] == 'second value'
def test__set__(self):
sd = self._make_desc(StringDictionaryDescriptor, 'test-subentry')
sd.__set__(self.instance, {'mykey1': 'myvalue1'})
res = sd.__get__(self.instance, None)
assert isinstance(res, dict)
assert res['mykey1'] == 'myvalue1'
class TestUdfDictionary(TestCase):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<udf:field type="String" name="test">stuff</udf:field>
<udf:field type="Numeric" name="how much">42</udf:field>
<udf:field type="Boolean" name="really?">true</udf:field>
</test-entry>""")
self.instance1 = Mock(root=et)
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<nesting>
<udf:field type="String" name="test">stuff</udf:field>
<udf:field type="Numeric" name="how much">42</udf:field>
<udf:field type="Boolean" name="really?">true</udf:field>
</nesting>
</test-entry>""")
self.instance2 = Mock(root=et)
self.dict1 = UdfDictionary(self.instance1)
self.dict2 = UdfDictionary(self.instance2, nesting=['nesting'])
self.dict_fail = UdfDictionary(self.instance2)
self.empty_et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")
def _get_udf_value(self, udf_dict, key):
for e in udf_dict._elems:
if e.attrib['name'] != key:
continue
else:
return e.text
def test___getitem__(self):
assert self.dict1.__getitem__('test') == self._get_udf_value(self.dict1, 'test')
assert self.dict2.__getitem__('test') == self._get_udf_value(self.dict2, 'test')
self.assertRaises(KeyError, self.dict_fail.__getitem__, 'test')
def test___setitem__(self):
assert self._get_udf_value(self.dict1, 'test') == 'stuff'
self.dict1.__setitem__('test', 'other')
assert self._get_udf_value(self.dict1, 'test') == 'other'
assert self._get_udf_value(self.dict1, 'how much') == '42'
self.dict1.__setitem__('how much', 21)
assert self._get_udf_value(self.dict1, 'how much') == '21'
assert self._get_udf_value(self.dict1, 'really?') == 'true'
self.dict1.__setitem__('really?', False)
assert self._get_udf_value(self.dict1, 'really?') == 'false'
self.assertRaises(TypeError, self.dict1.__setitem__, 'how much', '433')
# FIXME: I'm not sure if this is the expected behaviour
self.dict1.__setitem__('how much', None)
assert self._get_udf_value(self.dict1, 'how much') == b'None'
assert self._get_udf_value(self.dict2, 'test') == 'stuff'
self.dict2.__setitem__('test', 'other')
assert self._get_udf_value(self.dict2, 'test') == 'other'
def test___setitem__new(self):
self.dict1.__setitem__('new string', 'new stuff')
assert self._get_udf_value(self.dict1, 'new string') == 'new stuff'
self.dict1.__setitem__('new numeric', 21)
assert self._get_udf_value(self.dict1, 'new numeric') == '21'
self.dict1.__setitem__('new bool', False)
assert self._get_udf_value(self.dict1, 'new bool') == 'false'
self.dict2.__setitem__('new string', 'new stuff')
assert self._get_udf_value(self.dict2, 'new string') == 'new stuff'
def test___setitem__unicode(self):
assert self._get_udf_value(self.dict1, 'test') == 'stuff'
self.dict1.__setitem__('test', u'unicode')
assert self._get_udf_value(self.dict1, 'test') == 'unicode'
self.dict1.__setitem__(u'test', 'unicode2')
assert self._get_udf_value(self.dict1, 'test') == 'unicode2'
def test_create(self):
instance = Mock(root=self.empty_et)
dict1 = UdfDictionary(instance)
dict1['test'] = 'value1'
assert self._get_udf_value(dict1, 'test') == 'value1'
def test_create_with_nesting(self):
instance = Mock(root=self.empty_et)
dict1 = UdfDictionary(instance, nesting=['cocoon'])
dict1['test'] = 'value1'
assert self._get_udf_value(dict1, 'test') == 'value1'
def test___delitem__(self):
assert self.dict1['test'] == self._get_udf_value(self.dict1, 'test')
del self.dict1['test']
with pytest.raises(KeyError):
_ = self.dict1['test']
assert self._get_udf_value(self.dict1, 'test') is None
def test_items(self):
pass
def test_clear(self):
assert self.dict1
self.dict1.clear()
assert not self.dict1
assert len(self.dict1) == 0
def test___iter__(self):
expected_content = [
("test", "stuff"),
("really?", True),
("how much", 42)
]
for k in self.dict1:
assert (k, self.dict1[k]) in expected_content
class TestPlacementDictionary(TestDescriptor):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<placement uri="http://testgenologics.com:4040/api/v2/artifacts/a1" limsid="a1">
<value>A:1</value>
</placement>
<other>thing</other>
</test-entry>""")
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.instance1 = Mock(root=et, lims=self.lims)
self.dict1 = PlacementDictionary(self.instance1)
self.art1 = Artifact(lims=self.lims, id='a1')
def test___getitem__(self):
assert self.dict1['A:1'] == self.art1
def test___setitem__(self):
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
art2 = Artifact(lims=self.lims, id='a2')
self.dict1['A:1'] = art2
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
self.dict1['A:2'] = art2
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 2
assert self.dict1['A:2'] == art2
def test___setitem__2(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")
instance = Mock(root=et, lims=self.lims)
d = PlacementDictionary(instance)
assert len(d.rootnode(d.instance).findall('placement')) == 0
d['A:1'] = self.art1
assert len(d.rootnode(d.instance).findall('placement')) == 1
def test___delitem__(self):
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
del self.dict1['A:1']
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 0
def test_clear(self):
sd = self._make_desc(StringDescriptor, 'other')
assert sd.__get__(self.instance1, None) == "thing"
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 1
self.dict1.clear()
assert len(self.dict1.rootnode(self.dict1.instance).findall('placement')) == 0
assert sd.__get__(self.instance1, None) == "thing"
class TestSubTagDictionary(TestCase):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<test-tag>
<key1>value1</key1>
</test-tag>
</test-entry>""")
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.instance1 = Mock(root=et, lims=self.lims)
self.dict1 = SubTagDictionary(self.instance1, tag='test-tag')
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
</test-entry>""")
self.instance2 = Mock(root=et, lims=self.lims)
self.dict2 = SubTagDictionary(self.instance2, tag='test-tag')
def test___getitem__(self):
assert self.dict1['key1'] == 'value1'
def test___setitem__(self):
assert len(self.dict1.rootnode(self.dict1.instance)) == 1
assert self.dict1.rootnode(self.dict1.instance).find('key1').text == 'value1'
self.dict1['key1'] = 'value11'
assert len(self.dict1.rootnode(self.dict1.instance)) == 1
assert self.dict1.rootnode(self.dict1.instance).find('key1').text == 'value11'
self.dict1['key2'] = 'value2'
assert len(self.dict1.rootnode(self.dict1.instance)) == 2
assert self.dict1.rootnode(self.dict1.instance).find('key2').text == 'value2'
assert self.dict1['key2'] == 'value2'
def test___setitem__from_empty(self):
assert len(self.dict2.rootnode(self.dict2.instance)) == 0
self.dict2['key1'] = 'value1'
assert self.dict2.rootnode(self.dict2.instance).find('key1').text == 'value1'
assert len(self.dict2.rootnode(self.dict2.instance)) == 1
class TestXmlElementAttributeDict(TestCase):
def setUp(self):
et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry xmlns:udf="http://genologics.com/ri/userdefined">
<test-tag attrib1="value1" attrib2="value2"/>
<test-tag attrib1="value11" attrib2="value12" attrib3="value13"/>
</test-entry>""")
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.instance1 = Mock(root=et, lims=self.lims)
self.dict1 = XmlElementAttributeDict(self.instance1, tag='test-tag', position=0)
self.dict2 = XmlElementAttributeDict(self.instance1, tag='test-tag', position=1)
def test___getitem__(self):
assert self.dict1['attrib1'] == 'value1'
assert self.dict2['attrib1'] == 'value11'
def test__len__(self):
assert len(self.dict1) == 2
assert len(self.dict2) == 3
def test___setitem__(self):
assert self.dict1['attrib1'] == 'value1'
assert self.dict1.rootnode(self.dict1.instance).findall('test-tag')[0].attrib['attrib1'] == 'value1'
self.dict1['attrib1'] = 'value2'
assert self.dict1.rootnode(self.dict1.instance).findall('test-tag')[0].attrib['attrib1'] == 'value2'
class TestXmlPooledInputDict(TestCase):
def setUp(self):
et = ElementTree.fromstring('''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<pooled-inputs>
<pool output-uri="{uri}/out1" name="pool1">
<input uri="{uri}/in1"/>
<input uri="{uri}/in2"/>
</pool>
<pool output-uri="{uri}/out2" name="pool2">
<input uri="{uri}/in3"/>
<input uri="{uri}/in4"/>
</pool>
</pooled-inputs>
</test-entry>'''.format(uri='http://testgenologics.com:4040'))
self.lims = Lims('http://testgenologics.com:4040', username='test', password='password')
self.instance1 = Mock(root=et, lims=self.lims)
self.dict1 = XmlPooledInputDict(self.instance1)
self.out1 = Artifact(self.lims, uri='http://testgenologics.com:4040/out1')
self.in1 = Artifact(self.lims, uri='http://testgenologics.com:4040/in1')
self.in2 = Artifact(self.lims, uri='http://testgenologics.com:4040/in2')
def test___getitem__(self):
assert self.dict1['pool1'] == (self.out1, (self.in1, self.in2))
def test___setitem1__(self):
assert len(self.dict1) == 2
assert len(self.dict1.rootnode(self.dict1.instance)) == 2
# This works in the test but does not work in reality because
        # the pool artifact needs to be created by the LIMS.
self.dict1['pool3'] = (self.out1, (self.in1, | |
in group.items() if v is not None})
physical_interface_list.append(physical_interface_dict)
continue
m = p2.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p3.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p4.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p5.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p6.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p7.match(line)
if m:
group = m.groupdict()
if_device_flags = physical_interface_dict.setdefault('if-device-flags', {})
if_device_flags.update({k.replace('_', '-'):
True for k, v in group.items() if v is not None})
continue
m = p8.match(line)
if m:
group = m.groupdict()
if_config_flags = physical_interface_dict.setdefault('if-config-flags', {})
if_config_flags.update({k.replace('_', '-'):
True for k, v in group.items() if v is not None and k != "internal_flags"})
if "internal_flags" in group:
if_config_flags.update({"internal-flags": group["internal_flags"]})
continue
m = p9.match(line)
if m:
group = m.groupdict()
if_media_flags = physical_interface_dict.setdefault('if-media-flags', {})
if_media_flags.update({k.replace('_', '-'):
True for k, v in group.items() if v is not None})
continue
m = p10.match(line)
if m:
group = m.groupdict()
phys_cos_info = physical_interface_dict.setdefault('physical-interface-cos-information', {})
phys_cos_info.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
m = p11.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p12.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p13.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p14.match(line)
if m:
group = m.groupdict()
traffic_stats = physical_interface_dict.setdefault('traffic-statistics', {})
traffic_stats.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
m = p15.match(line)
if m:
group = m.groupdict()
traffic_stats.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
m = p16.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p17.match(line)
if m:
group = m.groupdict()
active_alarm = physical_interface_dict.setdefault('active-alarms', {})
interface_alarm = active_alarm.setdefault('interface-alarms', {})
interface_alarm.update({k.replace('_', '-'):
True for k, v in group.items() if v is not None})
continue
m = p18.match(line)
if m:
group = m.groupdict()
active_alarm = physical_interface_dict.setdefault('active-defects', {})
interface_defect = active_alarm.setdefault('interface-alarms', {})
interface_defect.update({k.replace('_', '-'):
True for k, v in group.items() if v is not None})
continue
m = p19.match(line)
if m:
group = m.groupdict()
physical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p20.match(line)
if m:
group = m.groupdict()
logical_interface_list = physical_interface_dict.setdefault('logical-interface', [])
logical_interface_dict = {}
logical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
logical_interface_list.append(logical_interface_dict)
continue
m = p21.match(line)
if m:
group = m.groupdict()
if_config_flags = logical_interface_dict.setdefault('if-config-flags', {})
if_config_flags.update({k.replace('_','-'):
True for k, v in group.items() if v is not None and k not in [
"encapsulation",
"internal_flags"]})
if "encapsulation" in group and group["encapsulation"]:
logical_interface_dict.update({"encapsulation": group["encapsulation"]})
if "internal_flags" in group and group["internal_flags"]:
if_config_flags.update({"internal-flags": group["internal_flags"]})
continue
m = p22.match(line)
if m:
group = m.groupdict()
traffic_stats = logical_interface_dict.setdefault('traffic-statistics', {})
traffic_stats.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p23.match(line)
if m:
group = m.groupdict()
traffic_stats.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p24.match(line)
if m:
group = m.groupdict()
logical_interface_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
continue
m = p25.match(line)
if m:
group = m.groupdict()
allowed_in_traffic = logical_interface_dict.setdefault('allowed-host-inbound-traffic', {})
allowed_in_traffic.update({k.replace('_','-'):
True for k, v in group.items() if v is not None})
continue
m = p26.match(line)
if m:
group = m.groupdict()
address_family_list = logical_interface_dict.setdefault('address-family', [])
address_family_dict = {}
address_family_dict.update({k.replace('_','-'):
v for k, v in group.items() if v is not None})
address_family_list.append(address_family_dict)
continue
m = p27.match(line)
if m:
group = m.groupdict()
address_family_flags = address_family_dict.setdefault('address-family-flags', {})
address_family_flags.update({k.replace('_','-'):
True for k, v in group.items() if v is not None})
continue
m = p28.match(line)
if m:
group = m.groupdict()
interface_address_list = address_family_dict.setdefault('interface-address', [])
interface_address_dict = {}
ifa_flags = interface_address_dict.setdefault('ifa-flags', {})
if 'flags' in group and group['flags']:
flags = group['flags'].split(' ')
ifa_flags.update({"ifaf-current-{}".format(f.split('-')[-1].lower()):
True for f in flags})
interface_address_list.append(interface_address_dict)
continue
m = p29.match(line)
if m:
group = m.groupdict()
if group['ifa_destination']:
interface_address_dict.update({'ifa-destination': group['ifa_destination']})
if group['ifa_local']:
interface_address_dict.update({'ifa-local': group['ifa_local']})
if group['ifa_broadcast']:
interface_address_dict.update({'ifa-broadcast': group['ifa_broadcast']})
continue
return ret_dict
# =======================================================
# Schema for 'show interfaces policers {interface}'
# =======================================================
class ShowInterfacesPolicersInterfaceSchema(MetaParser):
"""Schema for show interfaces policers {interface}"""
'''schema = {
Optional("@xmlns:junos"): str,
"interface-policer-information": {
Optional("@junos:style"): str,
Optional("@xmlns"): str,
"physical-interface": [
"admin-status": str,
"logical-interface": [
"admin-status": str,
"name": str,
"oper-status": str,
"policer-information": [
{
"policer-family": str,
"policer-input": str,
"policer-output": str
}
]
],
"name": str,
"oper-status": str
]
}
}'''
def validate_policer_information_list(value):
        # Pass policer-information list as value
if not isinstance(value, list):
raise SchemaError('policer-information is not a list')
policer_information_schema = Schema({
"policer-family": str,
"policer-input": str,
Optional("policer-output"): Or(str,None)
})
# Validate each dictionary in list
for item in value:
policer_information_schema.validate(item)
return value
def validate_logical_interface_list(value):
# Pass ospf3-interface list as value
if not isinstance(value, list):
raise SchemaError('logical-interface is not a list')
logical_interface_schema = Schema({
"admin-status": str,
"name": str,
"oper-status": str,
"policer-information": Use(ShowInterfacesPolicersInterface.validate_policer_information_list)
})
# Validate each dictionary in list
for item in value:
logical_interface_schema.validate(item)
return value
def validate_physical_interface_list(value):
# Pass ospf3-interface list as value
if not isinstance(value, list):
raise SchemaError('physical-interface is not a list')
physical_interface_schema = Schema({
"admin-status": str,
"logical-interface": Use(ShowInterfacesPolicersInterface.validate_logical_interface_list),
"name": str,
"oper-status": str
})
# Validate each dictionary in list
for item in value:
physical_interface_schema.validate(item)
return value
schema = {
Optional("@xmlns:junos"): str,
"interface-policer-information": {
Optional("@junos:style"): str,
Optional("@xmlns"): str,
"physical-interface": Use(validate_physical_interface_list)
}
}
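    # Shape of data accepted by this schema (illustrative only, assembled from the
    # sample lines quoted in the parser below, not captured from a device):
    # {
    #     "interface-policer-information": {
    #         "physical-interface": [{
    #             "name": "ge-0/0/2", "admin-status": "up", "oper-status": "up",
    #             "logical-interface": [{
    #                 "name": "ge-0/0/2.0", "admin-status": "up", "oper-status": "up",
    #                 "policer-information": [{
    #                     "policer-family": "inet",
    #                     "policer-input": "GE_1M-ge-0/0/2.0-log_int-i",
    #                     "policer-output": "GE_1M-ge-0/0/2.0-log_int-o"
    #                 }]
    #             }]
    #         }]
    #     }
    # }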
# =======================================================
# Parser for 'show interfaces policers {interface}'
# =======================================================
class ShowInterfacesPolicersInterface(ShowInterfacesPolicersInterfaceSchema):
""" Parser for:
- show interfaces policers {interface}
"""
cli_command = 'show interfaces policers {interface}'
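    # Hedged usage note: like other Genie parsers this class is normally driven
    # through a connected device, e.g. device.parse('show interfaces policers ge-0/0/2');
    # the device handle and interface name here are placeholders.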
def cli(self, interface=None, output=None):
# execute the command
if output is None:
out = self.device.execute(self.cli_command.format(interface=interface))
else:
out = output
ret_dict = {}
# ge-0/0/2 up up
p1 = re.compile(r'^(?P<interface>[a-zA-Z\-\d\/]+)((?P<physical_interface_value>[\.\d]+))? +(?P<admin>\w+) +(?:(?P<link>\w+))?$')
# inet GE_1M-ge-0/0/2.0-log_int-i GE_1M-ge-0/0/2.0-log_int-o
# multiservice __default_arp_policer__
p2 = re.compile(r'^(?P<policer_family>\w+) +(?P<policer_input>\S+)( +((?P<policer_output>\S+)))?$')
for line in out.splitlines():
line = line.strip()
# ge-0/0/2 up up
# ge-0/0/2.0 up up
m = p1.match(line)
if m:
interface_policer_info = ret_dict.setdefault(
"interface-policer-information", {}).setdefault("physical-interface",[])
#logical_interface_list = interface_policer_info.setdefault("logical-interface",[])
#policer_information_list = logical_interface_list.setdefault("policer-information", [])
                group = m.groupdict()
                policer_information_list = None
                # a logical unit line ("ge-0/0/2.0 ...") carries the ".<unit>" suffix in this group
                exists = group['physical_interface_value'] is not None
if exists:
logical_interface_dict['name'] = group['interface'] + group['physical_interface_value']
logical_interface_dict['admin-status'] = group['admin']
logical_interface_dict['oper-status'] = group['link']
policer_information_list = []
logical_interface_dict["policer-information"] = policer_information_list
logical_interface_list.append(logical_interface_dict)
interface_policer_info_dict['logical-interface'] = logical_interface_list
interface_policer_info.append(interface_policer_info_dict)
else:
logical_interface_list = []
logical_interface_dict = {}
interface_policer_info_dict= {}
interface_policer_info_dict['name'] = group['interface']
interface_policer_info_dict['admin-status'] = group['admin']
interface_policer_info_dict['oper-status'] = group['link']
# inet GE_1M-ge-0/0/2.0-log_int-i GE_1M-ge-0/0/2.0-log_int-o
# multiservice __default_arp_policer__
m = p2.match(line)
if m:
group = m.groupdict()
policer_information_dict = {}
policer_information_dict['policer-family'] = group['policer_family']
policer_information_dict['policer-input'] = group['policer_input']
                if group['policer_output'] is not None:
                    policer_information_dict['policer-output'] = group['policer_output']
policer_information_list.append(policer_information_dict)
exists = False
return ret_dict
# =======================================================
# Schema for 'show interfaces queue {interface}'
# =======================================================
class ShowInterfacesQueueSchema(MetaParser):
"""
Schema for:
* show interfaces queue {interface}
"""
def validate_queue(value):
if not isinstance(value, list):
raise SchemaError('queue is not a list')
queue_schema = Schema(
{
"forwarding-class-name": str,
"queue-counters-queued-bytes": str,
"queue-counters-queued-bytes-rate": str,
"queue-counters-queued-packets": str,
"queue-counters-queued-packets-rate": str,
"queue-counters-red-bytes": str,
"queue-counters-red-bytes-high": str,
"queue-counters-red-bytes-low": str,
"queue-counters-red-bytes-medium-high": str,
"queue-counters-red-bytes-medium-low": str,
"queue-counters-red-bytes-rate": str,
"queue-counters-red-bytes-rate-high": str,
"queue-counters-red-bytes-rate-low": str,
"queue-counters-red-bytes-rate-medium-high": str,
"queue-counters-red-bytes-rate-medium-low": str,
"queue-counters-red-packets": str,
"queue-counters-red-packets-high": str,
"queue-counters-red-packets-low": str,
"queue-counters-red-packets-medium-high": str,
"queue-counters-red-packets-medium-low": str,
"queue-counters-red-packets-rate": str,
"queue-counters-red-packets-rate-high": str,
"queue-counters-red-packets-rate-low": str,
"queue-counters-red-packets-rate-medium-high": str,
"queue-counters-red-packets-rate-medium-low": str,
"queue-counters-tail-drop-packets": str,
"queue-counters-tail-drop-packets-rate": str,
Optional("queue-counters-rl-drop-packets"): str,
Optional("queue-counters-rl-drop-packets-rate"): str,
Optional("queue-counters-rl-drop-bytes"): str,
Optional("queue-counters-rl-drop-bytes-rate"): str,
"queue-counters-trans-bytes": str,
"queue-counters-trans-bytes-rate": str,
"queue-counters-trans-packets": str,
"queue-counters-trans-packets-rate": str,
"queue-number": str
}
)
for item in value:
queue_schema.validate(item)
return value
schema = {
"interface-information": {
"physical-interface": {
Optional("description"): str,
"local-index": str,
"snmp-index": str,
"name": str,
"oper-status": str,
"queue-counters": {
"interface-cos-summary": {
"intf-cos-forwarding-classes-in-use": str,
"intf-cos-forwarding-classes-supported": str,
"intf-cos-num-queues-in-use": str,
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import http
import time
from . import util
from . import exceptions
from . import compat
from . import verlib
import re
import json
from xml.etree import ElementTree
from . import signalsmixin
from . import plexobjects
from . import plexresource
from . import plexlibrary
from . import asyncadapter
from six.moves import range
# from plexapi.client import Client
# from plexapi.playqueue import PlayQueue
TOTAL_QUERIES = 0
DEFAULT_BASEURI = 'http://localhost:32400'
class PlexServer(plexresource.PlexResource, signalsmixin.SignalsMixin):
TYPE = 'PLEXSERVER'
def __init__(self, data=None):
signalsmixin.SignalsMixin.__init__(self)
plexresource.PlexResource.__init__(self, data)
self.accessToken = None
self.multiuser = False
self.isSupported = None
self.hasFallback = False
self.supportsAudioTranscoding = False
self.supportsVideoTranscoding = False
self.supportsPhotoTranscoding = False
self.supportsVideoRemuxOnly = False
self.supportsScrobble = True
self.allowsMediaDeletion = False
self.allowChannelAccess = False
self.activeConnection = None
self.serverClass = None
self.pendingReachabilityRequests = 0
self.pendingSecureRequests = 0
self.features = {}
self.librariesByUuid = {}
self.server = self
self.session = http.Session()
self.owner = None
self.owned = False
self.synced = False
self.sameNetwork = False
self.uuid = None
self.name = None
self.platform = None
self.versionNorm = None
self.rawVersion = None
self.transcodeSupport = False
if data is None:
return
self.owner = data.attrib.get('sourceTitle')
self.owned = data.attrib.get('owned') == '1'
self.synced = data.attrib.get('synced') == '1'
self.sameNetwork = data.attrib.get('publicAddressMatches') == '1'
self.uuid = data.attrib.get('clientIdentifier')
self.name = data.attrib.get('name')
self.platform = data.attrib.get('platform')
self.rawVersion = data.attrib.get('productVersion')
self.versionNorm = util.normalizedVersion(self.rawVersion)
self.transcodeSupport = data.attrib.get('transcodeSupport') == '1'
def __eq__(self, other):
if not other:
return False
if self.__class__ != other.__class__:
return False
return self.uuid == other.uuid and self.owner == other.owner
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<PlexServer {0} owned: {1} uuid: {2} version: {3}>".format(repr(self.name), self.owned, self.uuid, self.versionNorm)
def __repr__(self):
return self.__str__()
def close(self):
self.session.cancel()
def get(self, attr, default=None):
return default
@property
def isSecure(self):
if self.activeConnection:
return self.activeConnection.isSecure
def getObject(self, key):
data = self.query(key)
return plexobjects.buildItem(self, data[0], key, container=self)
def hubs(self, section=None, count=None, search_query=None):
hubs = []
params = {"includeMarkers": 1}
if search_query:
q = '/hubs/search'
params['query'] = search_query.lower()
if section:
params['sectionId'] = section
if count is not None:
params['limit'] = count
else:
q = '/hubs'
if section:
if section == 'playlists':
audio = plexlibrary.AudioPlaylistHub(False, server=self.server)
video = plexlibrary.VideoPlaylistHub(False, server=self.server)
if audio.items:
hubs.append(audio)
if video.items:
hubs.append(video)
return hubs
else:
q = '/hubs/sections/%s' % section
if count is not None:
params['count'] = count
data = self.query(q, params=params)
container = plexobjects.PlexContainer(data, initpath=q, server=self, address=q)
for elem in data:
hubs.append(plexlibrary.Hub(elem, server=self, container=container))
return hubs
def playlists(self, start=0, size=10, hub=None):
try:
return plexobjects.listItems(self, '/playlists/all')
except exceptions.BadRequest:
return None
@property
def library(self):
if self.platform == 'cloudsync':
return plexlibrary.Library(None, server=self)
else:
return plexlibrary.Library(self.query('/library/'), server=self)
def buildUrl(self, path, includeToken=False):
if self.activeConnection:
return self.activeConnection.buildUrl(self, path, includeToken)
else:
util.WARN_LOG("Server connection is None, returning an empty url")
return ""
def query(self, path, method=None, **kwargs):
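        # Usage sketch (mirrors the `library` property above): e.g.
        #   data = self.query('/library/')
        # returns the parsed XML root element, or None when the URL could not be
        # built or the request failed or was cancelled.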
method = method or self.session.get
url = self.buildUrl(path, includeToken=True)
        # If the URL is empty, refresh resources and return None for now
if not url:
util.WARN_LOG("Empty server url, returning None and refreshing resources")
util.MANAGER.refreshResources(True)
return None
util.LOG('{0} {1}'.format(method.__name__.upper(), re.sub('X-Plex-Token=[^&]+', 'X-Plex-Token=****', url)))
try:
response = method(url, **kwargs)
if response.status_code not in (200, 201):
codename = http.status_codes.get(response.status_code, ['Unknown'])[0]
raise exceptions.BadRequest('({0}) {1}'.format(response.status_code, codename))
data = response.text.encode('utf8')
except asyncadapter.TimeoutException:
util.ERROR()
util.MANAGER.refreshResources(True)
return None
except http.requests.ConnectionError:
util.ERROR()
return None
except asyncadapter.CanceledException:
return None
return ElementTree.fromstring(data) if data else None
def getImageTranscodeURL(self, path, width, height, **extraOpts):
if not path:
return ''
params = ("&width=%s&height=%s" % (width, height)) + ''.join(["&%s=%s" % (key, extraOpts[key]) for key in extraOpts])
if "://" in path:
imageUrl = self.convertUrlToLoopBack(path)
else:
imageUrl = "http://127.0.0.1:" + self.getLocalServerPort() + path
path = "/photo/:/transcode?url=" + compat.quote_plus(imageUrl) + params
# Try to use a better server to transcode for synced servers
if self.synced:
from . import plexservermanager
selectedServer = plexservermanager.MANAGER.getTranscodeServer("photo")
if selectedServer:
return selectedServer.buildUrl(path, True)
if self.activeConnection:
return self.activeConnection.simpleBuildUrl(self, path)
else:
util.WARN_LOG("Server connection is None, returning an empty url")
return ""
def isReachable(self, onlySupported=True):
if onlySupported and not self.isSupported:
return False
return self.activeConnection and self.activeConnection.state == plexresource.ResourceConnection.STATE_REACHABLE
def isLocalConnection(self):
return self.activeConnection and (self.sameNetwork or self.activeConnection.isLocal)
def isRequestToServer(self, url):
if not self.activeConnection:
return False
if ':' in self.activeConnection.address[8:]:
schemeAndHost = self.activeConnection.address.rsplit(':', 1)[0]
else:
schemeAndHost = self.activeConnection.address
return url.startswith(schemeAndHost)
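    # Illustration (addresses are assumptions): for an active connection address of
    # "https://10.0.0.5:32400", the slice address[8:] skips the scheme, the ':' check
    # finds the port, and schemeAndHost becomes "https://10.0.0.5", so a URL such as
    # "https://10.0.0.5:32400/library/sections" is treated as a request to this server.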
def getToken(self):
# It's dangerous to use for each here, because it may reset the index
# on self.connections when something else was in the middle of an iteration.
for i in range(len(self.connections)):
conn = self.connections[i]
if conn.token:
return conn.token
return None
def getLocalServerPort(self):
# TODO(schuyler): The correct thing to do here is to iterate over local
# connections and pull out the port. For now, we're always returning 32400.
return '32400'
def collectDataFromRoot(self, data):
# Make sure we're processing data for our server, and not some other
# server that happened to be at the same IP.
if self.uuid != data.attrib.get('machineIdentifier'):
util.LOG("Got a reachability response, but from a different server")
return False
self.serverClass = data.attrib.get('serverClass')
self.supportsAudioTranscoding = data.attrib.get('transcoderAudio') == '1'
self.supportsVideoTranscoding = data.attrib.get('transcoderVideo') == '1' or data.attrib.get('transcoderVideoQualities')
self.supportsVideoRemuxOnly = data.attrib.get('transcoderVideoRemuxOnly') == '1'
self.supportsPhotoTranscoding = data.attrib.get('transcoderPhoto') == '1' or (
not data.attrib.get('transcoderPhoto') and not self.synced and not self.isSecondary()
)
self.allowChannelAccess = data.attrib.get('allowChannelAccess') == '1' or (
not data.attrib.get('allowChannelAccess') and self.owned and not self.synced and not self.isSecondary()
)
self.supportsScrobble = not self.isSecondary() or self.synced
self.allowsMediaDeletion = not self.synced and self.owned and data.attrib.get('allowMediaDeletion') == '1'
self.multiuser = data.attrib.get('multiuser') == '1'
self.name = data.attrib.get('friendlyName') or self.name
self.platform = data.attrib.get('platform')
# TODO(schuyler): Process transcoder qualities
self.rawVersion = data.attrib.get('version')
if self.rawVersion:
self.versionNorm = util.normalizedVersion(self.rawVersion)
features = {
'mkvTranscode': '0.9.11.11',
'themeTranscode': '0.9.14.0',
'allPartsStreamSelection': '0.9.12.5',
'claimServer': '0.9.14.2',
'streamingBrain': '1.2.0'
}
for f, v in features.items():
if util.normalizedVersion(v) <= self.versionNorm:
self.features[f] = True
appMinVer = util.INTERFACE.getGlobal('minServerVersionArr', '0.0.0.0')
self.isSupported = self.isSecondary() or util.normalizedVersion(appMinVer) <= self.versionNorm
util.DEBUG_LOG("Server information updated from reachability check: {0}".format(self))
return True
def updateReachability(self, force=True, allowFallback=False):
if not force and self.activeConnection and self.activeConnection.state != plexresource.ResourceConnection.STATE_UNKNOWN:
return
util.LOG('Updating reachability for {0}: conns={1}, allowFallback={2}'.format(repr(self.name), len(self.connections), allowFallback))
epoch = time.time()
retrySeconds = 60
minSeconds = 10
for i in range(len(self.connections)):
conn = self.connections[i]
diff = epoch - (conn.lastTestedAt or 0)
if conn.hasPendingRequest:
util.DEBUG_LOG("Skip reachability test for {0} (has pending request)".format(conn))
elif diff < minSeconds or (not self.isSecondary() and self.isReachable() and diff < retrySeconds):
util.DEBUG_LOG("Skip reachability test for {0} (checked {1} secs ago)".format(conn, diff))
elif conn.testReachability(self, allowFallback):
self.pendingReachabilityRequests += 1
if conn.isSecure:
self.pendingSecureRequests += 1
if self.pendingReachabilityRequests == 1:
self.trigger("started:reachability")
if self.pendingReachabilityRequests <= 0:
self.trigger("completed:reachability")
def cancelReachability(self):
for i in range(len(self.connections)):
conn = self.connections[i]
conn.cancelReachability()
def onReachabilityResult(self, connection):
connection.lastTestedAt = time.time()
connection.hasPendingRequest = None
self.pendingReachabilityRequests -= 1
if connection.isSecure:
self.pendingSecureRequests -= 1
util.DEBUG_LOG("Reachability result for {0}: {1} is {2}".format(repr(self.name), connection.address, connection.state))
        # Clear the active connection if its state is no longer reachable
if self.activeConnection and self.activeConnection.state != plexresource.ResourceConnection.STATE_REACHABLE:
self.activeConnection = None
# Pick a best connection. If we already had an active connection and
# it's still reachable, stick with it. (replace with local if
# available)
best = self.activeConnection
for i in range(len(self.connections) - 1, -1, -1):
conn = self.connections[i]
if not best or conn.getScore() > best.getScore():
best = conn
if best and best.state == best.STATE_REACHABLE:
if best.isSecure or self.pendingSecureRequests <= 0:
self.activeConnection = best
else:
util.DEBUG_LOG("Found a good connection for {0}, but holding out for better".format(repr(self.name)))
if self.pendingReachabilityRequests <= 0:
# Retest the server with fallback enabled. hasFallback will only
# be True if there are available insecure connections and fallback
# is allowed.
if self.hasFallback:
self.updateReachability(False, True)
else:
self.trigger("completed:reachability")
util.LOG("Active connection for {0} is {1}".format(repr(self.name), self.activeConnection))
from . import plexservermanager
plexservermanager.MANAGER.updateReachabilityResult(self, bool(self.activeConnection))
def markAsRefreshing(self):
for i in range(len(self.connections)):
conn = self.connections[i]
conn.refreshed = False
def markUpdateFinished(self, source):
# Any connections for the given source which haven't been refreshed should
# be removed. Since removing from a list is hard, we'll make a new list.
toKeep = []
hasSecureConn = False
for i in range(len(self.connections)):
conn = self.connections[i]
if not conn.refreshed:
conn.sources = conn.sources & (~source)
# If we lost our plex.tv connection, don't remember the token.
if source == conn.SOURCE_MYPLEX:
conn.token = None
if conn.sources:
if conn.address[:5] == "https":
hasSecureConn = True
toKeep.append(conn)
else:
util.DEBUG_LOG("Removed connection for {0} after updating connections for {1}".format(repr(self.name), source))
if conn == self.activeConnection:
util.DEBUG_LOG("Active connection lost")
    def get_entity(self, name: str, project: str, allow_cache: bool = False) -> Entity:
        """
        Retrieves an entity.
        Args:
            name: Name of entity
            project: Feast project that this entity belongs to
            allow_cache: Whether to allow returning this entity from a cached registry
Returns:
Returns either the specified entity, or raises an exception if
none is found
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for entity_proto in registry_proto.entities:
if entity_proto.spec.name == name and entity_proto.spec.project == project:
return Entity.from_proto(entity_proto)
raise EntityNotFoundException(name, project=project)
def apply_feature_view(
self, feature_view: BaseFeatureView, project: str, commit: bool = True
):
"""
Registers a single feature view with Feast
Args:
feature_view: Feature view that will be registered
project: Feast project that this feature view belongs to
commit: Whether the change should be persisted immediately
"""
feature_view.ensure_valid()
now = datetime.utcnow()
if not feature_view.created_timestamp:
feature_view.created_timestamp = now
feature_view.last_updated_timestamp = now
feature_view_proto = feature_view.to_proto()
feature_view_proto.spec.project = project
self._prepare_registry_for_changes()
assert self.cached_registry_proto
self._check_conflicting_feature_view_names(feature_view)
existing_feature_views_of_same_type: RepeatedCompositeFieldContainer
if isinstance(feature_view, FeatureView):
existing_feature_views_of_same_type = (
self.cached_registry_proto.feature_views
)
elif isinstance(feature_view, OnDemandFeatureView):
existing_feature_views_of_same_type = (
self.cached_registry_proto.on_demand_feature_views
)
elif isinstance(feature_view, RequestFeatureView):
existing_feature_views_of_same_type = (
self.cached_registry_proto.request_feature_views
)
else:
raise ValueError(f"Unexpected feature view type: {type(feature_view)}")
for idx, existing_feature_view_proto in enumerate(
existing_feature_views_of_same_type
):
if (
existing_feature_view_proto.spec.name == feature_view_proto.spec.name
and existing_feature_view_proto.spec.project == project
):
if (
feature_view.__class__.from_proto(existing_feature_view_proto)
== feature_view
):
return
else:
del existing_feature_views_of_same_type[idx]
break
existing_feature_views_of_same_type.append(feature_view_proto)
if commit:
self.commit()
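    # Hedged usage sketch (the feature view objects and project name below are
    # assumptions, not part of this module):
    #   registry.apply_feature_view(driver_stats_fv, project="driver_ranking", commit=False)
    #   registry.apply_feature_view(request_fv, project="driver_ranking", commit=False)
    #   registry.commit()  # persist both registrations in a single write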
def list_on_demand_feature_views(
self, project: str, allow_cache: bool = False
) -> List[OnDemandFeatureView]:
"""
Retrieve a list of on demand feature views from the registry
Args:
project: Filter on demand feature views based on project name
allow_cache: Whether to allow returning on demand feature views from a cached registry
Returns:
List of on demand feature views
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
on_demand_feature_views = []
for on_demand_feature_view in registry.on_demand_feature_views:
if on_demand_feature_view.spec.project == project:
on_demand_feature_views.append(
OnDemandFeatureView.from_proto(on_demand_feature_view)
)
return on_demand_feature_views
def get_on_demand_feature_view(
self, name: str, project: str, allow_cache: bool = False
) -> OnDemandFeatureView:
"""
Retrieves an on demand feature view.
Args:
name: Name of on demand feature view
project: Feast project that this on demand feature view belongs to
allow_cache: Whether to allow returning this on demand feature view from a cached registry
Returns:
Returns either the specified on demand feature view, or raises an exception if
none is found
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
for on_demand_feature_view in registry.on_demand_feature_views:
if (
on_demand_feature_view.spec.project == project
and on_demand_feature_view.spec.name == name
):
return OnDemandFeatureView.from_proto(on_demand_feature_view)
raise OnDemandFeatureViewNotFoundException(name, project=project)
def get_data_source(
self, name: str, project: str, allow_cache: bool = False
) -> DataSource:
"""
Retrieves a data source.
Args:
name: Name of data source
project: Feast project that this data source belongs to
allow_cache: Whether to allow returning this data source from a cached registry
Returns:
Returns either the specified data source, or raises an exception if none is found
"""
registry = self._get_registry_proto(allow_cache=allow_cache)
for data_source in registry.data_sources:
if data_source.project == project and data_source.name == name:
return DataSource.from_proto(data_source)
raise DataSourceObjectNotFoundException(name, project=project)
def apply_materialization(
self,
feature_view: FeatureView,
project: str,
start_date: datetime,
end_date: datetime,
commit: bool = True,
):
"""
Updates materialization intervals tracked for a single feature view in Feast
Args:
feature_view: Feature view that will be updated with an additional materialization interval tracked
project: Feast project that this feature view belongs to
start_date (datetime): Start date of the materialization interval to track
end_date (datetime): End date of the materialization interval to track
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_feature_view_proto in enumerate(
self.cached_registry_proto.feature_views
):
if (
existing_feature_view_proto.spec.name == feature_view.name
and existing_feature_view_proto.spec.project == project
):
existing_feature_view = FeatureView.from_proto(
existing_feature_view_proto
)
existing_feature_view.materialization_intervals.append(
(start_date, end_date)
)
existing_feature_view.last_updated_timestamp = datetime.utcnow()
feature_view_proto = existing_feature_view.to_proto()
feature_view_proto.spec.project = project
del self.cached_registry_proto.feature_views[idx]
self.cached_registry_proto.feature_views.append(feature_view_proto)
if commit:
self.commit()
return
raise FeatureViewNotFoundException(feature_view.name, project)
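    # Hedged usage sketch (the feature view and dates below are assumptions): after a
    # successful materialization run, the covered interval is recorded like so:
    #   registry.apply_materialization(
    #       feature_view=driver_stats_fv,
    #       project="driver_ranking",
    #       start_date=datetime(2024, 1, 1),
    #       end_date=datetime(2024, 1, 2),
    #   )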
def list_feature_views(
self, project: str, allow_cache: bool = False
) -> List[FeatureView]:
"""
Retrieve a list of feature views from the registry
Args:
allow_cache: Allow returning feature views from the cached registry
project: Filter feature views based on project name
Returns:
List of feature views
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
feature_views: List[FeatureView] = []
for feature_view_proto in registry_proto.feature_views:
if feature_view_proto.spec.project == project:
feature_views.append(FeatureView.from_proto(feature_view_proto))
return feature_views
def list_request_feature_views(
self, project: str, allow_cache: bool = False
) -> List[RequestFeatureView]:
"""
Retrieve a list of request feature views from the registry
Args:
allow_cache: Allow returning feature views from the cached registry
project: Filter feature views based on project name
Returns:
List of feature views
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
feature_views: List[RequestFeatureView] = []
for request_feature_view_proto in registry_proto.request_feature_views:
if request_feature_view_proto.spec.project == project:
feature_views.append(
RequestFeatureView.from_proto(request_feature_view_proto)
)
return feature_views
def get_feature_view(
self, name: str, project: str, allow_cache: bool = False
) -> FeatureView:
"""
Retrieves a feature view.
Args:
name: Name of feature view
project: Feast project that this feature view belongs to
allow_cache: Allow returning feature view from the cached registry
Returns:
Returns either the specified feature view, or raises an exception if
none is found
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for feature_view_proto in registry_proto.feature_views:
if (
feature_view_proto.spec.name == name
and feature_view_proto.spec.project == project
):
return FeatureView.from_proto(feature_view_proto)
raise FeatureViewNotFoundException(name, project)
def delete_feature_service(self, name: str, project: str, commit: bool = True):
"""
Deletes a feature service or raises an exception if not found.
Args:
name: Name of feature service
project: Feast project that this feature service belongs to
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, feature_service_proto in enumerate(
self.cached_registry_proto.feature_services
):
if (
feature_service_proto.spec.name == name
and feature_service_proto.spec.project == project
):
del self.cached_registry_proto.feature_services[idx]
if commit:
self.commit()
return
raise FeatureServiceNotFoundException(name, project)
def delete_feature_view(self, name: str, project: str, commit: bool = True):
"""
Deletes a feature view or raises an exception if not found.
Args:
name: Name of feature view
project: Feast project that this feature view belongs to
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_feature_view_proto in enumerate(
self.cached_registry_proto.feature_views
):
if (
existing_feature_view_proto.spec.name == name
and existing_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.feature_views[idx]
if commit:
self.commit()
return
for idx, existing_request_feature_view_proto in enumerate(
self.cached_registry_proto.request_feature_views
):
if (
existing_request_feature_view_proto.spec.name == name
and existing_request_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.request_feature_views[idx]
if commit:
self.commit()
return
for idx, existing_on_demand_feature_view_proto in enumerate(
self.cached_registry_proto.on_demand_feature_views
):
if (
existing_on_demand_feature_view_proto.spec.name == name
and existing_on_demand_feature_view_proto.spec.project == project
):
del self.cached_registry_proto.on_demand_feature_views[idx]
if commit:
self.commit()
return
raise FeatureViewNotFoundException(name, project)
def delete_entity(self, name: str, project: str, commit: bool = True):
"""
Deletes an entity or raises an exception if not found.
Args:
name: Name of entity
project: Feast project that this entity belongs to
commit: Whether the change should be persisted immediately
"""
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_entity_proto in enumerate(
self.cached_registry_proto.entities
):
if (
existing_entity_proto.spec.name == name
and existing_entity_proto.spec.project == project
):
del self.cached_registry_proto.entities[idx]
if commit:
self.commit()
return
raise EntityNotFoundException(name, project)
def apply_saved_dataset(
self, saved_dataset: SavedDataset, project: str, commit: bool = True,
):
"""
        Registers a single saved dataset with Feast
Args:
saved_dataset: SavedDataset that will be added / updated to registry
project: Feast project that this dataset belongs to
commit: Whether the change should be persisted immediately
"""
now = datetime.utcnow()
if not saved_dataset.created_timestamp:
saved_dataset.created_timestamp = now
saved_dataset.last_updated_timestamp = now
saved_dataset_proto = saved_dataset.to_proto()
saved_dataset_proto.spec.project = project
self._prepare_registry_for_changes()
assert self.cached_registry_proto
for idx, existing_saved_dataset_proto in enumerate(
self.cached_registry_proto.saved_datasets
):
if (
existing_saved_dataset_proto.spec.name == saved_dataset_proto.spec.name
and existing_saved_dataset_proto.spec.project == project
):
del self.cached_registry_proto.saved_datasets[idx]
break
self.cached_registry_proto.saved_datasets.append(saved_dataset_proto)
if commit:
self.commit()
def get_saved_dataset(
self, name: str, project: str, allow_cache: bool = False
) -> SavedDataset:
"""
Retrieves a saved dataset.
Args:
name: Name of dataset
project: Feast project that this dataset belongs to
allow_cache: Whether to allow returning this dataset from a cached registry
Returns:
Returns either the specified SavedDataset, or raises an exception if
none is found
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
for saved_dataset in registry_proto.saved_datasets:
if (
saved_dataset.spec.name == name
and saved_dataset.spec.project == project
):
return SavedDataset.from_proto(saved_dataset)
raise SavedDatasetNotFound(name, project=project)
def list_saved_datasets(
self, project: str, allow_cache: bool = False
) -> List[SavedDataset]:
"""
Retrieves a list of all saved datasets in specified project
Args:
project: Feast project
allow_cache: Whether to allow returning this dataset from a cached registry
Returns:
Returns the list of SavedDatasets
"""
registry_proto = self._get_registry_proto(allow_cache=allow_cache)
return [
SavedDataset.from_proto(saved_dataset)
for saved_dataset in registry_proto.saved_datasets
if saved_dataset.spec.project == project
]
def commit(self):
"""Commits the state of the registry cache to the remote registry store."""
# Licensed under an MIT style license -- see LICENSE.md
import numpy as np
from matplotlib import gridspec
from scipy.stats import gaussian_kde
import copy
from pesummary.core.plots.figure import figure
from .corner import hist2d
from pesummary import conf
__author__ = ["<NAME> <<EMAIL>>"]
DEFAULT_LEGEND_KWARGS = {"loc": "best", "frameon": False}
def pcolormesh(
x, y, density, ax=None, levels=None, smooth=None, bins=None, label=None,
level_kwargs={}, range=None, grid=True, legend=False, legend_kwargs={},
**kwargs
):
"""Generate a colormesh plot on a given axis
Parameters
----------
x: np.ndarray
array of floats for the x axis
y: np.ndarray
array of floats for the y axis
density: np.ndarray
2d array of probabilities
ax: matplotlib.axes._subplots.AxesSubplot, optional
axis you wish to use for plotting
levels: list, optional
contour levels to show on the plot. Default None
smooth: float, optional
sigma to use for smoothing. Default, no smoothing applied
level_kwargs: dict, optional
optional kwargs to use for ax.contour
**kwargs: dict, optional
all additional kwargs passed to ax.pcolormesh
"""
if smooth is not None:
        from scipy.ndimage import gaussian_filter
        density = gaussian_filter(density, sigma=smooth)
_cmap = kwargs.get("cmap", None)
_off = False
if _cmap is not None and isinstance(_cmap, str) and _cmap.lower() == "off":
_off = True
if grid and "zorder" not in kwargs:
_zorder = -10
else:
_zorder = kwargs.pop("zorder", 10)
if not _off:
ax.pcolormesh(x, y, density, zorder=_zorder, **kwargs)
if levels is not None:
CS = ax.contour(x, y, density, levels=levels, **level_kwargs)
if legend:
_legend_kwargs = DEFAULT_LEGEND_KWARGS.copy()
_legend_kwargs.update(legend_kwargs)
CS.collections[0].set_label(label)
ax.legend(**_legend_kwargs)
return ax
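# The helper below is a minimal, self-contained sketch (not part of the original
# module) showing how a KDE evaluated on a grid can be handed to `pcolormesh`
# above; the sample sizes, grid extent and contour level are arbitrary assumptions.
def _example_pcolormesh(seed=0):
    """Build a gridded Gaussian KDE density and plot it with ``pcolormesh``."""
    rng = np.random.default_rng(seed)
    samples = rng.normal(size=(2, 1000))
    kde = gaussian_kde(samples)
    grid_x, grid_y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
    density = kde(np.vstack([grid_x.ravel(), grid_y.ravel()])).reshape(grid_x.shape)
    fig, ax = figure(gca=True)
    return pcolormesh(
        grid_x, grid_y, density, ax=ax, levels=[0.05], label="example", legend=True,
    )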
def analytic_twod_contour_plot(*args, smooth=None, **kwargs):
"""Generate a 2d contour plot given an analytic PDF
Parameters
----------
*args: tuple
all args passed to twod_contour_plot
smooth: float, optional
degree of smoothing to apply to probabilities
**kwargs: dict, optional
all additional kwargs passed to twod_contour_plot
"""
return twod_contour_plot(
*args, smooth=smooth, _function=pcolormesh, **kwargs
)
def twod_contour_plot(
x, y, *args, rangex=None, rangey=None, fig=None, ax=None, return_ax=False,
levels=[0.9], bins=300, smooth=7, xlabel=None, ylabel=None,
fontsize={"label": 12}, grid=True, label=None, truth=None,
_function=hist2d, truth_lines=True, truth_kwargs={},
_default_truth_kwargs={
"marker": 'o', "markeredgewidth": 2, "markersize": 6, "color": 'k'
}, **kwargs
):
"""Generate a 2d contour contour plot for 2 marginalized posterior
distributions
Parameters
----------
x: np.array
array of posterior samples to use for the x axis
y: np.array
array of posterior samples to use for the y axis
rangex: tuple, optional
range over which to plot the x axis
rangey: tuple, optional
range over which to plot the y axis
fig: matplotlib.figure.Figure, optional
figure you wish to use for plotting
ax: matplotlib.axes._subplots.AxesSubplot, optional
axis you wish to use for plotting
return_ax: Bool, optional
if True return the axis used for plotting. Else return the figure
levels: list, optional
levels you wish to use for the 2d contours. Default [0.9]
bins: int, optional
number of bins to use for gridding 2d parameter space. Default 300
smooth: int, optional
how much smoothing you wish to use for the 2d contours
xlabel: str, optional
label to use for the xaxis
ylabel: str, optional
label to use for the yaxis
fontsize: dict, optional
dictionary containing the fontsize to use for the plot
grid: Bool, optional
if True, add a grid to the plot
label: str, optional
label to use for a given contour
truth: list, optional
the true value of the posterior. `truth` is a list of length 2 with
first element being the true x value and second element being the true
y value
truth_lines: Bool, optional
if True, add vertical and horizontal lines spanning the 2d space to show
injected value
truth_kwargs: dict, optional
kwargs to use to indicate truth
**kwargs: dict, optional
all additional kwargs are passed to the
`pesummary.core.plots.corner.hist2d` function
"""
if fig is None and ax is None:
fig, ax = figure(gca=True)
elif fig is None and ax is not None:
return_ax = True
elif ax is None:
ax = fig.gca()
xlow, xhigh = np.min(x), np.max(x)
ylow, yhigh = np.min(y), np.max(y)
if rangex is not None:
xlow, xhigh = rangex
if rangey is not None:
ylow, yhigh = rangey
if "range" not in list(kwargs.keys()):
kwargs["range"] = [[xlow, xhigh], [ylow, yhigh]]
_function(
x, y, *args, ax=ax, levels=levels, bins=bins, smooth=smooth,
label=label, grid=grid, **kwargs
)
if truth is not None:
_default_truth_kwargs.update(truth_kwargs)
ax.plot(*truth, **_default_truth_kwargs)
if truth_lines:
ax.axvline(
truth[0], color=_default_truth_kwargs["color"], linewidth=0.5
)
ax.axhline(
truth[1], color=_default_truth_kwargs["color"], linewidth=0.5
)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=fontsize["label"])
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=fontsize["label"])
ax.grid(grid)
if fig is not None:
fig.tight_layout()
if return_ax:
return ax
return fig
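# Minimal usage sketch (not part of the original module): draw a single 90%
# credible contour for two correlated Gaussian samples. The sample sizes, axis
# labels and the truth value are arbitrary assumptions.
def _example_twod_contour_plot(seed=0):
    """Call ``twod_contour_plot`` on synthetic, correlated posterior samples."""
    rng = np.random.default_rng(seed)
    x = rng.normal(size=2000)
    y = 0.5 * x + rng.normal(scale=0.5, size=2000)
    return twod_contour_plot(
        x, y, levels=[0.9], xlabel="x", ylabel="y", label="example",
        truth=[0.0, 0.0],
    )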
def comparison_twod_contour_plot(
x, y, labels=None, plot_density=None, rangex=None, rangey=None,
legend_kwargs={"loc": "best", "frameon": False},
colors=list(conf.colorcycle), linestyles=None, **kwargs
):
"""Generate a comparison 2d contour contour plot for 2 marginalized
posterior distributions from multiple analyses
Parameters
----------
x: np.ndarray
2d array of posterior samples to use for the x axis; array for each
analysis
y: np.ndarray
2d array of posterior samples to use for the y axis; array for each
analysis
labels: list, optional
labels to assign to each contour
plot_density: str, optional
label of the analysis you wish to plot the density for. If you wish
to plot both, simply pass `plot_density='both'`
rangex: tuple, optional
range over which to plot the x axis
rangey: tuple, optional
range over which to plot the y axis
legend_kwargs: dict, optional
kwargs to use for the legend
colors: list, optional
list of colors to use for each contour
linestyles: list, optional
linestyles to use for each contour
**kwargs: dict, optional
all additional kwargs are passed to the
`pesummary.core.plots.publication.twod_contour_plot` function
"""
if labels is None and plot_density is not None:
plot_density = None
if labels is None:
labels = [None] * len(x)
xlow = np.min([np.min(_x) for _x in x])
xhigh = np.max([np.max(_x) for _x in x])
ylow = np.min([np.min(_y) for _y in y])
yhigh = np.max([np.max(_y) for _y in y])
if rangex is None:
rangex = [xlow, xhigh]
if rangey is None:
rangey = [ylow, yhigh]
fig = None
for num, (_x, _y) in enumerate(zip(x, y)):
if plot_density is not None and plot_density == labels[num]:
plot_density = True
elif plot_density is not None and isinstance(plot_density, list):
if labels[num] in plot_density:
plot_density = True
else:
plot_density = False
elif plot_density is not None and plot_density == "both":
plot_density = True
else:
plot_density = False
_label = _color = _linestyle = None
if labels is not None:
_label = labels[num]
if colors is not None:
_color = colors[num]
if linestyles is not None:
_linestyle = linestyles[num]
fig = twod_contour_plot(
_x, _y, plot_density=plot_density, label=_label, fig=fig,
rangex=rangex, rangey=rangey, color=_color, linestyles=_linestyle,
**kwargs
)
ax = fig.gca()
legend = ax.legend(**legend_kwargs)
return fig
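# Minimal comparison sketch (not part of the original module): overlay the 90%
# contours of two hypothetical analyses. Labels, offsets and sample sizes are
# arbitrary assumptions.
def _example_comparison_twod_contour_plot(seed=0):
    """Call ``comparison_twod_contour_plot`` on two sets of synthetic samples."""
    rng = np.random.default_rng(seed)
    x = [rng.normal(size=2000), rng.normal(loc=0.5, size=2000)]
    y = [rng.normal(size=2000), rng.normal(loc=-0.5, size=2000)]
    return comparison_twod_contour_plot(
        x, y, labels=["analysis one", "analysis two"], levels=[0.9],
        xlabel="x", ylabel="y",
    )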
def _triangle_axes(
figsize=(8, 8), width_ratios=[4, 1], height_ratios=[1, 4], wspace=0.0,
hspace=0.0,
):
"""Initialize the axes for a 2d triangle plot
Parameters
----------
figsize: tuple, optional
figure size you wish to use. Default (8, 8)
width_ratios: list, optional
ratio of widths for the triangular axis. Default 4:1
height_ratios: list, optional
ratio of heights for the triangular axis. Default 1:4
wspace: float, optional
horizontal space between the axis. Default 0.0
hspace: float, optional
vertical space between the axis. Default 0.0
"""
high1d = 1.0
fig = figure(figsize=figsize, gca=False)
gs = gridspec.GridSpec(
2, 2, width_ratios=width_ratios, height_ratios=height_ratios,
wspace=wspace, hspace=hspace
)
ax1, ax2, ax3, ax4 = (
fig.add_subplot(gs[0]),
fig.add_subplot(gs[1]),
fig.add_subplot(gs[2]),
fig.add_subplot(gs[3]),
)
ax1.minorticks_on()
ax3.minorticks_on()
ax4.minorticks_on()
ax1.xaxis.set_ticklabels([])
ax4.yaxis.set_ticklabels([])
return fig, ax1, ax2, ax3, ax4
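# Small sketch (not part of the original module) of how the triangle layout is
# typically consumed: the unused upper-right panel is switched off, exactly as
# _generate_triangle_plot does below.
def _example_triangle_axes():
    """Return the figure and the three visible panels of the triangle layout."""
    fig, ax1, ax2, ax3, ax4 = _triangle_axes(figsize=(6, 6))
    ax2.axis("off")
    return fig, ax1, ax3, ax4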
def _generate_triangle_plot(
*args, function=None, fig_kwargs={}, existing_figure=None, **kwargs
):
"""Generate a triangle plot according to a given function
Parameters
----------
*args: tuple
all args passed to function
function: func, optional
function you wish to use to generate triangle plot. Default
_triangle_plot
**kwargs: dict, optional
all kwargs passed to function
"""
if existing_figure is None:
fig, ax1, ax2, ax3, ax4 = _triangle_axes(**fig_kwargs)
ax2.axis("off")
else:
fig, ax1, ax3, ax4 = existing_figure
if function is None:
function = _triangle_plot
return function(fig, [ax1, ax3, ax4], *args, **kwargs)
def triangle_plot(*args, **kwargs):
"""Generate a triangular plot made of 3 axis. One central axis showing the
2d marginalized posterior and two smaller axes showing the marginalized 1d
posterior distribution (above and to the right of central axis)
Parameters
----------
x: list
list of samples for the x axis
y: list
list of samples for the y axis
kde: Bool/func, optional
kde | |
'''
this Adapter is designed to wrap the sending and receiving activities of the Battery Management extension if
it is applied.
'''
from tools.singleton import Singleton
import api.ecu_sim_api as api
import copy
from api.core.component_specs import SimpleECUSpec, SimpleBusSpec
from enums.sec_cfg_enum import CAEnum
from components.security.communication.stream import MessageStream
from api.core.api_core import TimingFunctionSet
from components.security.ecu.types.impl_sec_mod_lwa import StdSecurLwSecModTimingFunctions
from components.security.ecu.types.impl_ecu_secure import StdSecurECUTimingFunctions
import logging
class BatManAdministrator(Singleton):
''' centrally saves all BatManCANBusAdapter objects
so that they can be mapped
'''
def __init__(self):
''' constructor '''
self.can_bus_adapter = {} # key: cell_id, value: BatManCANBusAdapter
self._ecu_spec = None # ecu spec used for all ECUs per default
self._ecu_class_name = None # class of the ECU that is to be used
self._individual_spec = {} # if one ECU is to use different settings
self._monitor = False # monitor object connected to the environment
self._result_reader = False # result reader connected to the monitor
self._view = False # view connected to the environment
        self._view_options = []       # options of the view, e.g. EventlineViewPlugin
self.q_application = None
self.active = False
def activate_ecu_simulation(self):
self.active = True
def adapter_used(self):
        ''' this method returns True if the battery management
            system is using this adapter implementation for the CAN bus
            connection
            Input:   -
            Output:  bool      True if at least one adapter is registered
        '''
# available adapters
lst = self.can_bus_adapter.keys()
# True if adapters available
if lst:
return True
else:
return False
def add_view(self, direct_view, view_options):
''' adds a view (GUI) to the environment which
is opened together with the environment
Input: direct_view DirectViewer direct view gui that is to be connected
view_options list list of GUI Plugins that are to be used
Output: -
'''
self._view = direct_view
self._view_options = view_options
def available_can_adapters(self):
        ''' returns the ids of all adapters that are connected
            to this environment
            Input:   -
            Output:  adapters  list   list of ids (keys) of the BatManCANBusAdapter
                                      objects connected to this environment
'''
return self.can_bus_adapter.keys()
def set_individual_ecu_spec(self, ecu_spec, ecu_class_name):
''' if certain ECUs with certain ECU ids shall have defined
ecu specs set and be of a defined class, then this method
sets those ECU Specs individually.
Input: ecu_spec AbstractECUSpec ECU Spec defining which ECU Ids are to be set
and which ECU specs should be used for those ECU Ids
ecu_class_name string name of the class that the ECUs should get assigned
'''
for ecu_id in ecu_spec.ecu_id_list:
self._individual_spec[ecu_id] = [ecu_spec, ecu_class_name]
def set_ecu_spec(self, ecu_spec, ecu_class_name):
''' this method sets the ecu spec that will be used per default to
set the ecu specification
Input: ecu_spec AbstractECUSpec spec that will be used to define the properties of the ECU
in this ECU Simulation environment
ecu_class_name string name of the class that the ECUs should get assigned
Output: -
'''
self._ecu_spec = ecu_spec
self._ecu_class_name = ecu_class_name
def connect_monitor_reader(self, monitor, result_reader):
''' this method connects a Monitor and a ResultReader to the environment
to be able to extract data from the simulation
Input: monitor Monitor monitor connected to the environment
result_reader ResultReader result reader connected to the monitor
Output: -
'''
self._monitor = monitor
self._result_reader = result_reader
def prepare_configuration(self, simpy_env, life_time):
''' this method prepares the configuration on the side of the
ECU simulation. It sets up the defined environment by mapping
the ECU Ids as they are defined in the battery management system
on the ECUs and the defined specs. So the constellation used is
implemented here.
Input: simpy_env simpy.Environment environment used in the battery management system
life_time float life time of the simulation
Output: -
'''
# lifetime
life_time = 50000
# logging
# api_log_path = os.path.join(os.path.dirname(__file__), "../logs/api.log")
# api.show_logging(logging.INFO, api_log_path, True)
# create environment
sim_env = api.create_environment(life_time)
sim_env.set_env(simpy_env)
ecu_list = []
# generate a ecu from the ECU specs setting the adapter
for ecu_id in self.available_can_adapters():
# logging.info("ECU ID %s: " % ecu_id)
# define individual ECU Spec
ecu_spec = copy.deepcopy(self._ecu_spec)
ecu_class_name = self._ecu_class_name
if ecu_id in self._individual_spec:
ecu_spec = self._individual_spec[ecu_id][0]
ecu_class_name = self._individual_spec[ecu_id][1]
ecu_spec.ecu_id_list = [str(ecu_id)]
# create ecu
ecu = api.set_ecus(sim_env, 1, ecu_class_name, ecu_spec)[0]
ecu_list += [ecu]
# connect ecu to adapter
ecu.connect_adapter(self.can_bus_adapter[ecu_id])
# add security module
# create ECU specification
ecu_spec = SimpleECUSpec(['SEC 1'], 200000, 200000) # 200 KB
ecu_spec.set_ecu_setting('t_ecu_auth_trigger_process', 0)
ecu_spec.set_ecu_setting('t_ecu_auth_trigger_intervall', 80000)
ecu_spec.set_apply_jitter(0.0001)
sec_mod_group = api.set_ecus(sim_env, 1, 'SecLwAuthSecurityModule', ecu_spec)
security_module = sec_mod_group[0]
# connect to one bus
# create the bus specifications
bus_spec = SimpleBusSpec(['CAN_0'])
api.set_busses(sim_env, 1, 'StdCANBus', bus_spec)
api.connect_bus_by_obj(sim_env, 'CAN_0', ecu_list + sec_mod_group)
# security constellation
all_ecu_groups = [ecu_list]
api.register_ecu_groups_to_secmod(sim_env, sec_mod_group[0].ecu_id, all_ecu_groups)
certificate_manager = api.create_cert_manager()
all_created_ecus = api.ecu_list_from_groups([all_ecu_groups])
ecu_ids = [str(ecu.ecu_id) for ecu in all_created_ecus]
for ecu_id in ecu_ids:
api.generate_valid_ecu_cert_cfg(certificate_manager, ecu_id, CAEnum.CA_L313, security_module.ecu_id, 0, float('inf'))
api.generate_valid_sec_mod_cert_cfg(certificate_manager, security_module.ecu_id, CAEnum.CA_L313, ecu_ids, 0, float('inf'))
api.apply_certification(sim_env, certificate_manager)
# define allowed streams -------------------------------------------------------------- TODO very IMPORTANT
for broadcast_stream_id in [0x0080, 0x0081, 0x0082, 0x0083, 0x0012, 0x0013, 0x0020, 0x00A0, 0x00A1 ]:
for ecu_id in ecu_ids:
lst = copy.deepcopy(ecu_ids)
lst.remove(ecu_id)
stream = MessageStream(ecu_id, lst, broadcast_stream_id, float('inf'), 0, float('inf'))
api.add_allowed_stream(sim_env, security_module.ecu_id, stream)
# -------------------------------------------------------------- TODO very IMPORTANT
# set gateways
api.autoset_gateway_filters(sim_env, sec_mod_group[0].ecu_id)
# set timing functions
function_set = TimingFunctionSet()
ecu_func_set = StdSecurLwSecModTimingFunctions(main_library_tag='CyaSSL')
ecu_func_set.library_tags['t_ecu_auth_reg_msg_validate_cert'] = 'Crypto_Lib_SW'
function_set.set_mapping_from_function_set(security_module.ecu_id, ecu_func_set)
api.apply_timing_functions_set(sim_env, security_module.ecu_id, function_set)
function_set_2 = TimingFunctionSet()
ecu_func_set = StdSecurECUTimingFunctions(main_library_tag='CyaSSL')
ecu_func_set.library_tags['t_adv_msg_secmodcert_enc'] = 'Crypto_Lib_SW'
for ecu_id in ecu_ids:
function_set_2.set_mapping_from_function_set(ecu_id, ecu_func_set)
api.apply_timing_functions_set(sim_env, ecu_id, function_set_2)
# add monitor
if self._monitor and self._result_reader:
api.connect_monitor(sim_env, self._monitor, 0.5)
api.connect_result_reader(sim_env, self._monitor, self._result_reader)
# run view if defined
if self._view:
self._view.show(self._result_reader, self._view_options, self.q_application)
# run simulation
api.open_simulation_stop_button(sim_env)
api.build_simulation(sim_env)
api.run_simulation(sim_env)
def add_adapter(self, cell_id, adapter):
        ''' adds an adapter to the simulation environment
Input: cell_id string id of the ECU/CMU that is added to the system
adapter BatManCANBusAdapter adapter that maps the ECUSimulation environment
to the Battery management system by intercepting
the CANBus sending and receiving process
Output: -
'''
self.can_bus_adapter[cell_id] = adapter
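# Minimal wiring sketch (not part of the original module). The SimpleECUSpec
# arguments mirror the call used in prepare_configuration() above; 'BatManECU'
# is the ECU class named in the BatManCANBusAdapter docstring below and is
# otherwise an assumption here.
def _example_configure_administrator():
    admin = BatManAdministrator()
    ecu_spec = SimpleECUSpec([], 200000, 200000)  # 200 KB; ecu ids are filled in per adapter
    admin.set_ecu_spec(ecu_spec, 'BatManECU')
    admin.activate_ecu_simulation()
    return admin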
class BatManCANBusAdapter(object):
'''
this class is instantiated in the Battery Management environment once per ECU/cell. It is implemented as one
CAN Bus instance (per cell) that is accessed during the sending and receiving process of each cell. This
adapter is then mapped onto the ECU Simulation environment by connecting each of those CAN Bus instances
to one BatManECU and its application layer. So every time the cell in the Battery Management environment
    invokes the send or receive method, the corresponding ECU that represents this cell is called in the
ecu simulation environment.
'''
# message ids definitions for the battery management system
CAN_SOC_BROADCAST = [0x0080, 32] # ('TARGET':'BROADCAST', 'ORIGIN':, 'soc' :)
CAN_VOLTAGE_BROADCAST = [0x0081, 32] # ('TARGET':'BROADCAST', 'ORIGIN':, 'voltage' :)
CAN_BLOCK_REQUEST = [0x0082, 16] # ('TARGET':'BROADCAST', 'ORIGIN':, 'SENDER_ID':, 'RECEIVER_ID':)
CAN_UNBLOCK_REQUEST = [0x0083, 16] # ('TARGET':'BROADCAST', 'ORIGIN':, 'SENDER_ID':, 'RECEIVER_ID':)
CAN_SEND_REQUEST = [0x0012, 0] # ('TARGET':, 'ORIGIN':)
CAN_SEND_ACKNOWLEDGE = [0x0013, 64] # ('TARGET':, 'ORIGIN':, 'transferTime':, 'transferRate')
CAN_STATUS_RESPONSE = [0x0020, 24] # ('TARGET':, 'ORIGIN':, 'STATUS', 'BLOCKERID1', 'BLOCKERID2')
CAN_BALANCE_CONTROL = [0x00A0, 0]
CAN_SUPPLY_LOAD_MODE = [0x00A1, 66]
def __init__(self, cmu):
''' Constructor
Input: cmu CMU cmu in the battery management system that corresponds to a ECU in the
ECUSimulation environment
'''
# define default ecu specs
self.ecu_specs = None
self.cmu = cmu
# register at Administrator
self.ecu_id = cmu.objectId
BatManAdministrator().add_adapter(self.ecu_id, self)
# dummy parameters (needed for instantiation)
self.dataCount = 0
self.speed = 2
self.avgSpeed = 2
def CANsend(self, message):
''' when a CMU of the battery management system wants to
send a message this method is invoked. If this adapter is
connected to a BatManECU this method is overridden with the
method that sends in the ECU Environment
Input: message CANBusMessage can bus message that is sent by the batterymanagement
Output: -
'''
# logging.info('ID: {}, DATA: {}'.format(message.identifier, message.data))
self.receive(message)
logging.info("CAN send not overridden")
def receive(self, msg):
''' when a CMU of | |
import os
import datetime
import tempfile
import numpy as np
from simtk import openmm as mm
from simtk import unit
from simtk.openmm.app.pdbfile import PDBFile
from simtk.openmm.app.topology import Topology
import simtk.openmm.app.element as elem
from cg_openmm.simulation.tools import build_mm_simulation
from cg_openmm.utilities.util import lj_v
from cg_openmm.utilities.iotools import write_pdbfile_without_topology
def add_new_elements(cgmodel):
"""
Add coarse grained particle types to OpenMM.
:param cgmodel: CGModel object (contains all attributes for a coarse grained model).
:type cgmodel: class
:returns:
- new_particles (list) - a list of the particle names that were added to OpenMM's 'Element' List.
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> particle_types = add_new_elements(cgmodel)
.. warning:: If the particle names were user defined, and any of the names conflict with existing element names in OpenMM, OpenMM will issue an error exit.
"""
element_index = 117
new_particles = []
for particle in cgmodel.particle_list:
particle_name = particle["name"]
if particle_name.upper() not in elem.Element._elements_by_symbol:
elem.Element(element_index, particle_name, particle_name, cgmodel.get_particle_mass(particle))
element_index = element_index + 1
new_particles.append(particle_name)
return new_particles
def write_xml_file(cgmodel, xml_file_name):
"""
Write an XML-formatted forcefield file for a coarse grained model.
:param cgmodel: CGModel() class object.
:type cgmodel: class
:param xml_file_name: Path to XML output file.
:type xml_file_name: str
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> xml_file_name = "openmm_cgmodel.xml"
>>> write_xml_file(cgmodel,xml_file_name)
"""
particle_list = add_new_elements(cgmodel)
xml_object = open(xml_file_name, "w")
xml_object.write("<ForceField>\n")
xml_object.write(" <Info>\n")
date = str(datetime.datetime.today()).split()[0]
xml_object.write(f" <DateGenerated> {date} </DateGenerated>\n")
xml_object.write(" <Source> https://github.com/shirtsgroup/cg_openmm </Source>\n")
xml_object.write(" <Reference>\n")
xml_object.write(" </Reference>\n")
xml_object.write(" </Info>\n")
xml_object.write(" <AtomTypes>\n")
unique_particle_names = []
unique_masses = []
for particle_index in range(len(cgmodel.particle_list)):
if cgmodel.particle_list[particle_index] not in unique_particle_names:
unique_particle_names.append(cgmodel.particle_list[particle_index])
unique_masses.append(cgmodel.get_particle_mass(particle_index))
for particle_index in range(len(unique_particle_names)):
particle_type = cgmodel.get_particle_type_name(particle_index)
xml_object.write(
' <Type name="'
+ str(unique_particle_names[particle_index])
+ '" class="'
+ str(particle_type)
+ '" element="'
+ str(unique_particle_names[particle_index])
+ '" mass="'
+ str(unique_masses[particle_index]._value)
+ '"/>\n'
)
xml_object.write(" </AtomTypes>\n")
xml_object.write(" <Residues>\n")
xml_object.write(' <Residue name="M">\n')
for particle_index in range(len(unique_particle_names)):
xml_object.write(
' <Atom name="'
+ str(unique_particle_names[particle_index])
+ '" type="'
+ str(unique_particle_names[particle_index])
+ '"/>\n'
)
for bond in cgmodel.bond_list:
if all(bond[i] < len(unique_particle_names) for i in range(2)):
particle_1_name = cgmodel.get_particle_name(bond[0])
particle_2_name = cgmodel.get_particle_name(bond[1])
xml_object.write(
' <Bond atomName1="'
+ str(particle_1_name)
+ '" atomName2="'
+ str(particle_2_name)
+ '"/>\n'
)
xml_object.write(' <ExternalBond atomName="' + str(unique_particle_names[0]) + '"/>\n')
external_parent = unique_particle_names[
len(unique_particle_names) - cgmodel.monomer_types[0]["sidechain_length"] - 1
]
xml_object.write(' <ExternalBond atomName="' + str(external_parent) + '"/>\n')
xml_object.write(" </Residue>\n")
xml_object.write(' <Residue name="MT">\n')
for particle_index in range(len(unique_particle_names)):
xml_object.write(
' <Atom name="'
+ str(unique_particle_names[particle_index])
+ '" type="'
+ str(unique_particle_names[particle_index])
+ '"/>\n'
)
for bond in cgmodel.bond_list:
if all(bond[i] < len(unique_particle_names) for i in range(2)):
particle_1_name = cgmodel.get_particle_name(bond[0])
particle_2_name = cgmodel.get_particle_name(bond[1])
xml_object.write(
' <Bond atomName1="'
+ str(particle_1_name)
+ '" atomName2="'
+ str(particle_2_name)
+ '"/>\n'
)
xml_object.write(' <ExternalBond atomName="' + str(external_parent) + '"/>\n')
xml_object.write(" </Residue>\n")
xml_object.write(" </Residues>\n")
if cgmodel.include_bond_forces:
xml_object.write(" <HarmonicBondForce>\n")
unique_bond_list = []
for bond in cgmodel.bond_list:
if any(bond[i] < len(unique_particle_names) for i in range(2)):
unique_bond_list.append(bond)
for bond in unique_bond_list:
particle_1_name = cgmodel.get_particle_name(bond[0])
particle_2_name = cgmodel.get_particle_name(bond[1])
# unique_particle_names.index(particle_1_name)
xml_type_1 = particle_1_name
# unique_particle_names.index(particle_2_name)
xml_type_2 = particle_2_name
bond_length = cgmodel.get_bond_length(bond).value_in_unit(unit.nanometer)
bond_force_constant = cgmodel.get_bond_force_constant(bond)
xml_object.write(
' <Bond type1="'
+ str(xml_type_1)
+ '" type2="'
+ str(xml_type_2)
+ '" length="'
+ str(bond_length)
+ '" k="'
+ str(bond_force_constant)
+ '"/>\n'
)
xml_object.write(" </HarmonicBondForce>\n")
if cgmodel.include_bond_angle_forces:
xml_object.write(" <HarmonicAngleForce>\n")
unique_angle_list = []
for angle in cgmodel.bond_angle_list:
if any(angle[i] < len(unique_particle_names) for i in range(3)):
unique_angle_list.append(angle)
for angle in unique_angle_list:
bond_angle_force_constant = cgmodel.get_bond_angle_force_constant(angle)
equil_bond_angle = cgmodel.get_equil_bond_angle(angle)
particle_1_name = cgmodel.get_particle_name(angle[0])
particle_2_name = cgmodel.get_particle_name(angle[1])
particle_3_name = cgmodel.get_particle_name(angle[2])
# unique_particle_names.index(particle_1_name)
xml_type_1 = particle_1_name
# unique_particle_names.index(particle_2_name)
xml_type_2 = particle_2_name
# unique_particle_names.index(particle_3_name)
xml_type_3 = particle_3_name
xml_object.write(
' <Angle angle="'
+ str(equil_bond_angle)
+ '" k="'
+ str(bond_angle_force_constant)
+ '" type1="'
+ str(xml_type_1)
+ '" type2="'
+ str(xml_type_2)
+ '" type3="'
+ str(xml_type_3)
+ '"/>\n'
)
xml_object.write(" </HarmonicAngleForce>\n")
if cgmodel.include_torsion_forces:
xml_object.write(' <PeriodicTorsionForce ordering="amber">\n')
unique_torsion_list = []
# print(cgmodel.torsion_list)
for torsion in cgmodel.torsion_list:
if any(torsion[i] < len(unique_particle_names) for i in range(4)):
unique_torsion_list.append(torsion)
for torsion in unique_torsion_list:
torsion_force_constant = cgmodel.get_torsion_force_constant(
[torsion[0], torsion[1], torsion[2], torsion[3]]
)
torsion_phase_angle = cgmodel.get_torsion_phase_angle(
[torsion[0], torsion[1], torsion[2], torsion[3]]
)
particle_1_name = cgmodel.get_particle_name(torsion[0])
particle_2_name = cgmodel.get_particle_name(torsion[1])
particle_3_name = cgmodel.get_particle_name(torsion[2])
particle_4_name = cgmodel.get_particle_name(torsion[3])
# unique_particle_names.index(particle_1_name)
xml_type_1 = particle_1_name
# unique_particle_names.index(particle_2_name)
xml_type_2 = particle_2_name
# unique_particle_names.index(particle_3_name)
xml_type_3 = particle_3_name
# unique_particle_names.index(particle_4_name)
xml_type_4 = particle_4_name
periodicity = cgmodel.get_torsion_periodicity(torsion)
xml_object.write(
' <Proper k1="'
+ str(torsion_force_constant)
+ '" periodicity1="'
+ str(periodicity)
+ '" phase1="'
+ str(torsion_phase_angle)
+ '" type1="'
+ str(xml_type_1)
+ '" type2="'
+ str(xml_type_2)
+ '" type3="'
+ str(xml_type_3)
+ '" type4="'
+ str(xml_type_4)
+ '"/>\n'
)
xml_object.write(" </PeriodicTorsionForce>\n")
if cgmodel.include_nonbonded_forces:
xml_object.write(' <NonbondedForce coulomb14scale="0.833333" lj14scale="0.5">\n')
for particle_index in range(len(unique_particle_names)):
charge = cgmodel.get_particle_charge(particle_index)._value
sigma = cgmodel.get_particle_sigma(particle_index).in_units_of(unit.nanometer)._value
epsilon = cgmodel.get_particle_epsilon(particle_index)._value
particle_name = cgmodel.get_particle_name(particle_index)
            xml_object.write(f'  <Atom type="{particle_name}" charge="{charge}" sigma="{sigma}" epsilon="{epsilon}"/>\n')
xml_object.write(" </NonbondedForce>\n")
xml_object.write("</ForceField>\n")
xml_object.close()
return
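# Hedged follow-up sketch (not part of the original module): the file produced by
# write_xml_file is a standard OpenMM force-field XML, so it can be read back with
# simtk.openmm.app.ForceField, provided the coarse-grained element names were first
# registered through add_new_elements. The file name below is an assumption.
def _example_load_written_forcefield(xml_file_name="openmm_cgmodel.xml"):
    from simtk.openmm.app import ForceField
    return ForceField(xml_file_name)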
def verify_topology(cgmodel):
"""
Given a coarse grained model that contains a Topology() (cgmodel.topology), this function verifies the validity of the topology.
:param cgmodel: CGModel() class object.
:type cgmodel: class
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> verify_topology(cgmodel)
.. warning:: The function will force an error exit if the topology is invalid, and will proceed as normal if the topology is valid.
"""
if cgmodel.num_beads != cgmodel.topology.getNumAtoms():
print("ERROR: The number of particles in the coarse grained model\n")
print("does not match the number of particles in the OpenMM topology.\n")
print("There are " + str(cgmodel.num_beads) + " particles in the coarse grained model\n")
print("and " + str(cgmodel.topology.getNumAtoms()) + " particles in the OpenMM topology.")
exit()
if cgmodel.polymer_length != cgmodel.topology.getNumResidues():
print("ERROR: The number of monomers in the coarse grained model\n")
print("does not match the number of residues in the OpenMM topology.\n")
print(
"There are " + str(cgmodel.polymer_length) + " monomers in the coarse grained model\n"
)
print(
"and " + str(cgmodel.topology.getNumResidues()) + " monomers in the OpenMM topology."
)
exit()
return
def build_topology(cgmodel, use_pdbfile=False, pdbfile=None):
"""
Construct an OpenMM `Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_ class object for our coarse grained model,
:param cgmodel: CGModel() class object
:type cgmodel: class
:param use_pdbfile: Determines whether or not to use a PDB file in order to generate the Topology().
:type use_pdbfile: Logical
:param pdbfile: Name of a PDB file to use when building the topology.
:type pdbfile: str
:returns:
- topology (`Topology() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1topology_1_1Topology.html>`_ ) - OpenMM Topology() object
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> from foldamers.util.iotools import write_pdbfile_without_topology
>>> input_pdb = "top.pdb"
>>> cgmodel = CGModel()
>>> write_pdbfile_without_topology(cgmodel,input_pdb)
>>> topology = build_topology(cgmodel,use_pdbfile=True,pdbfile=input_pdb)
>>> cgmodel.topology = topology
.. warning:: When 'use_pdbfile'=True, this function will use the `PDBFile() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1app_1_1pdbfile_1_1PDBFile.html>`_ class object from OpenMM to build the Topology(). In order for this approach to function correctly, the particle names in the PDB file must match the particle names in the coarse grained model.
"""
if cgmodel.constrain_bonds:
use_pdbfile = True
if use_pdbfile:
if pdbfile is None:
tf = tempfile.NamedTemporaryFile()
write_pdbfile_without_topology(cgmodel, tf.name)
pdb = PDBFile(tf.name)
topology = pdb.getTopology()
tf.close()
return topology
else:
pdb = PDBFile(pdbfile)
topology = pdb.getTopology()
return topology
topology = Topology()
chain = topology.addChain()
residue_index = -1
openmm_particle_list = list()
for particle in cgmodel.particle_list:
if particle["monomer"] > residue_index:
residue_index = particle["monomer"]
residue = topology.addResidue(str(residue_index), chain)
particle_symbol = particle["name"]
element = elem.Element.getBySymbol(particle_symbol)
openmm_particle = topology.addAtom(particle_symbol, element, residue)
        openmm_particle_list.append(openmm_particle)
if cgmodel.include_bond_forces or cgmodel.constrain_bonds:
for bond in cgmodel.bond_list:
topology.addBond(openmm_particle_list[bond[0]],openmm_particle_list[bond[1]])
cgmodel.topology = topology
verify_topology(cgmodel)
return topology
def get_num_forces(cgmodel):
"""
Given a CGModel() class object, this function determines how many forces we are including when evaluating the energy.
:param cgmodel: CGModel() class object
:type cgmodel: class
:returns:
- total_forces (int) - Number of forces in the coarse grained model
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> total_number_forces = get_num_forces(cgmodel)
"""
total_forces = 0
if cgmodel.include_bond_forces:
total_forces = total_forces + 1
if cgmodel.include_nonbonded_forces:
total_forces = total_forces + 1
if cgmodel.include_bond_angle_forces:
total_forces = total_forces + 1
if cgmodel.include_torsion_forces:
total_forces = total_forces + 1
return total_forces
def verify_system(cgmodel):
"""
Given a CGModel() class object, this function confirms that its OpenMM `System() <https://simtk.org/api_docs/openmm/api4_1/python/classsimtk_1_1openmm_1_1openmm_1_1System.html>`_ object is configured correctly.
:param cgmodel: CGModel() class object
:type cgmodel: class
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> verify_system(cgmodel)
.. warning:: The function will force an error exit if the system is invalid, and will proceed as normal if the system is | |
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="processingConfiguration")
def processing_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs']]:
"""
The data processing configuration. More details are given below.
"""
return pulumi.get(self, "processing_configuration")
@processing_configuration.setter
def processing_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs']]):
pulumi.set(self, "processing_configuration", value)
@property
@pulumi.getter(name="s3BackupConfiguration")
def s3_backup_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs']]:
"""
The configuration for backup in Amazon S3. Required if `s3_backup_mode` is `Enabled`. Supports the same fields as `s3_configuration` object.
"""
return pulumi.get(self, "s3_backup_configuration")
@s3_backup_configuration.setter
def s3_backup_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs']]):
pulumi.set(self, "s3_backup_configuration", value)
@property
@pulumi.getter(name="s3BackupMode")
def s3_backup_mode(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon S3 backup mode. Valid values are `Disabled` and `Enabled`. Default value is `Disabled`.
"""
return pulumi.get(self, "s3_backup_mode")
@s3_backup_mode.setter
def s3_backup_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "s3_backup_mode", value)
@pulumi.input_type
class FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
log_group_name: Optional[pulumi.Input[str]] = None,
log_stream_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[bool] enabled: Enables or disables the logging. Defaults to `false`.
:param pulumi.Input[str] log_group_name: The CloudWatch group name for logging. This value is required if `enabled` is true.
:param pulumi.Input[str] log_stream_name: The CloudWatch log stream name for logging. This value is required if `enabled` is true.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if log_group_name is not None:
pulumi.set(__self__, "log_group_name", log_group_name)
if log_stream_name is not None:
pulumi.set(__self__, "log_stream_name", log_stream_name)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables the logging. Defaults to `false`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="logGroupName")
def log_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The CloudWatch group name for logging. This value is required if `enabled` is true.
"""
return pulumi.get(self, "log_group_name")
@log_group_name.setter
def log_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_group_name", value)
@property
@pulumi.getter(name="logStreamName")
def log_stream_name(self) -> Optional[pulumi.Input[str]]:
"""
The CloudWatch log stream name for logging. This value is required if `enabled` is true.
"""
return pulumi.get(self, "log_stream_name")
@log_stream_name.setter
def log_stream_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_stream_name", value)
@pulumi.input_type
class FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs:
def __init__(__self__, *,
input_format_configuration: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs'],
output_format_configuration: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs'],
schema_configuration: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs'],
enabled: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs'] input_format_configuration: Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs'] output_format_configuration: Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs'] schema_configuration: Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
:param pulumi.Input[bool] enabled: Defaults to `true`. Set it to `false` if you want to disable format conversion while preserving the configuration details.
"""
pulumi.set(__self__, "input_format_configuration", input_format_configuration)
pulumi.set(__self__, "output_format_configuration", output_format_configuration)
pulumi.set(__self__, "schema_configuration", schema_configuration)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter(name="inputFormatConfiguration")
def input_format_configuration(self) -> pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs']:
"""
Nested argument that specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. More details below.
"""
return pulumi.get(self, "input_format_configuration")
@input_format_configuration.setter
def input_format_configuration(self, value: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs']):
pulumi.set(self, "input_format_configuration", value)
@property
@pulumi.getter(name="outputFormatConfiguration")
def output_format_configuration(self) -> pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs']:
"""
Nested argument that specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. More details below.
"""
return pulumi.get(self, "output_format_configuration")
@output_format_configuration.setter
def output_format_configuration(self, value: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs']):
pulumi.set(self, "output_format_configuration", value)
@property
@pulumi.getter(name="schemaConfiguration")
def schema_configuration(self) -> pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs']:
"""
Nested argument that specifies the AWS Glue Data Catalog table that contains the column information. More details below.
"""
return pulumi.get(self, "schema_configuration")
@schema_configuration.setter
def schema_configuration(self, value: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs']):
pulumi.set(self, "schema_configuration", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Defaults to `true`. Set it to `false` if you want to disable format conversion while preserving the configuration details.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs:
def __init__(__self__, *,
deserializer: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs']):
"""
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs'] deserializer: Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.
"""
pulumi.set(__self__, "deserializer", deserializer)
@property
@pulumi.getter
def deserializer(self) -> pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs']:
"""
Nested argument that specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. More details below.
"""
return pulumi.get(self, "deserializer")
@deserializer.setter
def deserializer(self, value: pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs']):
pulumi.set(self, "deserializer", value)
@pulumi.input_type
class FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs:
def __init__(__self__, *,
hive_json_ser_de: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs']] = None,
open_x_json_ser_de: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs']] = None):
"""
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs'] hive_json_ser_de: Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs'] open_x_json_ser_de: Nested argument that specifies the OpenX SerDe. More details below.
"""
if hive_json_ser_de is not None:
pulumi.set(__self__, "hive_json_ser_de", hive_json_ser_de)
if open_x_json_ser_de is not None:
pulumi.set(__self__, "open_x_json_ser_de", open_x_json_ser_de)
@property
@pulumi.getter(name="hiveJsonSerDe")
def hive_json_ser_de(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs']]:
"""
Nested argument that specifies the native Hive / HCatalog JsonSerDe. More details below.
"""
return pulumi.get(self, "hive_json_ser_de")
@hive_json_ser_de.setter
def hive_json_ser_de(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs']]):
pulumi.set(self, "hive_json_ser_de", value)
@property
@pulumi.getter(name="openXJsonSerDe")
def open_x_json_ser_de(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs']]:
"""
Nested argument that specifies the OpenX SerDe. More details below.
"""
return pulumi.get(self, "open_x_json_ser_de")
@open_x_json_ser_de.setter
def open_x_json_ser_de(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs']]):
pulumi.set(self, "open_x_json_ser_de", value)
@pulumi.input_type
class FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs:
def __init__(__self__, *,
timestamp_formats: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] timestamp_formats: A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
"""
if timestamp_formats is not None:
pulumi.set(__self__, "timestamp_formats", timestamp_formats)
@property
@pulumi.getter(name="timestampFormats")
def timestamp_formats(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
"""
return pulumi.get(self, "timestamp_formats")
@timestamp_formats.setter
def timestamp_formats(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "timestamp_formats", value)
@pulumi.input_type
class FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs:
def __init__(__self__, *,
case_insensitive: Optional[pulumi.Input[bool]] = None,
column_to_json_key_mappings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
convert_dots_in_json_keys_to_underscores: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] case_insensitive: When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] column_to_json_key_mappings: A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to `{ ts = "timestamp" }` to map this key to a column named ts.
:param pulumi.Input[bool] convert_dots_in_json_keys_to_underscores: When set to `true`, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option. Defaults to `false`.
"""
if case_insensitive is not None:
pulumi.set(__self__, "case_insensitive", case_insensitive)
if column_to_json_key_mappings is not None:
pulumi.set(__self__, "column_to_json_key_mappings", column_to_json_key_mappings)
if convert_dots_in_json_keys_to_underscores is not None:
pulumi.set(__self__, "convert_dots_in_json_keys_to_underscores", convert_dots_in_json_keys_to_underscores)
@property
@pulumi.getter(name="caseInsensitive")
def case_insensitive(self) -> Optional[pulumi.Input[bool]]:
"""
When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
"""
return pulumi.get(self, "case_insensitive")
@case_insensitive.setter
def case_insensitive(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "case_insensitive", value)
@property
@pulumi.getter(name="columnToJsonKeyMappings")
def column_to_json_key_mappings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to `{ ts = "timestamp" }` to map this key to a column named ts.
        """
        return pulumi.get(self, "column_to_json_key_mappings")
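    # Illustrative sketch (not part of the generated module): how the nested
    # Args classes above compose when configuring data-format conversion. The
    # `pulumi_aws.kinesis` module path and the re-exported class names are
    # assumptions based on how these generated SDKs are usually packaged.
    #
    #   import pulumi_aws as aws
    #   deserializer = aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs(
    #       open_x_json_ser_de=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs(
    #           case_insensitive=True,
    #           convert_dots_in_json_keys_to_underscores=True,
    #           column_to_json_key_mappings={"ts": "timestamp"},
    #       ),
    #   )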
user namespace.
Return value: a flag indicating whether the code to be run completed
successfully:
- 0: successful execution.
- 1: an error occurred.
"""
outflag = 1 # start by assuming error, success will reset it
try:
exec code in self.user_ns
outflag = 0
except SystemExit:
self.resetbuffer()
self.traceback_trap.args = sys.exc_info()
except:
self.traceback_trap.args = sys.exc_info()
return outflag
def execute_macro(self, macro):
""" Execute the value of a macro.
Parameters
----------
macro : Macro
"""
python = macro.value
if self.translator is not None:
python = self.translator(python)
self.execute_python(python)
def getCommand(self, i=None):
"""Gets the ith message in the message_cache.
This is implemented here for compatibility with the old ipython1 shell
I am not sure we need this though. I even seem to remember that we
were going to get rid of it.
"""
return self.message_cache.get_message(i)
def reset(self):
"""Reset the interpreter.
Currently this only resets the users variables in the namespace.
In the future we might want to also reset the other stateful
        things that the Interpreter has, like In, Out, etc.
"""
self.user_ns.clear()
self.setup_namespace()
def complete(self,line,text=None, pos=None):
"""Complete the given text.
:Parameters:
text : str
Text fragment to be completed on. Typically this is
"""
# fixme: implement
raise NotImplementedError
def push(self, ns):
""" Put value into the namespace with name key.
Parameters
----------
**kwds
"""
self.user_ns.update(ns)
def push_function(self, ns):
# First set the func_globals for all functions to self.user_ns
new_kwds = {}
for k, v in ns.iteritems():
if not isinstance(v, FunctionType):
raise TypeError("function object expected")
new_kwds[k] = FunctionType(v.func_code, self.user_ns)
self.user_ns.update(new_kwds)
def pack_exception(self,message,exc):
message['exception'] = exc.__class__
message['exception_value'] = \
traceback.format_exception_only(exc.__class__, exc)
def feed_block(self, source, filename='<input>', symbol='single'):
"""Compile some source in the interpreter.
        One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError).
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is:
- True in case 2
- False in the other cases, unless an exception is raised, where
None is returned instead. This can be used by external callers to
know whether to continue feeding input or not.
The return value can be used to decide whether to use sys.ps1 or
sys.ps2 to prompt the next line."""
self.message = self.setup_message()
try:
code = self.command_compiler(source,filename,symbol)
        except (OverflowError, SyntaxError, IndentationError, ValueError), e:
# Case 1
self.traceback_trap.args = sys.exc_info()
self.pack_exception(self.message,e)
return COMPILER_ERROR,False
if code is None:
# Case 2: incomplete input. This means that the input can span
# multiple lines. But we still need to decide when to actually
# stop taking user input. Later we'll add auto-indentation support
# somehow. In the meantime, we'll just stop if there are two lines
# of pure whitespace at the end.
last_two = source.rsplit('\n',2)[-2:]
print 'last two:',last_two # dbg
if len(last_two)==2 and all(s.isspace() for s in last_two):
return COMPLETE_INPUT,False
else:
return INCOMPLETE_INPUT, True
else:
# Case 3
return COMPLETE_INPUT, False
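    # Illustrative examples (not from the original source) of the three
    # feed_block() outcomes above, assuming an Interpreter instance `interp`:
    #
    #   interp.feed_block('x = (')   # -> (INCOMPLETE_INPUT, True): keep reading input
    #   interp.feed_block('x = 1')   # -> (COMPLETE_INPUT, False): ready to execute
    #   interp.feed_block('x = )')   # -> (COMPILER_ERROR, False): SyntaxError trapped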
def pull(self, keys):
""" Get an item out of the namespace by key.
Parameters
----------
        keys : str or list/tuple of str
Returns
-------
value : object
Raises
------
TypeError if the key is not a string.
NameError if the object doesn't exist.
"""
if isinstance(keys, str):
result = self.user_ns.get(keys, NotDefined())
if isinstance(result, NotDefined):
raise NameError('name %s is not defined' % keys)
elif isinstance(keys, (list, tuple)):
result = []
for key in keys:
if not isinstance(key, str):
raise TypeError("objects must be keyed by strings.")
else:
r = self.user_ns.get(key, NotDefined())
if isinstance(r, NotDefined):
raise NameError('name %s is not defined' % key)
else:
result.append(r)
if len(keys)==1:
result = result[0]
else:
raise TypeError("keys must be a strong or a list/tuple of strings")
return result
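    # Illustrative examples (not from the original source), assuming an
    # Interpreter instance `interp` whose namespace defines a and b:
    #
    #   interp.pull('a')          # -> value bound to a, or NameError if undefined
    #   interp.pull(['a', 'b'])   # -> [value_of_a, value_of_b]
    #   interp.pull(['a'])        # -> value_of_a (single-element lists are unwrapped)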
def pull_function(self, keys):
return self.pull(keys)
#### Interactive user API ##################################################
def ipsystem(self, command):
""" Execute a command in a system shell while expanding variables in the
current namespace.
Parameters
----------
command : str
"""
# Expand $variables.
command = self.var_expand(command)
system_shell(command,
header='IPython system call: ',
verbose=self.rc.system_verbose,
)
def ipmagic(self, arg_string):
""" Call a magic function by name.
ipmagic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use ipmagic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements. It is added by IPython to the Python builtin
namespace upon initialization.
Parameters
----------
arg_string : str
A string containing the name of the magic function to call and any
additional arguments to be passed to the magic.
Returns
-------
something : object
The return value of the actual object.
"""
# Taken from IPython.
raise NotImplementedError('Not ported yet')
args = arg_string.split(' ', 1)
magic_name = args[0]
magic_name = magic_name.lstrip(self.config.ESC_MAGIC)
try:
magic_args = args[1]
except IndexError:
magic_args = ''
fn = getattr(self.magic, 'magic_'+magic_name, None)
if fn is None:
self.error("Magic function `%s` not found." % magic_name)
else:
magic_args = self.var_expand(magic_args)
return fn(magic_args)
#### Private 'Interpreter' interface #######################################
def setup_message(self):
"""Return a message object.
This method prepares and returns a message dictionary. This dict
contains the various fields that are used to transfer information about
execution, results, tracebacks, etc, to clients (either in or out of
process ones). Because of the need to work with possibly out of
process clients, this dict MUST contain strictly pickle-safe values.
"""
return dict(number=self.current_cell_number)
def setup_namespace(self):
""" Add things to the namespace.
"""
self.user_ns.setdefault('__name__', '__main__')
self.user_ns.setdefault('__builtins__', __builtin__)
self.user_ns['__IP'] = self
if self.raw_input_builtin is not None:
self.user_ns['raw_input'] = self.raw_input_builtin
if self.input_builtin is not None:
self.user_ns['input'] = self.input_builtin
builtin_additions = dict(
ipmagic=self.ipmagic,
)
__builtin__.__dict__.update(builtin_additions)
if self.history is not None:
self.history.setup_namespace(self.user_ns)
def set_traps(self):
""" Set all of the output, display, and traceback traps.
"""
self.output_trap.set()
self.display_trap.set()
self.traceback_trap.set()
def unset_traps(self):
""" Unset all of the output, display, and traceback traps.
"""
self.output_trap.unset()
self.display_trap.unset()
self.traceback_trap.unset()
def split_commands(self, python):
""" Split multiple lines of code into discrete commands that can be
executed singly.
Parameters
----------
python : str
Pure, exec'able Python code.
Returns
-------
commands : list of str
Separate commands that can be exec'ed independently.
"""
# compiler.parse treats trailing spaces after a newline as a
# SyntaxError. This is different than codeop.CommandCompiler, which
        # will compile the trailing spaces just fine. We simply strip any
# trailing whitespace off. Passing a string with trailing whitespace
# to exec will fail however. There seems to be some inconsistency in
# how trailing whitespace is handled, but this seems to work.
python = python.strip()
        # The compiler module does not like unicode. We need to encode it
        # before parsing:
if isinstance(python, unicode):
            # Use the utf-8-sig BOM so the compiler detects this as a UTF-8
            # encoded string.
python = '\xef\xbb\xbf' + python.encode('utf-8')
# The compiler module will parse the code into an abstract syntax tree.
# This has a bug with str("a\nb"), but not str("""a\nb""")!!!
ast = compiler.parse(python)
# Uncomment to help debug the ast tree
# for n in ast.node:
# print n.lineno,'->',n
# Each separate command is available by iterating over ast.node. The
        # lineno attribute is the line number (1-indexed) beginning the command's
# suite.
# lines ending with ";" yield a Discard Node that doesn't have a lineno
# attribute. These nodes can and should be discarded. But there are
# other situations that cause Discard nodes that shouldn't be discarded.
# We might eventually discover other cases where lineno is None and have
# to put in a more sophisticated test.
linenos = [x.lineno-1 for x in ast.node if x.lineno is not None]
# When we finally get the slices, we will need to slice all the way to
        # the end of the source.
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/coco.py
# which has the following license...
# https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
#
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# Adapted from https://github.com/cocodataset/cocoapi
# which has the following license...
# https://github.com/cocodataset/cocoapi/blob/master/license.txt
# Copyright (c) 2014, <NAME> and <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""
This script is almost the same as https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/coco.py
The changes include 1) code reformatting, 2) docstrings.
"""
import logging as logger
import time
from typing import Dict, List, Sequence, Tuple, Union
import numpy as np
class COCOMetric:
def __init__(
self,
classes: Sequence[str],
iou_list: Sequence[float] = (0.1, 0.5, 0.75),
iou_range: Sequence[float] = (0.1, 0.5, 0.05),
max_detection: Sequence[int] = (1, 5, 100),
per_class: bool = True,
verbose: bool = True,
):
"""
Class to compute COCO metrics
        Metrics computed include:
- mAP over the IoU range specified by `iou_range` at last value of `max_detection`
- AP values at IoU thresholds specified by `iou_list` at last value of `max_detection`
- AR over max detections thresholds defined by `max_detection` (over iou range)
Args:
classes (Sequence[str]): name of each class (index needs to correspond to predicted class indices!)
iou_list (Sequence[float]): specific thresholds where ap is evaluated and saved
iou_range (Sequence[float]): (start, stop, step) for mAP iou thresholds
max_detection (Sequence[int]): maximum number of detections per image
            per_class (bool): whether to additionally compute per-class metrics
            verbose (bool): log time needed for evaluation
Example:
.. code-block:: python
from monai.data.box_utils import box_iou
from monai.apps.detection.metrics.coco import COCOMetric
from monai.apps.detection.metrics.matching import matching_batch
# 3D example outputs of one image from detector
val_outputs_all = [
{"boxes": torch.tensor([[1,1,1,3,4,5]],dtype=torch.float16),
"labels": torch.randint(3,(1,)),
"scores": torch.randn((1,)).absolute()},
]
val_targets_all = [
{"boxes": torch.tensor([[1,1,1,2,6,4]],dtype=torch.float16),
"labels": torch.randint(3,(1,))},
]
coco_metric = COCOMetric(
classes=['c0','c1','c2'], iou_list=[0.1], max_detection=[10]
)
results_metric = matching_batch(
iou_fn=box_iou,
iou_thresholds=coco_metric.iou_thresholds,
pred_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_outputs_all],
pred_classes=[val_data_i["labels"].numpy() for val_data_i in val_outputs_all],
pred_scores=[val_data_i["scores"].numpy() for val_data_i in val_outputs_all],
gt_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_targets_all],
gt_classes=[val_data_i["labels"].numpy() for val_data_i in val_targets_all],
)
val_metric_dict = coco_metric(results_metric)
print(val_metric_dict)
"""
self.verbose = verbose
self.classes = classes
self.per_class = per_class
iou_list_np = np.array(iou_list)
_iou_range = np.linspace(
iou_range[0], iou_range[1], int(np.round((iou_range[1] - iou_range[0]) / iou_range[2])) + 1, endpoint=True
)
self.iou_thresholds = np.union1d(iou_list_np, _iou_range)
self.iou_range = iou_range
# get indices of iou values of ious range and ious list for later evaluation
self.iou_list_idx = np.nonzero(iou_list_np[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
self.iou_range_idx = np.nonzero(_iou_range[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
if (
not (self.iou_thresholds[self.iou_list_idx] == iou_list_np).all()
or not (self.iou_thresholds[self.iou_range_idx] == _iou_range).all()
):
raise ValueError(
"Require self.iou_thresholds[self.iou_list_idx] == iou_list_np and "
"self.iou_thresholds[self.iou_range_idx] == _iou_range."
)
self.recall_thresholds = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
self.max_detections = max_detection
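        # With the defaults above, self.iou_thresholds is the union of iou_list
        # and the 0.05-spaced range, i.e. [0.1, 0.15, ..., 0.45, 0.5, 0.75], and
        # self.recall_thresholds holds 101 values [0.0, 0.01, ..., 1.0].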
def __call__(self, *args, **kwargs) -> Tuple[Dict[str, float], Union[Dict[str, np.ndarray], None]]:
"""
Compute metric. See :func:`compute` for more information.
Args:
*args: positional arguments passed to :func:`compute`
**kwargs: keyword arguments passed to :func:`compute`
Returns:
Dict[str, float]: dictionary with scalar values for evaluation
Dict[str, np.ndarray]: dictionary with arrays, e.g. for visualization of graphs
"""
return self.compute(*args, **kwargs)
def check_number_of_iou(self, *args) -> None:
"""
Check if shape of input in first dimension is consistent with expected IoU values
(assumes IoU dimension is the first dimension)
Args:
args: array like inputs with shape function
"""
num_ious = len(self.get_iou_thresholds())
for arg in args:
if arg.shape[0] != num_ious:
raise ValueError(
f"Require arg.shape[0] == len(self.get_iou_thresholds()). Got arg.shape[0]={arg.shape[0]}, "
f"self.get_iou_thresholds()={self.get_iou_thresholds()}."
)
def get_iou_thresholds(self) -> Sequence[float]:
"""
        Return the IoU thresholds needed for this metric as a list of floats
Returns:
Sequence[float]: IoU thresholds [M], M is the number of thresholds
"""
return list(self.iou_thresholds)
def compute(self, results_list: List[Dict[int, Dict[str, np.ndarray]]]) -> Tuple[Dict[str, float], None]:
"""
Compute COCO metrics
Args:
results_list (List[Dict[int, Dict[str, np.ndarray]]]): list with results per image (in list)
per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
- `dtMatches`: matched detections [T, D], where T = number of
thresholds, D = number of detections
- `gtMatches`: matched ground truth boxes [T, G], where T = number
of thresholds, G = number of ground truth
- `dtScores`: prediction scores [D] detection scores
- `gtIgnore`: ground truth boxes which should be ignored
[G] indicate whether ground truth should be ignored
- `dtIgnore`: detections which should be ignored [T, D],
indicate which detections should be ignored
Returns:
Dict[str, float], dictionary with coco metrics
"""
if self.verbose:
logger.info("Start COCO metric computation...")
tic = time.time()
dataset_statistics = self._compute_statistics(results_list=results_list) # Dict[str, Union[np.ndarray, List]]
if self.verbose:
toc = time.time()
logger.info(f"Statistics for COCO metrics finished (t={(toc - tic):0.2f}s).")
results = {}
results.update(self._compute_ap(dataset_statistics))
results.update(self._compute_ar(dataset_statistics))
if self.verbose:
toc = time.time()
logger.info(f"COCO metrics computed in t={(toc - tic):0.2f}s.")
return results, None
def _compute_ap(self, dataset_statistics: Dict[str, Union[np.ndarray, List]]) -> Dict[str, float]:
"""
Compute AP metrics
Args:
            dataset_statistics (List[Dict[int, Dict[str, np.ndarray]]]): list with results per image (in list)
per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
- `dtMatches`: matched detections [T, D], where T = number of
thresholds, D = number of detections
- `gtMatches`: matched ground truth boxes [T, G], where T = number
of thresholds, G = number of ground truth
- `dtScores`: prediction scores [D] detection scores
- `gtIgnore`: ground truth boxes which should be ignored
[G] indicate whether ground truth should be ignored
- `dtIgnore`: detections which should be ignored [T, D],
indicate which detections should be ignored
"""
results = {}
if self.iou_range: # mAP
key = (
f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
f"MaxDet_{self.max_detections[-1]}"
)
results[key] = self._select_ap(dataset_statistics, iou_idx=self.iou_range_idx, max_det_idx=-1)
if self.per_class:
for cls_idx, cls_str in enumerate(self.classes): # per class results
key = (
f"{cls_str}_"
f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
f"MaxDet_{self.max_detections[-1]}"
)
results[key] = self._select_ap(
dataset_statistics, iou_idx=self.iou_range_idx, cls_idx=cls_idx, max_det_idx=-1
)
for idx in self.iou_list_idx: # AP@IoU
key = f"AP_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
results[key] = self._select_ap(dataset_statistics, iou_idx=[idx], max_det_idx=-1)
if self.per_class:
for cls_idx, cls_str in enumerate(self.classes): # per class results
key = f"{cls_str}_" f"AP_IoU_{self.iou_thresholds[idx]:.2f}_" f"MaxDet_{self.max_detections[-1]}"
results[key] = self._select_ap(dataset_statistics, iou_idx=[idx], cls_idx=cls_idx, max_det_idx=-1)
return results
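    # For the default settings, the keys produced above look like
    # "mAP_IoU_0.10_0.50_0.05_MaxDet_100" and "AP_IoU_0.10_MaxDet_100"; the
    # per-class variants are prefixed with the class name, e.g.
    # "c0_AP_IoU_0.10_MaxDet_100".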
    def _compute_ar(self, dataset_statistics: Dict[str, Union[np.ndarray, List]]) -> Dict[str, float]:
# -*- coding: utf-8 -*-
"""This python module aims to manage
`DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the
provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is
compatible with python2.7 and python3+.
Installation
------------
It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use
the ``pip`` command to install it::
pip install dokuwiki
Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_
"""
import re
import sys
import base64
import weakref
from xml.parsers.expat import ExpatError
if sys.version_info[0] == 3:
from xmlrpc.client import ServerProxy, Binary, Fault, Transport
from urllib.parse import urlencode
else:
from xmlrpclib import ServerProxy, Binary, Fault, Transport
from urllib import urlencode
from datetime import datetime, timedelta
ERR = 'XML or text declaration not at start of entity: line 2, column 0'
_URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')
def date(date):
"""DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
type and the format changes between DokuWiki versions ... This function
    converts *date* to a `datetime` object.
"""
date = date.value
return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
if len(date) == 24
else datetime.strptime(date, '%Y%m%dT%H:%M:%S'))
def utc2local(date):
"""DokuWiki returns date with a +0000 timezone. This function convert *date*
to the local time.
"""
date_offset = (datetime.now() - datetime.utcnow())
    # Python < 2.7 doesn't have the 'total_seconds' method, so calculate it by hand!
date_offset = (date_offset.microseconds +
(date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
date_offset = int(round(date_offset / 60 / 60))
return date + timedelta(hours=date_offset)
class DokuWikiError(Exception):
"""Exception raised by this module when there is an error."""
pass
class CookiesTransport(Transport):
"""A Python3 xmlrpc.client.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_headers(self, connection, headers):
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
Transport.send_headers(self, connection, headers)
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.msg.get_all("Set-Cookie"):
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class CookiesTransport2(Transport):
"""A Python2 xmlrpclib.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_request(self, connection, handler, request_body):
Transport.send_request(self, connection, handler, request_body)
# set cookie below handler
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.getheader("set-cookie").split(", "):
# filter 'expire' information
if not header.startswith("D"):
continue
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class DokuWiki(object):
"""Initialize a connection to a DokuWiki wiki. *url*, *user* and
*password* are respectively the URL, the login and the password for
connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
**ServerProxy** parameters.
    The exception `DokuWikiError` is raised if the authentication
    fails, but other exceptions (like ``gaierror`` for an invalid domain,
    ``ProtocolError`` for an invalid wiki, ...) are not caught.
.. code::
try:
wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
except (DokuWikiError, Exception) as err:
print('unable to connect: %s' % err)
"""
def __init__(self, url, user, password, cookieAuth=False, **kwargs):
"""Initialize the object by connecting to the XMLRPC server."""
# Initialize XMLRPC client.
try:
params = _URL_RE.search(url).groupdict()
if cookieAuth == False:
url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
params['proto'], user, password, params['host'], params['uri'] or '')
else:
url = '%s://%s%s/lib/exe/xmlrpc.php' % (
params['proto'], params['host'], params['uri'] or '')
except AttributeError:
raise DokuWikiError("invalid url '%s'" % url)
if cookieAuth == False:
self.proxy = ServerProxy(url, **kwargs)
else:
if sys.version_info[0] == 3:
self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
else:
self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)
# Force login to check the connection.
if not self.login(user, password):
raise DokuWikiError('invalid login or password!')
# Set "namespaces" for pages and medias functions.
self.pages = _Pages(weakref.ref(self)())
self.medias = _Medias(weakref.ref(self)())
def send(self, command, *args, **kwargs):
"""Generic method for executing an XML-RPC *command*. *args* and
*kwargs* are the arguments and parameters needed by the command.
"""
args = list(args)
if kwargs:
args.append(kwargs)
method = self.proxy
for elt in command.split('.'):
method = getattr(method, elt)
try:
return method(*args)
except Fault as err:
if err.faultCode == 121:
return {}
elif err.faultCode == 321:
return []
raise DokuWikiError(err)
except ExpatError as err:
if str(err) != ERR:
raise DokuWikiError(err)
@property
def version(self):
"""Property that returns the DokuWiki version of the remote Wiki."""
return self.send('dokuwiki.getVersion')
@property
def time(self):
"""Property that returns the current time at the remote wiki server as
Unix timestamp.
"""
return self.send('dokuwiki.getTime')
@property
def xmlrpc_version(self):
"""Property that returns the XML RPC interface version of the remote
Wiki. This is DokuWiki implementation specific and independent of the
supported standard API version returned by ``wiki.getRPCVersionSupported``.
"""
return self.send('dokuwiki.getXMLRPCAPIVersion')
@property
def xmlrpc_supported_version(self):
"""Property that returns *2* with the supported RPC API version."""
return self.send('wiki.getRPCVersionSupported')
@property
def title(self):
"""Property that returns the title of the wiki."""
return self.send('dokuwiki.getTitle')
def login(self, user, password):
"""Log to the wiki using *user* and *password* credentials. It returns
        a boolean that indicates whether the user successfully authenticated."""
return self.send('dokuwiki.login', user, password)
def add_acl(self, scope, user, permission):
"""Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
the page/namespace *scope* to *user* (use *@group* syntax for groups)
        with *permission* level. It returns a boolean that indicates if the rule
was correctly added.
"""
return self.send('plugin.acl.addAcl', scope, user, permission)
def del_acl(self, scope, user):
"""Delete any ACL matching the given *scope* and *user* (or group if
        *@group* syntax is used). It returns a boolean that indicates if the rule
was correctly removed.
"""
return self.send('plugin.acl.delAcl', scope, user)
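# A short usage sketch (illustrative, not part of the module). The URL and the
# credentials are placeholders; the snippet only exercises the API defined above.
def _dokuwiki_usage_example():
    wiki = DokuWiki('https://wiki.example.org', 'user', 'secret')
    print(wiki.version)
    wiki.pages.set('sandbox:demo', '====== Demo ======', sum='created from the API')
    return wiki.pages.get('sandbox:demo')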
class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
            # Sometimes the first line of the XML response is blank, which raises
            # the 'ExpatError' exception although the change has been made. This
            # allows the error to be ignored.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
class _Medias(object):
"""This | |
        # Update the database change flag and the version number.
self.changed = 1
self.version += 1
# Return the results.
return val
def __remove(self,which,var):
# Special helper function - it assumes the table is already locked. This is
# used in the loading function when a variable needs to be removed.
#
# Do we have the specific sub-dictionary we need? If not, the
        # variable doesn't exist, so no need in going any further.
if which in self.db.keys():
# Does the variable exist in the dictionary?
if var in self.db[which].keys():
# Yes. So delete it from the dictionary.
del self.db[which][var]
# If there is an expiry on the variable, get rid of it too.
self.__clearExpire(which,var)
@synchronized(lock)
def remove(self,which,var):
# Do the actual remove
self.__remove(which,var)
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
@synchronized(lock)
def keys(self,which,match):
"""
        Return all the variable names in the given table that match the glob pattern *match*.
"""
matched = [x for x in self.db[which].keys() if fnmatch.fnmatch(x,match)]
s = " ".join(matched)
# Increment the total op. count.
self.totalOperations += 1
# Return results
return s
@synchronized(lock)
def re(self,which,match):
"""
Return all the variable names in the database that match
        the regular expression *match*.
"""
p = re.compile(match)
matched = [x for x in self.db[which].keys() if p.match(x)]
s = " ".join(matched)
# Increment the total op. count.
self.totalOperations += 1
# Return results
return s
@synchronized(lock)
def getType(self,which,var):
"""
Return the type of a variable, which can be one of:
none - variable doesn't exist
string - variable is a string type
list - variable is a list
set - variable is a set
"""
if var in self.db[which].keys():
val = self.db[which][var]
if type(val) is type(""):
val = "string"
elif type(val) is type([]):
val = "list"
else:
val = "set"
else:
val = "none"
        # Increment the total op. count.
self.totalOperations += 1
# Return the results
return val
def __expire(self,which,var,secs):
# Special helper function; used when loading files.
#
dt = time.time() + secs
self.__setExpire(which,var,dt)
# Trigger an event to remove it.
self.timerque.enq({'data': [which,var,self], 'timestamp':dt })
@synchronized(lock)
def expire(self,which,var,secs):
"""
        Set a time-to-live limit on a variable. The variable must exist
        and the time is given in seconds. Time-to-live values persist across
        invocations.
"""
# Make sure we have the variable in the system.
if var not in self.db[which].keys():
raise ERR("no such key")
        # See if there is an expiration already on the variable. If so,
        # we don't update it; otherwise we set it. Note that if we skip
        # the "then" clause, the return code is 0, which means the call
        # failed.
if self.__isExpire(which,var) == 0:
self.__expire(which,var,secs)
# Things have changed...
self.changed = 1
self.version += 1
        # Update the operation count.
self.totalOperations += 1
# Return the results
return 1
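    # Example (illustrative), assuming an instance of this class named `db`:
    # give "session" in table "0" a 30 second lifetime. The variable must
    # already exist, otherwise ERR("no such key") is raised.
    #
    #   db.expire("0", "session", 30)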
@synchronized(lock)
def getTTL(self,which,var):
"""
Get the current time to live value for a variable.
"""
        # NOTE: completion sketch -- the original body was only a "pass" stub.
        # It assumes __setExpire() stores the absolute expiry time in
        # self.expiredb[which][var]; adjust if the internal layout differs.
        self.totalOperations += 1
        if self.__isExpire(which, var):
            return int(self.expiredb[which][var] - time.time())
        return -1
@synchronized(lock)
def flushAll(self):
"""
Remove all the variables behind all the database tables.
"""
self.db = {"0":{}}
self.expiredb = {}
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
@synchronized(lock)
def flush(self,which):
"""
Remove all the variables behind a specific table.
"""
self.db[which] = {}
self.expiredb[which] = {}
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
@synchronized(lock)
def size(self,which):
"""
Return the number of keys in a specific table.
"""
# Compute how many keys are in the subdictionary
val = len(self.db[which].keys())
# Update the counters.
self.totalOperations += 1
# Return the count.
return val
@synchronized(lock)
def insert(self,which,var,index,val):
# Insert it...
if index == -1:
self.db[which][var].append(val)
else :
self.db[which][var].insert(index,val)
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
@synchronized(lock)
def listlen(self,which,var):
# Get the length of the list
rc = len(self.db[which][var])
# Update the total op. counter
self.totalOperations += 1
# Return the results.
return rc
@synchronized(lock)
def lrange(self,which,var,s,e):
# Does the variable exist?
if var in self.db[which].keys():
# Yes, so just extract the piece we want.
v = self.db[which][var]
if s < 0 : s = len(v) + s
if e < 0 : e = len(v) + e
e = e + 1
rc = v[s:e]
else:
# Nope, return an empty list.
rc = []
# Update the operation counter.
self.totalOperations += 1
# Return the results
return rc
@synchronized(lock)
def ltrim(self,which,var,s,e):
        # Update the list variable... we keep the slice [s:e] of the list
v = self.db[which][var]
if e > len(v): e = len(v)
try:
self.db[which][var] = v[s:e]
except IndexError:
self.db[which][var] = []
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
@synchronized(lock)
def lindex(self,which,var,i):
try:
r = self.db[which][var][i]
except IndexError:
r = None
self.totalOperations += 1
return r
@synchronized(lock)
def lset(self,which,var,i,val):
# Find the variable and make sure we have a good
# index.
r = self.db[which][var]
if i >= len(r):
self.totalOperations += 1
return -1
r[i] = val
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
# Return the results
return 0
@synchronized(lock)
def lrem(self,which,var,num,val):
# Get the list we are to change...we are going to remove
        # at most 'num' instances of 'val' from the list. If
# num is 0, we will remove all instances.
v = self.db[which][var]
if num >= 0 :
start = 0
end = len(v)
incr = 1
rev = 0
if num == 0: num = end
else:
start = len(v)-1
end = -1
incr = -1
num = abs(num)
rev = 1
l = []
cnt = 0
for i in range(start,end,incr):
if v[i] != val:
l.append(v[i])
else:
if num > 0:
cnt = cnt + 1
num = num - 1
else:
l.append(v[i])
# Append always puts things at the end of the list, which is okay
# if we are scanning the list from 0 to the end ( num > -1). If we
# are scanning it the other way, we need to reverse the list.
if rev : l.reverse()
self.db[which][var] = l
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
# Return the results.
return cnt
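    # Worked example (illustrative): with self.db["0"]["l"] == ["a","b","a","c","a"],
    #   lrem("0", "l", 2, "a")   leaves ["b","c","a"] and returns 2
    #   lrem("0", "l", -1, "a")  leaves ["a","b","a","c"] and returns 1 (scans from the right)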
@synchronized(lock)
def lpop(self,which,var):
try:
# Does the variable exist? If so, pop it off the
# front of the list. If not, return None!
if var in self.db[which].keys():
rc = self.db[which][var].pop(0)
else:
rc = None
except IndexError:
rc = None
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
# Return either None, if there is no list or nothing
# on it or the thing.
return rc
@synchronized(lock)
def rpop(self,which,var):
try:
# See lpop for commentary...
if var in self.db[which].keys():
rc = self.db[which][var].pop()
else:
rc = None
except IndexError:
rc = None
# Increment the total op. count.
self.totalOperations += 1
self.totalChangeOperations += 1
# Update the database change flag and the version number.
self.changed = 1
self.version += 1
# Return any defined values.
return rc
@synchronized(lock)
def sadd(self,which,var,item):
if var not in self.db[which].keys():
self.db[which][var] = set()
        if item in self.db[which][var]:
# mlraglin/fury
"""UI container module."""
__all__ = ["Panel2D", "TabPanel2D", "TabUI", "ImageContainer2D",
"GridUI"]
import numpy as np
import vtk
from fury.io import load_image
from fury.ui.core import UI, Rectangle2D, TextBlock2D
from fury.utils import set_input, rotate
from fury.actor import grid
class Panel2D(UI):
"""A 2D UI Panel.
Can contain one or more UI elements.
Attributes
----------
alignment : [left, right]
Alignment of the panel with respect to the overall screen.
"""
def __init__(self, size, position=(0, 0), color=(0.1, 0.1, 0.1),
opacity=0.7, align="left"):
"""Init class instance.
Parameters
----------
size : (int, int)
Size (width, height) in pixels of the panel.
position : (float, float)
Absolute coordinates (x, y) of the lower-left corner of the panel.
color : (float, float, float)
Must take values in [0, 1].
opacity : float
Must take values in [0, 1].
align : [left, right]
Alignment of the panel with respect to the overall screen.
"""
super(Panel2D, self).__init__(position)
self.resize(size)
self.alignment = align
self.color = color
self.opacity = opacity
self.position = position
self._drag_offset = None
def _setup(self):
"""Setup this UI component.
Create the background (Rectangle2D) of the panel.
"""
self._elements = []
self.element_offsets = []
self.background = Rectangle2D()
self.add_element(self.background, (0, 0))
# Add default events listener for this UI component.
self.background.on_left_mouse_button_pressed = self.left_button_pressed
self.background.on_left_mouse_button_dragged = self.left_button_dragged
def _get_actors(self):
"""Get the actors composing this UI component."""
actors = []
for element in self._elements:
actors += element.actors
return actors
def _add_to_scene(self, scene):
"""Add all subcomponents or VTK props that compose this UI component.
Parameters
----------
scene : scene
"""
for element in self._elements:
element.add_to_scene(scene)
def _get_size(self):
return self.background.size
def resize(self, size):
"""Set the panel size.
Parameters
----------
size : (float, float)
Panel size (width, height) in pixels.
"""
self.background.resize(size)
def _set_position(self, coords):
"""Set the lower-left corner position of this UI component.
Parameters
----------
coords: (float, float)
Absolute pixel coordinates (x, y).
"""
coords = np.array(coords)
for element, offset in self.element_offsets:
element.position = coords + offset
@property
def color(self):
return self.background.color
@color.setter
def color(self, color):
self.background.color = color
@property
def opacity(self):
return self.background.opacity
@opacity.setter
def opacity(self, opacity):
self.background.opacity = opacity
def add_element(self, element, coords, anchor="position"):
"""Add a UI component to the panel.
The coordinates represent an offset from the lower left corner of the
panel.
Parameters
----------
element : UI
The UI item to be added.
coords : (float, float) or (int, int)
If float, normalized coordinates are assumed and they must be
between [0,1].
If int, pixels coordinates are assumed and it must fit within the
panel's size.
"""
coords = np.array(coords)
if np.issubdtype(coords.dtype, np.floating):
if np.any(coords < 0) or np.any(coords > 1):
raise ValueError("Normalized coordinates must be in [0,1].")
coords = coords * self.size
if anchor == "center":
element.center = self.position + coords
elif anchor == "position":
element.position = self.position + coords
else:
msg = ("Unknown anchor {}. Supported anchors are 'position'"
" and 'center'.")
raise ValueError(msg)
self._elements.append(element)
offset = element.position - self.position
self.element_offsets.append((element, offset))
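    # Example (illustrative), assuming a `panel` and a `text_block` element:
    # place it at the panel's center using normalized coordinates, or 10 px
    # from the lower-left corner using pixel coordinates.
    #
    #   panel.add_element(text_block, (0.5, 0.5), anchor="center")
    #   panel.add_element(text_block, (10, 10))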
def remove_element(self, element):
"""Remove a UI component from the panel.
Parameters
----------
element : UI
The UI item to be removed.
"""
idx = self._elements.index(element)
del self._elements[idx]
del self.element_offsets[idx]
def update_element(self, element, coords, anchor="position"):
"""Update the position of a UI component in the panel.
Parameters
----------
element : UI
The UI item to be updated.
coords : (float, float) or (int, int)
New coordinates.
If float, normalized coordinates are assumed and they must be
between [0,1].
If int, pixels coordinates are assumed and it must fit within the
panel's size.
"""
self.remove_element(element)
self.add_element(element, coords, anchor)
def left_button_pressed(self, i_ren, _obj, panel2d_object):
click_pos = np.array(i_ren.event.position)
self._drag_offset = click_pos - panel2d_object.position
i_ren.event.abort() # Stop propagating the event.
def left_button_dragged(self, i_ren, _obj, _panel2d_object):
if self._drag_offset is not None:
click_position = np.array(i_ren.event.position)
new_position = click_position - self._drag_offset
self.position = new_position
i_ren.force_render()
def re_align(self, window_size_change):
"""Re-organise the elements in case the window size is changed.
Parameters
----------
window_size_change : (int, int)
New window size (width, height) in pixels.
"""
if self.alignment == "left":
pass
elif self.alignment == "right":
self.position += np.array(window_size_change)
else:
msg = "You can only left-align or right-align objects in a panel."
raise ValueError(msg)
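# Minimal usage sketch (an assumption, not part of the original file): with a
# FURY-like Scene object, a panel could be assembled roughly as follows.
# Panel2D's constructor signature is not shown above, so the keyword arguments
# below are illustrative only:
#   panel = Panel2D(size=(300, 150))
#   panel.add_element(TextBlock2D(text="Title"), (0.1, 0.8))
#   panel.add_to_scene(scene)
#   panel.resize((400, 200))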
class TabPanel2D(UI):
"""Render content within a Tab.
Attributes
----------
content_panel: :class: 'Panel2D'
Hold all the content UI components.
text_block: :class: 'TextBlock2D'
Renders the title of the tab.
"""
def __init__(self, position=(0, 0), size=(100, 100),
title="New Tab", color=(0.5, 0.5, 0.5), content_panel=None):
"""Init class instance.
Parameters
----------
position : (float, float)
Absolute coordinates (x, y) of the lower-left corner of the
UI component
size : (int, int)
Width and height in pixels of this UI component.
title : str
Renders the title for Tab panel.
color : list of 3 floats
Background color of tab panel.
content_panel : Panel2D
Panel consisting of the content UI elements.
"""
self.content_panel = content_panel
self.panel_size = size
self._text_size = (int(1.0 * size[0]), size[1])
super(TabPanel2D, self).__init__()
self.title = title
self.panel.position = position
self.color = color
def _setup(self):
"""Setup this UI component.
Create parent panel.
Create Text to hold tab information.
Create Button to close tab.
"""
self.panel = Panel2D(size=self.panel_size)
self.text_block = TextBlock2D(size=self._text_size,
color=(0, 0, 0))
self.panel.add_element(self.text_block, (0, 0))
def _get_actors(self):
"""Get the actors composing this UI component."""
return self.panel.actors + self.content_panel.actors
def _add_to_scene(self, _scene):
"""Add all subcomponents or VTK props that compose this UI component.
Parameters
----------
scene : scene
"""
self.panel.add_to_scene(_scene)
self.content_panel.add_to_scene(_scene)
def _set_position(self, _coords):
"""Set the lower-left corner position of this UI component.
Parameters
----------
coords: (float, float)
Absolute pixel coordinates (x, y).
"""
self.panel.position = _coords
def _get_size(self):
return self.panel.size
def resize(self, size):
"""Resize Tab panel.
Parameters
----------
size : (int, int)
New width and height in pixels.
"""
self._text_size = (int(0.7 * size[0]), size[1])
self._button_size = (int(0.3 * size[0]), size[1])
self.panel.resize(size)
self.text_block.resize(self._text_size)
@property
def color(self):
"""Return the background color of tab panel."""
return self.panel.color
@color.setter
def color(self, color):
"""Set background color of tab panel.
Parameters
----------
color : list of 3 floats.
"""
self.panel.color = color
@property
def title(self):
"""Return the title of tab panel."""
return self.text_block.message
@title.setter
def title(self, text):
"""Set the title of tab panel.
Parameters
----------
text : str
New title for tab panel.
"""
self.text_block.message = text
def add_element(self, element, coords, anchor="position"):
"""Add a UI component to the content panel.
The coordinates represent an offset from the lower left corner of the
panel.
Parameters
----------
element : UI
The UI item to be added.
coords : (float, float) or (int, int)
If float, normalized coordinates are assumed and they must be
in [0, 1].
If int, pixel coordinates are assumed and they must fit within the
panel's size.
"""
element.set_visibility(False)
self.content_panel.add_element(element, coords, anchor)
def remove_element(self, element):
"""Remove a UI component from the content panel.
Parameters
----------
element : UI
The UI item to be removed.
"""
self.content_panel.remove_element(element)
def update_element(self, element, coords, anchor="position"):
"""Update the position of a UI component in the content panel.
Parameters
----------
element : UI
The UI item to be updated.
coords : (float, float) or (int, int)
New coordinates.
If float, normalized coordinates are assumed and they must be
in [0, 1].
If int, pixel coordinates are assumed and they must fit within the
panel's size.
"""
self.content_panel.update_element(element, coords, anchor)
class TabUI(UI):
"""UI element to add multiple panels within a single window.
Attributes
----------
tabs: :class: List of 'TabPanel2D'
Stores all the instances of 'TabPanel2D' that render the contents.
"""
def __init__(self, position=(0, 0), size=(100, 100), nb_tabs=1,
active_color=(1, 1, 1), inactive_color=(0.5, 0.5, 0.5),
draggable=False):
"""Init class instance.
Parameters
----------
position : (float, float)
Absolute coordinates (x, y) of the lower-left corner of this
UI component.
size : (int, int)
Width and height in pixels of this UI component.
nb_tabs : int
Number of tabs to be rendered.
active_color : tuple of 3 floats.
Background color of active tab panel.
inactive_color : tuple of 3 floats.
Background color of inactive tab panels.
draggable : bool
Whether the UI element is draggable or not.
"""
self.tabs = []
self.nb_tabs = nb_tabs
self.parent_size = size
self.content_size = (size[0], int(0.9 * size[1]))
# cmiputil/dds.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module to parse DDS (Dataset Descriptor Structure) used in OPeNDAP.
DDS
---
For the definition of DDS, see `OpenDAP UserGuide`_.
In this module, we change the notation in the DDS syntax as follows:
| *declarations* := list(*declaration*)
| *declaration* := *Var* | *Struct*
| *Struct* := *stype* { *declarations* } (*name* | *name* *arr*)
| *stype* := Dataset|Structure|Sequence|Grid
| *Grid* := Grid { ARRAY: *declaration* MAPS: *declarations* } (*name* | *name* *arr*)
| *Var* := *btype* (*name* | *name* *arr*)
| *btype* := Byte|Int32|UInt32|Float64|String|Url| ...
| *arr* := [integer] | [*name* = integer]
As you can see from the syntax above, one *Struct* can contain other *Struct*s recursively, forming
a tree structure. The root of the tree must be a single "Dataset".
In this module, each element of above syntax is implemented as one class.
Basic Usage
-----------
The text form of a DDS can be obtained by, for example,
:meth:`.ESGFDataInfo.getDDS`. Use :func:`parse_dataset` to parse it and
obtain the tree structure. The root of the tree is a :class:`Dataset`
instance, and you can access the nodes and leaves of the tree by dot
notation (see also the 'Example' section below)::
ds = parse_dataset(text=sample1)
ds.tas # Grid('tas', array=Var('tas', ...), maps={'time': ..., 'lat': ..., 'lon': ...})
ds.tas.array.arr[0] # Arr('time', 8412)
.. _OpenDAP UserGuide: https://opendap.github.io/documentation/UserGuideComprehensive.html#DDS
Example:
>>> sample1 = '''
... Dataset {
... Float64 lat[lat = 160];
... Float64 lat_bnds[lat = 160][bnds = 2];
... Float64 lon[lon = 320];
... Float64 lon_bnds[lon = 320][bnds = 2];
... Float64 height;
... Float64 time[time = 8412];
... Float64 time_bnds[time = 8412][bnds = 2];
... Grid {
... ARRAY:
... Float32 tas[time = 8412][lat = 160][lon = 320];
... MAPS:
... Float64 time[time = 8412];
... Float64 lat[lat = 160];
... Float64 lon[lon = 320];
... } tas;
... } CMIP6.CMIP.MRI.MRI-ESM2-0.piControl.r1i1p1f1.Amon.tas.gn.tas.20190222.aggregation.1;'''
>>> sample1_struct = Dataset(
... 'CMIP6.CMIP.MRI.MRI-ESM2-0.piControl.r1i1p1f1.Amon.tas.gn.tas.20190222.aggregation.1',
... {
... 'lat':
... Var('lat', 'Float64', arr=[Arr('lat', 160)]),
... 'lat_bnds':
... Var('lat_bnds', 'Float64', arr=[Arr('lat', 160),
... Arr('bnds', 2)]),
... 'lon':
... Var('lon', 'Float64', arr=[Arr('lon', 320)]),
... 'lon_bnds':
... Var('lon_bnds', 'Float64', arr=[Arr('lon', 320),
... Arr('bnds', 2)]),
... 'height':
... Var('height', 'Float64'),
... 'time':
... Var('time', 'Float64', arr=[Arr('time', 8412)]),
... 'time_bnds':
... Var('time_bnds', 'Float64', arr=[Arr('time', 8412),
... Arr('bnds', 2)]),
... 'tas':
... Grid('tas',
... array=Var(
... 'tas',
... 'Float32',
... arr=[Arr('time', 8412),
... Arr('lat', 160),
... Arr('lon', 320)]),
... maps={
... 'time': Var('time', 'Float64', arr=[Arr('time', 8412)]),
... 'lat': Var('lat', 'Float64', arr=[Arr('lat', 160)]),
... 'lon': Var('lon', 'Float64', arr=[Arr('lon', 320)])
... })
... })
>>> sample1_struct == parse_dataset(sample1)
True
>>> from cmiputil import dds
>>> sample2 = '''
... Dataset {
... Int32 catalog_number;
... Sequence {
... String experimenter;
... Int32 time;
... Structure {
... Float64 latitude;
... Float64 longitude;
... } location;
... Sequence {
... Float64 depth;
... Float64 salinity;
... Float64 oxygen;
... Float64 temperature;
... } cast;
... } station;
... } data;
... '''
>>> sample2_struct = Dataset(
... 'data', {
... 'catalog_number':
... Var('catalog_number', 'Int32'),
... 'station':
... Sequence(
... 'station', {
... 'experimenter':
... Var('experimenter', 'String'),
... 'time':
... Var('time', 'Int32'),
... 'location':
... Structure(
... 'location', {
... 'latitude': Var('latitude', 'Float64'),
... 'longitude': Var('longitude', 'Float64')
... }),
... 'cast':
... Sequence(
... 'cast', {
... 'depth': Var('depth', 'Float64'),
... 'salinity': Var('salinity', 'Float64'),
... 'oxygen': Var('oxygen', 'Float64'),
... 'temperature': Var('temperature', 'Float64')
... })
... })
... })
>>> sample2_struct == parse_dataset(sample2)
True
>>> sample3 = '''
... Dataset {
... Structure {
... Float64 lat;
... Float64 lon;
... } location;
... Structure {
... Int32 minutes;
... Int32 day;
... Int32 year;
... } time;
... Float64 depth[500];
... Float64 temperature[500];
... } xbt-station;
... '''
>>> sample3_struct = Dataset(
... 'xbt-station', {
... 'location':
... Structure('location', {
... 'lat': Var('lat', 'Float64'),
... 'lon': Var('lon', 'Float64')
... }),
... 'time':
... Structure(
... 'time', {
... 'minutes': Var('minutes', 'Int32'),
... 'day': Var('day', 'Int32'),
... 'year': Var('year', 'Int32')
... }),
... 'depth':
... Var('depth', 'Float64', arr=[Arr('', 500)]),
... 'temperature':
... Var('temperature', 'Float64', arr=[Arr('', 500)])
... })
>>> sample3_struct == parse_dataset(sample3)
True
"""
import enum
import re
import textwrap as tw
from pprint import pprint
_debug = False
def _enable_debug():
global _debug
_debug = True
def _disable_debug():
global _debug
_debug = False
def _debug_write(text):
global _debug
if _debug:
print(text)
class BType(enum.Enum):
"""
Values for :attr:`.Var.btype`.
"""
Byte = 'Byte'
Int16 = 'Int16'
Int32 = 'Int32'
UInt32 = 'UInt32'
Float32 = 'Float32'
Float64 = 'Float64'
String = 'String'
Url = 'Url'
class SType(enum.Enum):
"""
Values for :attr:`Struct.stype`
"""
Dataset = 'Dataset'
Structure = 'Structure'
Sequence = 'Sequence'
Grid = 'Grid'
_idents_btype = [t.name for t in BType]
_idents_stype = [t.name for t in SType]
_idents = _idents_btype + _idents_stype
_pat_idents_stype = re.compile(r'^\s*(' + '|'.join(_idents_stype) + ')')
_pat_ident = re.compile(r'^\s*(' + '|'.join(_idents) + ')')
_pat_struct = re.compile(
r'^\s*(' + r'|'.join(_idents_stype) + r')\s*\{(.*)\}\s*(\S+);\s*',
re.DOTALL)
_pat_dataset = re.compile(r'^\s*Dataset\s+'
r'\{(.+)\}\s*(\S+);\s*$', re.DOTALL)
_pat_grid = re.compile(
r'^\s*Grid\s*\{\s*Array:(.+)Maps:'
r'\s*(.+)\s*\}\s*(\w+);', re.IGNORECASE | re.DOTALL)
_pat_varline = re.compile(r'^\s*(\w+)\s*(\w+)(\[.+\])*;\s*$', re.DOTALL)
_pat_arrdecl = re.compile(r'\[(\w+?)\s*=\s*(\d+)\]')
_pat_arrdecl_valonly = re.compile(r'^\s*\[(\d+)\]')
_pat_arrdecl_line = re.compile(r'\[(?:\w+?\s*=)*\s*\d+\]')
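# Illustrative matches for the patterns above (added for clarity; not part of
# the original file):
#   _pat_varline.match('Float64 lat[lat = 160];').groups()
#       -> ('Float64', 'lat', '[lat = 160]')
#   _pat_arrdecl.findall('[time = 8412][bnds = 2]')
#       -> [('time', '8412'), ('bnds', '2')]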
class Decls(dict):
"""
Class for *declarations*.
| *declarations* := list(*declaration*)
In this module, *declarations* are expressed as `dict`, not
`list`. At this point, this class is just an alias for `dict`.
"""
pass
class Decl:
"""
Class for *declaration*, that is, base class for :class:`Var`
and :class:`Struct`. No need to use this class explicitly.
| *declaration* := *Var* | *Struct*
"""
def __init__(self, name=''):
self.name = name
def __eq__(self, other):
_debug_write(f'Decl.__eq__():{type(self)},{type(other)}')
if not isinstance(other, type(self)):
return False
res = [getattr(self, a) == getattr(other, a) for a in self.__dict__]
return all(res)
def text_formatted(self, indent=None, linebreak=True):
pass
class Struct(Decl):
"""
Class for *struct*, that is, base class for :class:`Structure`,
:class:`Sequence`, :class:`Grid` and :class:`Dataset`.
Do not use this directly.
| *struct* := *stype* { *declarations* } *var*
| *stype* := Dataset|Structure|Sequence|Grid
You can access items of ``self.decl`` as if they are the attribute
of this class, via dot notation.
Examples:
>>> text = '''
... Sequence {
... Float64 depth;
... Float64 salinity;
... Float64 oxygen;
... Float64 temperature;
... } cast;'''
>>> s = Sequence(text=text)
>>> s.salinity
Var('salinity', 'Float64')
>>> text = '''
... Dataset {
... Int32 catalog_number;
... Sequence {
... String experimenter;
... Int32 time;
... Structure {
... Float64 latitude;
... Float64 longitude;
... } location;
... } station;
... } data;'''
>>> d = parse_dataset(text)
>>> d.station.location.latitude
Var('latitude', 'Float64')
Attributes:
name(str): *name*
stype(SType): *stype*
decl(Decls)): *declarations*
"""
stype = None
def __init__(self, name='', decl=None, text=None):
"""
Parameters:
name(str): *name*
decl(str or Decls)): *declarations*
text(str): text to be parsed.
If `text` is *not* ``None``, the other attributes are overridden by
the result of :meth:`.parse` or left untouched.
"""
if text:
_debug_write(f'{self.__class__.__name__}' f"text='{text}'")
self.parse(text)
else:
self.name = name
if decl is None:
self.decl = None
elif isinstance(decl, dict):
self.decl = decl
elif type(decl) is str:
self.decl = parse_declarations(decl)
else:
raise TypeError(f'decl={decl} is invalid type: {type(decl)}')
def parse(self, text):
"""
Parse `text` to construct :class:`Struct`.
If given `text` is not valid for each subclass, the instance
is left as 'null' instance.
"""
_debug_write(f'{self.__class__.__name__}.parse: text="{text}"')
res = _pat_struct.match(text)
if not res:
return None
_debug_write(f'{self.__class__.__name__}.parse:name="{res.group(3)}"')
_debug_write(f'{self.__class__.__name__}.parse:decl="{res.group(2)}"')
if self.stype and self.stype == SType(res.group(1)):
self.decl = parse_declarations(res.group(2))
self.name = res.group(3)
def __getattr__(self, key):
# print('__getattr__() called')
if key in self.decl:
return self.decl[key]
else:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{key}'")
def __getitem__(self, key):
# print('__getitem__() called')
if key in self.decl:
return self.decl[key]
else:
raise KeyError(f"'{key}'")
def __contains__(self, item):
# print('__contains__() called')
return (item in self.__dict__) or (item in self.decl)
def __repr__(self):
if self.name:
name = f"'{self.name}'"
else:
name = ''
if self.decl:
# decl = f'decl={self.decl.__repr__()}'
decl = f'{self.decl.__repr__()}'
else:
decl = ''
res = ', '.join([l for l in [name, decl] if l])
return (f'{self.__class__.__name__}({res})')
def __str__(self):
return self.text_formatted()
def text_formatted(self, indent=4, linebreak=True):
"""
Return formatted text.
"""
_debug_write(
f'{self.__class__.__name__}.text_formatted:indent={indent},linebreak={linebreak}'
)
if self.name:
name = self.name + ';'
else:
name = ''
if self.stype:
stype = f'{self.stype.name}'
else:
stype = ''
if self.decl:
if linebreak:
lb = '\n'
else:
lb = ''
decl = f'{lb}'.join([
self.decl[d].text_formatted(indent, linebreak)
for d in self.decl if d
])
decl = tw.indent(decl, ' ' * indent)
decl = f'{lb}'.join(('{', decl, '}'))
])
self.ContentionCount = v_uint32()
self.NumberOfSharedWaiters = v_uint16()
self.NumberOfExclusiveWaiters = v_uint16()
self.Address = v_ptr32()
self.SpinLock = v_uint32()
class FILE_NETWORK_OPEN_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LastAccessTime = LARGE_INTEGER()
self.LastWriteTime = LARGE_INTEGER()
self.ChangeTime = LARGE_INTEGER()
self.AllocationSize = LARGE_INTEGER()
self.EndOfFile = LARGE_INTEGER()
self.FileAttributes = v_uint32()
self._pad0038 = v_bytes(size=4)
class GENERAL_LOOKASIDE::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LastAllocateMisses = v_uint32()
class OBJECTOWNER_S(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = v_uint32()
class OBJECT_HANDLE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HandleAttributes = v_uint32()
self.GrantedAccess = v_uint32()
class IRP::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Overlay = IRP::__unnamed::__unnamed()
self._pad0030 = v_bytes(size=8)
class INITIAL_PRIVILEGE_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PrivilegeCount = v_uint32()
self.Control = v_uint32()
self.Privilege = vstruct.VArray([ LUID_AND_ATTRIBUTES() for i in xrange(3) ])
class ULARGE_INTEGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class GENERAL_LOOKASIDE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListHead = SLIST_HEADER()
self.Depth = v_uint16()
self.MaximumDepth = v_uint16()
self.TotalAllocates = v_uint32()
self.AllocateMisses = v_uint32()
self.TotalFrees = v_uint32()
self.FreeMisses = v_uint32()
self.Type = v_uint32()
self.Tag = v_uint32()
self.Size = v_uint32()
self.Allocate = v_ptr32()
self.Free = v_ptr32()
self.ListEntry = LIST_ENTRY()
self.LastTotalAllocates = v_uint32()
self.LastAllocateMisses = v_uint32()
self.Future = vstruct.VArray([ v_uint32() for i in xrange(2) ])
class EX_PUSH_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Waiting = v_uint32()
class CM_PARTIAL_RESOURCE_DESCRIPTOR::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Generic = CM_PARTIAL_RESOURCE_DESCRIPTOR::__unnamed::__unnamed()
class SECTION_OBJECT_POINTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSectionObject = v_ptr32()
self.SharedCacheMap = v_ptr32()
self.ImageSectionObject = v_ptr32()
class DEVOBJ_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
class tagVSTATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.fl = v_uint32()
self.bSystemStable = v_uint32()
self.ulRandomSeed = v_uint32()
self.ulFailureMask = v_uint32()
self.ulDebugLevel = v_uint32()
self.hsemPoolTracker = v_ptr32()
self.lePoolTrackerHead = LIST_ENTRY()
class EX_RUNDOWN_REF::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
class POWER_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SystemState = v_uint32()
class UNICODE_STRING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.MaximumLength = v_uint16()
self.Buffer = v_ptr32()
class GDIHandleBitFields(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Index = v_uint32()
class ACCESS_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OperationID = LUID()
self.SecurityEvaluated = v_uint8()
self.GenerateAudit = v_uint8()
self.GenerateOnClose = v_uint8()
self.PrivilegesAllocated = v_uint8()
self.Flags = v_uint32()
self.RemainingDesiredAccess = v_uint32()
self.PreviouslyGrantedAccess = v_uint32()
self.OriginalDesiredAccess = v_uint32()
self.SubjectSecurityContext = SECURITY_SUBJECT_CONTEXT()
self.SecurityDescriptor = v_ptr32()
self.AuxData = v_ptr32()
self.Privileges = ACCESS_STATE::__unnamed()
self.AuditPrivileges = v_uint8()
self._pad0064 = v_bytes(size=3)
self.ObjectName = UNICODE_STRING()
self.ObjectTypeName = UNICODE_STRING()
class FILE_STANDARD_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AllocationSize = LARGE_INTEGER()
self.EndOfFile = LARGE_INTEGER()
self.NumberOfLinks = v_uint32()
self.DeletePending = v_uint8()
self.Directory = v_uint8()
self._pad0018 = v_bytes(size=2)
class KAPC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.Spare0 = v_uint32()
self.Thread = v_ptr32()
self.ApcListEntry = LIST_ENTRY()
self.KernelRoutine = v_ptr32()
self.RundownRoutine = v_ptr32()
self.NormalRoutine = v_ptr32()
self.NormalContext = v_ptr32()
self.SystemArgument1 = v_ptr32()
self.SystemArgument2 = v_ptr32()
self.ApcStateIndex = v_uint8()
self.ApcMode = v_uint8()
self.Inserted = v_uint8()
self._pad0030 = v_bytes(size=1)
class WAIT_CONTEXT_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WaitQueueEntry = KDEVICE_QUEUE_ENTRY()
self.DeviceRoutine = v_ptr32()
self.DeviceContext = v_ptr32()
self.NumberOfMapRegisters = v_uint32()
self.DeviceObject = v_ptr32()
self.CurrentIrp = v_ptr32()
self.BufferChainingDpc = v_ptr32()
class EX_PUSH_LOCK::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Waiting = v_uint32()
class MDL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Size = v_uint16()
self.MdlFlags = v_uint16()
self.Process = v_ptr32()
self.MappedSystemVa = v_ptr32()
self.StartVa = v_ptr32()
self.ByteCount = v_uint32()
self.ByteOffset = v_uint32()
class EX_RUNDOWN_REF(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Count = v_uint32()
class W32THREAD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.pEThread = v_ptr32()
self.RefCount = v_uint32()
self.ptlW32 = v_ptr32()
self.pgdiDcattr = v_ptr32()
self.pgdiBrushAttr = v_ptr32()
self.pUMPDObjs = v_ptr32()
self.pUMPDHeap = v_ptr32()
self.dwEngAcquireCount = v_uint32()
self.pSemTable = v_ptr32()
self.pUMPDObj = v_ptr32()
class KDPC(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Number = v_uint8()
self.Importance = v_uint8()
self.DpcListEntry = LIST_ENTRY()
self.DeferredRoutine = v_ptr32()
self.DeferredContext = v_ptr32()
self.SystemArgument1 = v_ptr32()
self.SystemArgument2 = v_ptr32()
self.Lock = v_ptr32()
class OWNER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OwnerThread = v_uint32()
self.OwnerCount = v_uint32()
class KEVENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
class KSEMAPHORE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Header = DISPATCHER_HEADER()
self.Limit = v_uint32()
class PAGED_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.L = GENERAL_LOOKASIDE()
self.Lock__ObsoleteButDoNotDelete = FAST_MUTEX()
class OBJECT_TYPE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Mutex = ERESOURCE()
self.TypeList = LIST_ENTRY()
self.Name = UNICODE_STRING()
self.DefaultObject = v_ptr32()
self.Index = v_uint32()
self.TotalNumberOfObjects = v_uint32()
self.TotalNumberOfHandles = v_uint32()
self.HighWaterNumberOfObjects = v_uint32()
self.HighWaterNumberOfHandles = v_uint32()
self.TypeInfo = OBJECT_TYPE_INITIALIZER()
self.Key = v_uint32()
self.ObjectLocks = vstruct.VArray([ ERESOURCE() for i in xrange(4) ])
class DEVICE_OBJECT::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self._pad0028 = v_bytes(size=32)
class DISPATCHER_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint8()
self.Absolute = v_uint8()
self.Size = v_uint8()
self.Inserted = v_uint8()
self.SignalState = v_uint32()
self.WaitListHead = LIST_ENTRY()
class IRP::__unnamed::__unnamed::__unnamed::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CurrentStackLocation = v_ptr32()
class HOBJ(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.unused = v_uint32()
class IO_TIMER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
class OBJECT_TYPE_INITIALIZER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.UseDefaultObject = v_uint8()
self.CaseInsensitive = v_uint8()
self.InvalidAttributes = v_uint32()
self.GenericMapping = GENERIC_MAPPING()
self.ValidAccessMask = v_uint32()
self.SecurityRequired = v_uint8()
self.MaintainHandleCount = v_uint8()
self.MaintainTypeList = v_uint8()
self._pad0020 = v_bytes(size=1)
self.PoolType = v_uint32()
self.DefaultPagedPoolCharge = v_uint32()
self.DefaultNonPagedPoolCharge = v_uint32()
self.DumpProcedure = v_ptr32()
self.OpenProcedure = v_ptr32()
self.CloseProcedure = v_ptr32()
self.DeleteProcedure = v_ptr32()
self.ParseProcedure = v_ptr32()
self.SecurityProcedure = v_ptr32()
self.QueryNameProcedure = v_ptr32()
self.OkayToCloseProcedure = v_ptr32()
class SCSI_REQUEST_BLOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
class IO_STACK_LOCATION::__unnamed::__unnamed::__unnamed::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReplaceIfExists = v_uint8()
self.AdvanceOnly = v_uint8()
class FILE_BASIC_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CreationTime = LARGE_INTEGER()
self.LastAccessTime = LARGE_INTEGER()
self.LastWriteTime = LARGE_INTEGER()
self.ChangeTime = LARGE_INTEGER()
self.FileAttributes = v_uint32()
self._pad0028 = v_bytes(size=4)
class DEVICE_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.ReferenceCount = v_uint32()
self.DriverObject = v_ptr32()
self.NextDevice = v_ptr32()
self.AttachedDevice = v_ptr32()
self.CurrentIrp = v_ptr32()
self.Timer = v_ptr32()
self.Flags = v_uint32()
self.Characteristics = v_uint32()
self.Vpb = v_ptr32()
self.DeviceExtension = v_ptr32()
self.DeviceType = v_uint32()
self.StackSize = v_uint8()
self._pad0034 = v_bytes(size=3)
self.Queue = DEVICE_OBJECT::__unnamed()
self.AlignmentRequirement = v_uint32()
self.DeviceQueue = KDEVICE_QUEUE()
self.Dpc = KDPC()
self.ActiveThreadCount = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.DeviceLock = KEVENT()
self.SectorSize = v_uint16()
self.Spare1 = v_uint16()
self.DeviceObjectExtension = v_ptr32()
self.Reserved = v_ptr32()
class LIST_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flink = v_ptr32()
self.Blink = v_ptr32()
class EINFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.pobj = v_ptr32()
class SECURITY_QUALITY_OF_SERVICE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.ImpersonationLevel = v_uint32()
self.ContextTrackingMode = v_uint8()
self.EffectiveOnly = v_uint8()
self._pad000c = v_bytes(size=2)
class COMPRESSED_DATA_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CompressionFormatAndEngine = v_uint16()
self.CompressionUnitShift = v_uint8()
self.ChunkShift = v_uint8()
self.ClusterShift = v_uint8()
self.Reserved = v_uint8()
self.NumberOfChunks = v_uint16()
self.CompressedChunkSizes = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class BASEOBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.hHmgr = v_ptr32()
self.ulShareCount = v_uint32()
self.cExclusiveLock = v_uint16()
self.BaseFlags = v_uint16()
self.Tid = v_ptr32()
class HSEMAPHORE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.unused = v_uint32()
class LUID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class LARGE_INTEGER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class tagPOOLRECORD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExtraData = v_ptr32()
self.size = v_uint32()
self.trace = vstruct.VArray([ v_ptr32() for i in xrange(6) ])
class GUID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Data1 = v_uint32()
self.Data2 = v_uint16()
self.Data3 = v_uint16()
self.Data4 = vstruct.VArray([ v_uint8() for i in xrange(8) ])
class OBJECT_DUMP_CONTROL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Stream = v_ptr32()
self.Detail = v_uint32()
class NPAGED_LOOKASIDE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.L = GENERAL_LOOKASIDE()
self.Lock__ObsoleteButDoNotDelete = v_uint32()
self._pad0050 = v_bytes(size=4)
class ULARGE_INTEGER::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.HighPart = v_uint32()
class OBJECTOWNER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Share = OBJECTOWNER_S()
class tagWin32PoolHead(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.size = v_uint32()
self.pPrev = v_ptr32()
self.pNext = v_ptr32()
self.pTrace = v_ptr32()
class TL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.next = v_ptr32()
self.pobj = v_ptr32()
self.pfnFree = v_ptr32()
class IO_STACK_LOCATION::__unnamed::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Argument1 = v_ptr32()
self.Argument2 = v_ptr32()
self.Argument3 = v_ptr32()
self.Argument4 = v_ptr32()
class CM_PARTIAL_RESOURCE_DESCRIPTOR::__unnamed::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DataSize = v_uint32()
self.Reserved1 = v_uint32()
self.Reserved2 = v_uint32()
class ERESOURCE::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Address = v_ptr32()
class IO_STACK_LOCATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MajorFunction = v_uint8()
self.MinorFunction = v_uint8()
self.Flags = v_uint8()
self.Control = v_uint8()
self.Parameters = IO_STACK_LOCATION::__unnamed()
self.DeviceObject = v_ptr32()
self.FileObject = v_ptr32()
self.CompletionRoutine = v_ptr32()
self.Context = v_ptr32()
class IO_RESOURCE_DESCRIPTOR::__unnamed::__unnamed(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Priority = v_uint32()
self.Reserved1 = v_uint32()
self.Reserved2 = v_uint32()
class STRING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint16()
self.MaximumLength = v_uint16()
self.Buffer = v_ptr32()
class GENERIC_MAPPING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.GenericRead = v_uint32()
self.GenericWrite = v_uint32()
self.GenericExecute = v_uint32()
self.GenericAll = v_uint32()
class IRP(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.MdlAddress = v_ptr32()
self.Flags = v_uint32()
self.AssociatedIrp = IRP::__unnamed()
self.ThreadListEntry = LIST_ENTRY()
self.IoStatus = IO_STATUS_BLOCK()
self.RequestorMode = v_uint8()
self.PendingReturned = v_uint8()
self.StackCount = v_uint8()
self.CurrentLocation = v_uint8()
self.Cancel = v_uint8()
self.CancelIrql = v_uint8()
self.ApcEnvironment = v_uint8()
self.AllocationFlags = v_uint8()
self.UserIosb = v_ptr32()
self.UserEvent = v_ptr32()
self.Overlay = IRP::__unnamed()
self.CancelRoutine = v_ptr32()
self.UserBuffer = v_ptr32()
self.Tail = IRP::__unnamed()
class OBJECT_NAME_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = UNICODE_STRING()
class IO_RESOURCE_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint16()
self.Revision = v_uint16()
self.Count = v_uint32()
self.Descriptors = vstruct.VArray([ IO_RESOURCE_DESCRIPTOR() for i in xrange(1) ])
class DRIVER_OBJECT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self.DeviceObject = v_ptr32()
# leddartech/pioneer.common
from pioneer.common import plane, linalg
from pioneer.common.logging_manager import LoggingManager
from numpy.matlib import repmat
import math
import numpy as np
import os
import transforms3d
def grid(v, h, v_from, v_to, h_from, h_to, dtype = np.float32):
'''
Computes a matrix of all possible pairs of angles in the 2d field of view.
\param v vertical resolution
\param h horizontal resolution
\param v_from, v_to vertical angle range in radians (endpoints included)
\param h_from, h_to horizontal angle range in radians (endpoints included)
\param dtype the numpy data type
'''
# If you are wondering why there is a complex number here. Read the
# numpy mgrid documentation. The lines below are equivalent to:
# a = np.linspace(v_from, v_to, v)
# b = np.linspace(h_from, h_to, h)
# b, a = np.meshgrid(b, a)
a, b = np.mgrid[ v_from:v_to:complex(0,v)
, h_from:h_to:complex(0,h)]
return np.c_[a.ravel(), b.ravel()].astype(dtype)
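# Worked example (added for clarity; not part of the original file): the grid
# is row-major over the vertical axis first, e.g.
#   grid(2, 3, -1.0, 1.0, -2.0, 2.0)
#   -> [[-1., -2.], [-1., 0.], [-1., 2.], [1., -2.], [1., 0.], [1., 2.]]
# i.e. a (v*h, 2) array of (vertical, horizontal) angle pairs.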
def from_specs_dict(specs):
return (specs[k] for k in ['v', 'h', 'v_fov', 'h_fov'])
def angles(v, h = None, v_fov = None, h_fov = None, dtype = np.float32):
'''
Computes a matrix of all possible pairs of angles in the 2d field of view.
The generated grid follows the LCA axis system convention. That is the
bottom-left corner (0, 0) corresponds to (-v_fov/2, -h_fov/2) and the top
right corner (v-1, h-1) is (+v_fov/2, +h_fov/2). The grid is generated in
a row-major order.
\param v vertical resolution (or a dict with keys 'v', 'h', 'v_fov', 'h_fov')
\param h horizontal resolution
\param v_fov vertical field of view (degrees)
\param h_fov horizontal field of view (degrees)
\param dtype the numpy data type
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
v_offset = v_fov_rad/v/2
h_offset = h_fov_rad/h/2
return grid(v,h, -v_fov_rad/2 + v_offset, v_fov_rad/2 - v_offset
, -h_fov_rad/2 + h_offset, h_fov_rad/2 - h_offset, dtype)
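# Worked example (added for clarity): angles(2, 2, v_fov=20, h_fov=40) keeps a
# half-cell margin on each side of the fov, returning (in degrees here,
# radians in the actual output) the pairs
#   (-5, -10), (-5, +10), (+5, -10), (+5, +10)
# in row-major order, bottom-left first.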
def raycast_angles(v, h = None, v_fov = None, h_fov = None, density = 10, dtype = np.float32):
'''
Computes a densified matrix of all possible pairs of angles in the 2d field of view.
This matrix can be used to cast density * density rays per fov solid angle ('pixel')
\return the angle grid, and a mapping matrix m, where, m[dense_ray_i] == channel_i
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
dense_to_sparse = np.empty(v*h*density*density, 'u4')
sparse_to_dense = np.empty((v*h, density, density), 'u4')
dense_to_sub = np.empty((v*h*density*density, 2), 'u4')
m_i = 0
for v_i in range(v):
for vd_i in range(density):
for h_i in range(h):
for hd_i in range(density):
sparse_i = v_i * h + h_i
dense_to_sparse[m_i] = sparse_i
sparse_to_dense[sparse_i, vd_i, hd_i] = m_i
dense_to_sub[m_i] = [vd_i, hd_i]
m_i += 1
return grid(v * density,h * density, -v_fov_rad/2, v_fov_rad/2
, -h_fov_rad/2, h_fov_rad/2, dtype), dense_to_sparse, sparse_to_dense, dense_to_sub
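# Shape sketch (added for clarity): for v=2, h=3, density=10 the returned grid
# has (2*10)*(3*10) = 600 rows; dense_to_sparse has 600 entries mapping each
# dense ray back to one of the 2*3 = 6 channels; sparse_to_dense has shape
# (6, 10, 10); and dense_to_sub holds the (vd_i, hd_i) sub-cell index of each
# dense ray.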
def custom_v_angles(v, h = None, v_fov = None, h_fov = None, factor = 1, filename = os.path.join(os.path.dirname(__file__), 'eagle_angles_80.txt'), dtype = np.float32):
'''
similar to \a angles() but using a file to define scan direction angles
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
h_fov_rad = math.radians(h_fov)
h_offset = h_fov_rad/h/2
a = np.genfromtxt(filename, delimiter='\n', converters={_:lambda s: int(s, 16) for _ in range(1)})
a = a[:v]
a = a/2**16 * v_fov - v_fov/2
a = np.deg2rad(a) * factor
b = np.linspace(-h_fov_rad/2 + h_offset, h_fov_rad/2 - h_offset, num = h, dtype = dtype)
b, a = np.meshgrid(b, a)
return np.c_[a.ravel(), b.ravel()].astype(dtype)
def custom_v_quad_directions(v, h = None, v_fov = None, h_fov = None, factor = 1, filename = os.path.join(os.path.dirname(__file__), 'eagle_angles_80.txt'), dtype = np.float32):
'''
similar to \a quad_directions() but using a file to define scan direction angles
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
v_fov_rad = math.radians(v_fov)
h_fov_rad = math.radians(h_fov)
v_cell_size = v_fov_rad/v
h_cell_size = h_fov_rad/h
file_angles = np.genfromtxt(filename, delimiter='\n', converters={_:lambda s: int(s, 16) for _ in range(1)})
def custom_grid(v, h, v_offset, h_offset_from, h_offset_to, dtype):
a = file_angles[:v]
a = a/2**16 * v_fov - v_fov/2
a = (np.radians(a) - v_offset) * factor
b = np.linspace(-h_fov_rad/2+h_offset_from, h_fov_rad/2+h_offset_to, num = h, dtype = dtype)
b, a = np.meshgrid(b, a)
return np.c_[a.ravel(), b.ravel()].astype(dtype)
return np.vstack((
directions(custom_grid(v,h,-v_cell_size/2 ,h_cell_size , 0 , dtype))
,directions(custom_grid(v,h,+v_cell_size/2 ,h_cell_size , 0 , dtype))
,directions(custom_grid(v,h,+v_cell_size/2 ,0 , -h_cell_size, dtype))
,directions(custom_grid(v,h,-v_cell_size/2 ,0 , -h_cell_size, dtype)))
)
def direction(theta_x, theta_y):
'''
Convert angles of a spherical axis system into a cartesian direction vector.
The cartesian axis system is the camera axis system.
z
+-------> x
|
|
y v
The z axis enters your screen (or paper if you are the kind of person that
still prints code).
Angles go from -fov/2 to fov/2 in both horizontal and vertical direction, always computed
using "right hand" convention. In each direction, maximum z component will be attained at angle 0.
In the x-z plane (viewed from above):
pi/2
x ^
|
|<--.
|th_y\
------------(.)-------------> z
y
|
|
-pi/2
x = sin(theta_y)
z = cos(theta_y) //we want x,z = (0,1) at theta_y = 0
In the y-z plane (view from side):
z ^
|
|<--.
|th_x \ y
pi ------------(.)------------->
x
y = cos(theta_x + pi/2)
z = sin(theta_x + pi/2) //we want (y,z) = (0,1) at theta_x = 0
So the x, y, z coordinates should follow the equations below
x = sin(theta_y)
y = cos(theta_x + pi/2)
z = cos(theta_y) * sin(theta_x + pi/2)
'''
x = np.sin(theta_y)
y = np.cos(theta_x + np.pi/2)
z = np.sin(theta_x + np.pi/2) * np.cos(theta_y)
return x, y, z
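# Sanity check (added for clarity): direction(0.0, 0.0) evaluates to
# approximately (0, 0, 1), i.e. a ray straight along the +z (optical) axis,
# matching the convention described in the docstring above.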
def direction_spherical(thetas_x, thetas_y):
'''
LeddarConfig implementation
'''
x = np.cos(thetas_x) * np.sin(thetas_y)
y = np.sin(thetas_x)
z = np.cos(thetas_x) * np.cos(thetas_y)
return x, y, z
def direction_orthogonal(thetas_x, thetas_y):
'''
Simulator implementation using orthogonal camera depth projection
'''
x = np.tan(thetas_y)
y = np.tan(-thetas_x)
z = np.ones_like(x)
n = np.sqrt(z**2 + x**2 + y**2)
return x/n, y/n, z/n
def directions(angles, direction_f = direction):
'''Generate a set of cartesian direction vectors from a grid of
spherical coordinates angles. This function uses the same convention as
the `direction` function.
'''
thetas_x, thetas_y = angles.T
return np.stack(direction_f(thetas_x, thetas_y), axis=1)
def directions_orthogonal(v,h=None,v_fov=None,h_fov=None, dtype = np.float32):
'''Generate a set of cartesian direction vectors from a grid of
2D pixel coordinates (e.g. a camera depth map) using the Carla simulator
implementation of the camera depth projection.
'''
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
if h_fov > 90:
LoggingManager.instance().warning("The projection model is not adapted for horizontal fov greater than 90 degrees. Trying to correct the" \
+" situation by spliting the fov in three parts and re-merging them. Use 'projection: direction_carla_pixell' instead.")
return directions_orthogonal_pixell(v=v, h=h, v_fov=v_fov, h_fov=h_fov, dtype=dtype)
# (Intrinsic) K Matrix
k = np.identity(3)
k[0, 2] = h / 2.0
k[1, 2] = v / 2.0
k[0, 0] = k[1, 1] = h / \
(2.0 * math.tan(h_fov * math.pi / 360.0))
# 2d pixel coordinates
pixel_length = h * v
u_coord = repmat(np.r_[h-1:-1:-1],
v, 1).reshape(pixel_length)
v_coord = repmat(np.c_[v-1:-1:-1],
1, h).reshape(pixel_length)
# pd2 = [u,v,1]
p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])
direction = np.dot(np.linalg.inv(k), p2d).T
direction[:,0] = -direction[:,0]
v_cell_size, h_cell_size = v_h_cell_size_rad(v, h, v_fov, h_fov)
# First face
face_a = np.zeros((direction.shape))
face_a[:,0] = direction[:,0] - h_cell_size/2
face_a[:,1] = direction[:,1] - v_cell_size/2
face_a[:,2] = direction[:,2]
# Second face
face_b = np.zeros((direction.shape))
face_b[:,0] = direction[:,0] + h_cell_size/2
face_b[:,1] = direction[:,1] - v_cell_size/2
face_b[:,2] = direction[:,2]
# Third face
face_c = np.zeros((direction.shape))
face_c[:,0] = direction[:,0] + h_cell_size/2
face_c[:,1] = direction[:,1] + v_cell_size/2
face_c[:,2] = direction[:,2]
# Fourth face
face_d = np.zeros((direction.shape))
face_d[:,0] = direction[:,0] - h_cell_size/2
face_d[:,1] = direction[:,1] + v_cell_size/2
face_d[:,2] = direction[:,2]
quad_direction = np.vstack((face_a,face_b,face_c,face_d))
return direction,quad_direction
def directions_orthogonal_pixell(v, h=None, v_fov=None, h_fov=None, dtype = np.float32):
"""Returns directions and quad_directions for the carla simulator projection, in the case of a h_fov greater than 90 deg."""
if isinstance(v, dict):
v, h, v_fov, h_fov = from_specs_dict(v)
directions_central_third, quad_directions_central_third = directions_orthogonal(v=v, h=int(h/3), v_fov=v_fov, h_fov=h_fov/3)
rot_left = transforms3d.euler.euler2mat(0,np.deg2rad(h_fov/3),0)
rot_right = transforms3d.euler.euler2mat(0,np.deg2rad(-h_fov/3),0)
directions_left_third = directions_central_third @ rot_left
directions_right_third = directions_central_third @ rot_right
quad_directions_left_third = quad_directions_central_third @ rot_left
quad_directions_right_third = quad_directions_central_third @ rot_right
ind_tpm = np.arange(v*int(h/3)).reshape((v,int(h/3)))
ind = np.ravel(np.hstack([ind_tpm,ind_tpm+v*int(h/3),ind_tpm+2*v*int(h/3)]))
quad_ind_tpm = np.arange(4*v*int(h/3)).reshape((4*v,int(h/3)))
quad_ind = np.ravel(np.hstack([quad_ind_tpm,quad_ind_tpm+4*v*int(h/3),quad_ind_tpm+2*4*v*int(h/3)]))
# usnistgov/OOF3D
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# <EMAIL>.
from ooflib.SWIG.common import config
from ooflib.SWIG.common import crandom
from ooflib.SWIG.common import ooferror
from ooflib.SWIG.common import pixelgroup
from ooflib.SWIG.common import switchboard
from ooflib.SWIG.engine import skeletonselectioncourier
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import primitives
from ooflib.common import registeredclass
from ooflib.common import selectionoperators
from ooflib.common import utils
from ooflib.common.IO import parameter
from ooflib.common.IO import pixelgroupparam
from ooflib.common.IO import whoville
from ooflib.common.IO import xmlmenudump
from ooflib.engine import materialmanager
from ooflib.engine.IO import materialparameter
from ooflib.engine.IO import pbcparams
from ooflib.engine.IO import skeletongroupparams
import types
#Interface branch
from ooflib.engine.IO import interfaceparameters
import ooflib.engine.coverage
from ooflib.engine.skeletonselection import \
NodeSelectionModifier, SegmentSelectionModifier, \
FaceSelectionModifier, ElementSelectionModifier, \
NodeSelectionModRegistration, SegmentSelectionModRegistration, \
FaceSelectionModRegistration, ElementSelectionModRegistration
## TODO : Use couriers as in pixel selection.
## TODO 3.1: Add more face selection methods. Select by aspect ratio?
## Area?
## TODO 3.1: Add Region selection methods, in the style of BoxSelection
## for voxels.
# TODO 3.1: Can the Segment/Element/Node/Face operations here all be
# derived from common SkeletonSelectable classes?
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Ordering: The order in which the skeleton selection modifiers appear
# in the menus should be as similar as possible for the different
# selection modes. The order is determined by the "ordering" arg in
# the registrations.
## TODO: These aren't used consistently below. Make sure that all
## orderings use them.
_selectGroupOrdering = 0.0
# The "Select from Selected Object" methods have ordering=1.x, where
# x is the dimension of the Object.
_selectFromNodesOrdering = 1.0
_selectFromSegmentsOrdering = 1.1
_selectFromFacesOrdering = 1.2
_selectFromElementsOrdering = 1.3
# Other selection methods have ordering >= 2.0
_materialOrdering = 2.0
_pixelGroupOrdering = 2.1
_homogeneityOrdering = 3.3
_shapeEnergyOrdering = 3.4
_illegalOrdering = 3.45
_suspectOrdering = 3.46
_internalBoundaryOrdering = 3.5
_namedBoundaryOrdering = 3.6
_interfaceOrdering = 3.7
_expandOrdering = 4.0
_shrinkOrdering = 4.1
_invertOrdering = 5.0
_randomOrdering = 6.0
_periodicPartnerOrdering = 8
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Node selection modifiers
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class NodeFromSelectedSegments(NodeSelectionModifier):
def __init__(self, operator):
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.NodesFromSegmentsCourier(
skelctxt.getObject(),
skelctxt.segmentselection.currentSelectionTracker(),
clist, plist)
self.operator.operate(selection, courier)
NodeSelectionModRegistration(
'From Selected Segments',
NodeFromSelectedSegments,
ordering=_selectFromSegmentsOrdering,
params = [
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select nodes from selected segments.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/nodes_from_segments.xml'))
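# Note added for clarity: the modifiers in this file share a common pattern --
# a courier is built from the skeleton object, the relevant selection
# tracker(s) and the selection's output lists (clist, plist), and the chosen
# selection operator then drives it via self.operator.operate(selection,
# courier). The classes below differ mainly in which courier they construct.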
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class NodeFromSelectedElements(NodeSelectionModifier):
def __init__(self, coverage, operator):
self.coverage = coverage
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.NodesFromElementsCourier(
skelctxt.getObject(),
self.coverage,
skelctxt.elementselection.currentSelectionTracker(),
clist, plist)
self.operator.operate(selection, courier)
NodeSelectionModRegistration(
'From Selected Elements',
NodeFromSelectedElements,
ordering=_selectFromElementsOrdering,
params = [
enum.EnumParameter('coverage', ooflib.engine.coverage.Coverage),
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select nodes from selected elements.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/nodes_from_elements.xml'))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class NodeFromSelectedFaces(NodeSelectionModifier):
def __init__(self, coverage, operator):
self.coverage = coverage
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.NodesFromFacesCourier(
skelctxt.getObject(),
self.coverage,
skelctxt.faceselection.currentSelectionTracker(),
clist, plist)
self.operator.operate(selection, courier)
NodeSelectionModRegistration(
'From Selected Faces',
NodeFromSelectedFaces,
ordering=_selectFromFacesOrdering,
params = [
enum.EnumParameter('coverage', ooflib.engine.coverage.Coverage),
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select nodes from selected faces.")
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SelectInternalBoundaryNodes(NodeSelectionModifier):
def __init__(self, operator, ignorePBC=False):
self.operator = operator
self.ignorePBC = ignorePBC
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.InternalBoundaryNodesCourier(
skelctxt.getObject(), clist, plist)
self.operator.operate(selection, courier)
if config.dimension() == 2:
params=[
selectionoperators.SelectionOperatorParam('operator'),
pbcparams.PBCBooleanParameter('ignorePBC', False,
tip='Ignore periodicity?')]
else:
params = [selectionoperators.SelectionOperatorParam('operator')]
NodeSelectionModRegistration(
'Internal Boundaries',
SelectInternalBoundaryNodes,
ordering=_internalBoundaryOrdering,
params=params,
tip="Select all nodes on material or group boundaries.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/boundary_nodes.xml'))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SelectNamedBoundaryNodes(NodeSelectionModifier):
def __init__(self, boundary, operator):
self.boundary = boundary
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
bdy = skelctxt.getBoundary(self.boundary)
courier = skeletonselectioncourier.PointBoundaryCourier(
skelctxt.getObject(), bdy.getBoundarySet(skelctxt.getObject()),
clist, plist)
self.operator.operate(selection, courier)
NodeSelectionModRegistration(
'Named Boundary',
SelectNamedBoundaryNodes,
ordering=_namedBoundaryOrdering,
params=[
skeletongroupparams.SkeletonPointBoundaryParameter(
'boundary', tip="Select nodes in this boundary"),
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select nodes belonging to the given skeleton point boundary.",
discussion="""<para>
Select all the &nodes; contained in the given &skel;
<link linkend="Section-Concepts-Skeleton-Boundary">boundary</link>.
The boundary must be a
<link linkend="Section-Concepts-Skeleton-Boundary-Point">point</link>
boundary.
</para>"""
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
## This 2D method has not been updated to use selection couriers!
class SelectPeriodicPartnerNodes(NodeSelectionModifier):
def select(self, skeleton, selection):
oldnodes = skeleton.nodeselection.retrieve()
newnodes = set()
for node in oldnodes:
for p in node.getPartners():
newnodes.add(p)
selection.start()
selection.select(newnodes)
registeredclass.TwoDOnlyRegistration(
'Periodic Partners',
SelectPeriodicPartnerNodes,
ordering=_periodicPartnerOrdering,
tip="Select nodes whose periodic partners are already selected.",
discussion="""<para>
If the &skel; is <link
linkend="Section-Concepts-Skeleton-Periodicity">periodic</link>,
every &node; on a periodic boundary has a partner on the
opposite boundary. This command selects the periodic partners
of the currently selected &nodes;, without unselecting any
&nodes;.
</para>"""
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
## Since we only have tetrahedral elements in 3D, there's no
## difference between expanding by shared elements, segments, or
## faces. TODO 3.1: If we ever add non-tetrahedral elements, this
## should be rewritten to look like ExpandElementSelection.
class ExpandNodeSelection(NodeSelectionModifier):
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.ExpandNodeSelectionCourier(
skelctxt.getObject(),
skelctxt.nodeselection.currentSelectionTracker(),
clist, plist);
selection.select(courier)
NodeSelectionModRegistration(
'Expand',
ExpandNodeSelection,
ordering=_expandOrdering,
tip="Select the neighbors of selected Nodes.")
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Select the indicated group.
class NodeSelectGroup(NodeSelectionModifier):
def __init__(self, group, operator):
self.group = group
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.SkeletonGroupCourier(
skelctxt.getObject(),
self.group,
skelctxt.nodegroups.getTracker(skelctxt.getObject()),
clist, plist)
self.operator.operate(selection, courier)
NodeSelectionModRegistration(
'Group',
NodeSelectGroup,
ordering=_selectGroupOrdering,
params=[
skeletongroupparams.NodeGroupParameter('group',
tip="Node group to select."),
selectionoperators.SelectionOperatorParam('operator')
],
tip='Select the members of a group.',
discussion="""<para>
Select all the &nodes; in the given <link
linkend='Section-Concepts-Skeleton-Groups'>group</link>.
</para>""")
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Segment Selection Modifiers
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SegFromSelectedElements(SegmentSelectionModifier):
def __init__(self, coverage, operator):
self.coverage = coverage
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.SegmentsFromElementsCourier(
skelctxt.getObject(),
self.coverage,
skelctxt.elementselection.currentSelectionTracker(),
clist, plist)
self.operator.operate(selection, courier)
SegmentSelectionModRegistration(
'From Selected Elements',
SegFromSelectedElements,
ordering=_selectFromElementsOrdering,
params = [
enum.EnumParameter("coverage", ooflib.engine.coverage.Coverage),
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select segments from selected elements.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/segments_from_elements.xml'))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SegFromSelectedNodes(SegmentSelectionModifier):
def __init__(self, one, two, operator):
self.one = one
self.two = two
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.SegmentsFromNodesCourier(
skelctxt.getObject(),
self.one, self.two,
skelctxt.nodeselection.currentSelectionTracker(),
clist, plist)
self.operator.operate(selection, courier)
SegmentSelectionModRegistration(
"From Selected Nodes",
SegFromSelectedNodes,
params=[
parameter.BooleanParameter(
'one', value=True,
tip='Select segments with one selected node.'),
parameter.BooleanParameter(
'two', value=True,
tip='Select segments with two selected nodes.'),
selectionoperators.SelectionOperatorParam('operator')
],
ordering=_selectFromNodesOrdering,
tip="Select segments from the selected nodes.")
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SegFromSelectedFaces(SegmentSelectionModifier):
def __init__(self, coverage, operator):
self.coverage = coverage
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.SegmentsFromFacesCourier(
skelctxt.getObject(),
self.coverage,
skelctxt.faceselection.currentSelectionTracker(),
clist, plist)
self.operator.operate(selection, courier)
SegmentSelectionModRegistration(
'From Selected Faces',
SegFromSelectedFaces,
ordering=_selectFromFacesOrdering,
params=[
enum.EnumParameter('coverage', ooflib.engine.coverage.Coverage),
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select the edges of the selected faces.")
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SelectInternalBoundarySegments(SegmentSelectionModifier):
def __init__(self, operator):
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.InternalBoundarySegmentsCourier(
skelctxt.getObject(), clist, plist)
self.operator.operate(selection, courier)
SegmentSelectionModRegistration(
'Internal Boundaries',
SelectInternalBoundarySegments,
params=[selectionoperators.SelectionOperatorParam('operator')],
ordering=_internalBoundaryOrdering,
tip="Select segments on material or group boundaries.")
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
#Interface branch
## This 2D method has not been updated to use selection couriers!
class SelectInterfaceSegments(SegmentSelectionModifier):
def __init__(self, interface):
self.interface = interface
def select(self, skeleton, selection):
skel = skeleton.getObject()
interfacemsplugin=skel.getMicrostructure().getPlugIn("Interfaces")
try:
interfacedef=interfacemsplugin.namedinterfaces[self.interface]
except KeyError:
#Should not happen
raise ooferror.ErrPyProgrammingError("Interface not found!")
seglist = []
for segment in skel.segments.values():
yes,side1elem=interfacedef.isInterfaceSegment(segment,skel)
if yes:
seglist.append(segment)
selection.start()
selection.clear()
selection.select(seglist)
if config.dimension() == 2:
SegmentSelectionModRegistration(
'Interface Segments',
SegmentSelectionModifier,
SelectInterfaceSegments,
ordering=_interfaceOrdering,
params=[
interfaceparameters.InterfacesParameter(
'interface',
tip='Select segments in this interface.')],
tip="Select segments from an interface definition.",
discussion="""<para>
Select all the &sgmts; that belong to the given interface definition.
</para>"""
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SelectNamedBoundarySegments(SegmentSelectionModifier):
def __init__(self, boundary, operator):
self.boundary = boundary
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
bdy = skelctxt.getBoundary(self.boundary) # A SkelContextEdgeBoundary
courier = skeletonselectioncourier.EdgeBoundaryCourier(
skelctxt.getObject(),
bdy.getBoundarySet(skelctxt.getObject()),
clist, plist)
self.operator.operate(selection, courier)
SegmentSelectionModRegistration(
'Named Boundary',
SelectNamedBoundarySegments,
ordering=_namedBoundaryOrdering,
params=[
skeletongroupparams.SkeletonEdgeBoundaryParameter(
'boundary',
tip="Select segments in this boundary"),
selectionoperators.SelectionOperatorParam('operator')
],
tip="Select segments belonging to the given skeleton edge boundary.",
discussion="""<para>
Select all the &sgmts; contained in the given &skel;
<link linkend="Section-Concepts-Skeleton-Boundary">boundary</link>.
The boundary must be a
<link linkend="Section-Concepts-Skeleton-Boundary-Edge">edge</link>
boundary.
</para>"""
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
## This 2D method has not been updated to use selection couriers!
class SelectPeriodicPartnerSegments(SegmentSelectionModifier):
def select(self, skeleton, selection):
oldsegs = skeleton.segmentselection.retrieve()
newsegs = set()
skel = skeleton.getObject()
for seg in oldsegs:
partner = seg.getPartner(skel)
if partner:
newsegs.add(partner)
selection.start()
selection.select(newsegs)
registeredclass.TwoDOnlyRegistration(
'Periodic Partners',
SegmentSelectionModifier,
SelectPeriodicPartnerSegments,
ordering=_periodicPartnerOrdering,
tip="Select the periodic partners of the currently selected Segments.",
discussion="""<para>
If the &skel; is <link
linkend="Section-Concepts-Skeleton-Periodicity">periodic</link>,
every &sgmt; on a periodic boundary has a partner on the opposite
boundary. This command selects the periodic partners of the
currently selected &sgmts;, without unselecting any &sgmts;.
</para>"""
)
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
class SegmentHomogeneity(SegmentSelectionModifier):
def __init__(self, min_homogeneity, max_homogeneity, operator):
self.min_homogeneity = min_homogeneity
self.max_homogeneity = max_homogeneity
self.operator = operator
def select(self, skelctxt, selection):
clist, plist = selection.trackerlist()
courier = skeletonselectioncourier.SegmentHomogeneityCourier(
skelctxt.getObject(), self.min_homogeneity, self.max_homogeneity,
clist, plist)
self.operator.operate(selection, courier)
SegmentSelectionModRegistration(
'By Homogeneity',
SegmentHomogeneity,
ordering=_homogeneityOrdering,
params = [
parameter.FloatRangeParameter(
'min_homogeneity', (0.0, 1.0, 0.01), value=0.0,
#!/usr/bin/env python
import argparse
import itertools
import math
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import pickle as pk
from discriminator import Discriminator
from gan import batch_dataset, train, gen_noise
from generator import Generator
from utils import load_dataset
from logger_utils import Logger
from matplotlib import pyplot as plt
from torch.autograd import Variable
from torchvision import transforms, datasets
NUM_EVALUATORS = 10
# ==============================================================================
# Distillation GAN
# ==============================================================================
def get_training_partitions(X):
"""
Generates column-partitioned training sets for various GANs
# TODO: for an extension, we can sample a number of random datasets
"""
X = torch.tensor(X, dtype=torch.float32)
other_idx = [i for i in range(0,7)]
other_X = X[:,:,other_idx]
atomic_idx = [i for i in range(7,71,4)]
atomic_X = X[:,:,atomic_idx]
locations_idx = [i for i in range(8,71) if i % 4 != 3]
locations_X = X[:,:,locations_idx]
return [(other_X, other_idx), (atomic_X, atomic_idx), (locations_X, locations_idx)]
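# Illustrative sketch (not called anywhere): shows the column split produced by
# get_training_partitions. The (samples, timesteps, 71-feature) input shape is an
# assumption inferred from the index arithmetic above.
def _example_training_partitions():
    X = np.random.rand(4, 5, 71)  # hypothetical batch of particle feature matrices
    (other_X, other_idx), (atomic_X, atomic_idx), (loc_X, loc_idx) = get_training_partitions(X)
    assert other_X.shape[-1] == len(other_idx)    # columns 0-6
    assert atomic_X.shape[-1] == len(atomic_idx)  # every 4th column starting at 7
    assert loc_X.shape[-1] == len(loc_idx)        # remaining location columns
    return other_idx, atomic_idx, loc_idx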
def init_population(X, num_batches):
"""
Initializes a population given the initial training partitions
"""
partitions = get_training_partitions(X)
generation = 0
population = dict()
for i, partition in enumerate(partitions):
        spec_args = argparse.Namespace(**vars(args))  # copy so the global args are not mutated
spec_args.g_input_size = args.latent
spec_args.g_output_size = len(partition[1])
spec_args.g_hidden_size = int(math.ceil(spec_args.g_output_size / 2))
spec_args.d_input_size = len(partition[1])
spec_args.d_hidden_size = int(math.ceil(spec_args.d_input_size / 2))
G, D, _, evaluations = train(
partition[0],
num_batches,
args.num_particle_samples,
set_args=spec_args,
train_cols=partition[1]
)
MLE_emittance = torch.mean(evaluations)
population['gen%dpartition%d' % (generation, i)] = {
'generator': G,
'discriminator': D,
'emittance': MLE_emittance,
'partition': partition
}
return population
def mutate(population, num_batches, generation):
"""
Trains a GAN for each population element
"""
i = 0
new_population = dict()
for label, map in population.items():
G, D, _, evaluations = train(
map['partition'][0],
num_batches,
args.num_particle_samples,
G=map['generator'],
D=map['discriminator'],
set_args=args,
train_cols=map['partition'][1]
)
MLE_emittance = torch.mean(evaluations)
new_population['gen%dpartition%d' % (generation, i)] = {
'generator': G,
'discriminator': D,
'emittance': MLE_emittance,
'partition': map['partition']
}
i += 1
return new_population
def select_k_fittest(population, k):
    """
    Selects the k fittest GANs (lowest mean emittance), keeping the population dict layout
    """
    if len(population) >= k:
        return dict(sorted(population.items(), key=lambda kv: kv[1]['emittance'])[:k])
    return population
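# Illustrative sketch (not called anywhere): select_k_fittest keeps the k entries with
# the lowest mean emittance; the dict layout mirrors init_population/mutate above.
def _example_select_k_fittest():
    toy_population = {
        'a': {'generator': None, 'discriminator': None, 'emittance': 0.3, 'partition': None},
        'b': {'generator': None, 'discriminator': None, 'emittance': 0.1, 'partition': None},
        'c': {'generator': None, 'discriminator': None, 'emittance': 0.2, 'partition': None},
    }
    fittest = select_k_fittest(toy_population, 2)
    assert set(fittest) == {'b', 'c'}
    return fittest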
def reshape_generator_output(pol1, pol2, pol3):
"""
Reshapes the output of generators that do not match the original output size
"""
G_1 = pol1['generator']
p_1 = pol1['partition']
G_2 = pol2['generator']
p_2 = pol2['partition']
G_3 = pol3['generator']
p_3 = pol3['partition']
test_noise_1 = gen_noise(args.crossover_samples, args.latent)
test_noise_2 = gen_noise(args.crossover_samples, args.latent)
test_noise_3 = gen_noise(args.crossover_samples, args.latent)
fake_data_1 = G_1(test_noise_1).detach() # make sure when we create a gan we can set its output shape (critical!)
fake_data_2 = G_2(test_noise_2).detach()
fake_data_3 = G_3(test_noise_3).detach()
# Format back into their appropriate columns
d_1 = torch.zeros(args.crossover_samples, 71)
d_2 = torch.zeros(args.crossover_samples, 71)
d_3 = torch.zeros(args.crossover_samples, 71)
d_1[:,p_1[1]] = fake_data_1
d_2[:,p_2[1]] = fake_data_2
d_3[:,p_3[1]] = fake_data_3
# Also format the datasets back into their appropriate columns
jp_1 = torch.zeros(p_1[0].shape[0], p_1[0].shape[1], 71)
jp_2 = torch.zeros(p_2[0].shape[0], p_2[0].shape[1], 71)
jp_3 = torch.zeros(p_3[0].shape[0], p_3[0].shape[1], 71)
jp_1[:,:,p_1[1]] = p_1[0]
jp_2[:,:,p_2[1]] = p_2[0]
jp_3[:,:,p_3[1]] = p_3[0]
joint_partition = jp_1.add_(jp_2.add_(jp_3))
return joint_partition, d_1, d_2, d_3, p_1, p_2, p_3
def sample_top_paths(joint_partition, gen_partition, clf):
"""
Samples the top 50% of training samples from GPO and the previous dataset
# Commented out code prints what the top particle representations are
# for particle in ind:
# print(dataset_particles[ind,:])
"""
dataset_particles = joint_partition.view(-1, 71)
dataset_particles = torch.cat((dataset_particles, gen_partition), dim=0)
percentile_index = math.floor(dataset_particles.shape[0]/2)
dataset_particles = dataset_particles.detach().numpy()
prediction = torch.tensor(clf.predict(dataset_particles), dtype=torch.float32)
res, ind = prediction.topk(percentile_index, largest=False)
# There may be a bug later where this fails. That would be because i did batching jankily long ago, fix it
top_partition = batch_dataset(dataset_particles[ind,:], args.batch_size)
return torch.tensor(top_partition, dtype=torch.float32)
def crossover(pol1, pol2, pol3):
"""
Genetic operator that crosses together two GANs trained separately
Specifically for our case, we note our child GANS train on different parts
of the dataset. Hence, we combine to create a new good dataset extension
to match with a new student GAN.
We choose the crossover to operate on 3 GANs for the initial combination of the dataset
"""
joint_partition, d_1, d_2, d_3, p_1, p_2, p_3 = reshape_generator_output(pol1, pol2, pol3)
# Naive approach sequential greedy maximization
# file = open('NN_evaluator_0.sav', 'rb')
# clf = pk.load(file)
gen_partition = torch.zeros(d_1.shape[0], 71, dtype=torch.float32)
for col in range(71):
grad_d_1 = gen_partition.clone()
grad_d_2 = gen_partition.clone()
grad_d_3 = gen_partition.clone()
grad_d_1[:,col] = d_1[:,col]
grad_d_2[:,col] = d_2[:,col]
grad_d_3[:,col] = d_3[:,col]
d_1_pred = torch.zeros(d_1.shape[0], NUM_EVALUATORS)
d_2_pred = torch.zeros(d_1.shape[0], NUM_EVALUATORS)
d_3_pred = torch.zeros(d_1.shape[0], NUM_EVALUATORS)
for i in range(NUM_EVALUATORS):
# Import the evaluator NN
file = open('NN_evaluator_'+str(i)+'.sav', 'rb')
clf = pk.load(file)
# Using the evaluator NN, make a prediction on the generated particle
d_1_pred[:,i] = torch.tensor(clf.predict((grad_d_1).detach().numpy()), dtype=torch.float32)
d_2_pred[:,i] = torch.tensor(clf.predict((grad_d_2).detach().numpy()), dtype=torch.float32)
d_3_pred[:,i] = torch.tensor(clf.predict((grad_d_3).detach().numpy()), dtype=torch.float32)
e_1 = torch.mean(d_1_pred)
e_2 = torch.mean(d_2_pred)
e_3 = torch.mean(d_3_pred)
if e_1 < e_2 and e_1 < e_3:
gen_partition = grad_d_1
elif e_2 < e_1 and e_2 < e_3:
gen_partition = grad_d_2
else:
gen_partition = grad_d_3
# ======================================================================== #
# For each generated feature col, choose the col that maximizes according to NN_eval (stochastic gradient descent or adam)
# After this, we already have a sub-GAN which is SGD optimized GAN combination
# This is what Michael was originally interested in investigating
# The only problem is this is not a single GAN, so we cannot add it to a population and iterate
# Print the results (i.e. mean emittance for the crossover) for sure!
# ======================================================================== #
top_partition = sample_top_paths(joint_partition, gen_partition, clf)
# Now that we have a new dataset, train a new GAN on it for imitation learning GAN.
    spec_args = argparse.Namespace(**vars(args))  # copy so the global args are not mutated
spec_args.g_input_size = args.latent
spec_args.g_output_size = top_partition.shape[2]
spec_args.g_hidden_size = int(math.ceil(spec_args.g_output_size / 2))
spec_args.d_input_size = top_partition.shape[2]
spec_args.d_hidden_size = int(math.ceil(spec_args.d_input_size / 2))
partition_idx = list(set(p_1[1] + p_2[1] + p_3[1]))
return train(
top_partition,
num_batches,
args.num_particle_samples,
set_args=spec_args,
train_cols=partition_idx
), (top_partition, partition_idx)
def breed(parents, population):
"""
Runs crossover and 'dagger'_distillation on pairs of fit parents
"""
children = dict()
triplets = list(itertools.combinations(parents.values(), 3))
while len(population) - len(triplets) > 0:
triplets.append(random.choice(triplets))
if len(triplets) - len(population) > 0:
triplets = random.sample(triplets, len(population))
w = 0
gen_best_score = None
for p1, p2, p3 in triplets:
(G, D, _, evaluations), partition = crossover(p1, p2, p3)
MLE_emittance = torch.mean(evaluations)
if not gen_best_score or MLE_emittance < gen_best_score:
gen_best_score = MLE_emittance
children['cross%d' % w] = {
'generator': G,
'discriminator': D,
'emittance': MLE_emittance,
'partition': partition
}
w += 1
    print('The best emittance score for this generation is %.2f' % gen_best_score)
return children
def train_GPO_GAN(X, Y, num_batches, k, r):
"""
Trains a student GAN network from a teacher GAN selected from population and r epochs
Specifically for our project, we would like to experiment with
generating our atomic numbers and connections separately, to do this,
we distill a GAN trained on atomic numbers and a GAN trained on connections
into a binary GAN policy and train a student using a framework similar
to GPO (https://arxiv.org/pdf/1711.01012.pdf)
We have modified this framework specifically to train GANs on various parts
of the dataset separately.
"""
population = init_population(X, num_batches)
epoch = 0
while epoch < r and len(population) > 1:
if epoch > 0:
population = mutate(population, num_batches, epoch)
parents = select_k_fittest(population, k)
population = breed(parents, population)
epoch += 1
# Run a final training on the resultant child to ensure training on full dataset
    student = list(select_k_fittest(population, 1).values())[0]  # isolate the single fittest candidate
# return student['generator'], student['discriminator'], None, student['emittance']
return train(
X,
num_batches,
args.num_particle_samples,
student['generator'],
student['discriminator'],
set_args=args,
model_name='GPO-GAN'
)
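# Hypothetical driver sketch (illustrative only; load_dataset, num_batches and the
# argparse defaults defined below are assumed to supply the real inputs):
#
#     X, Y = load_dataset(...)                                      # particle features and labels
#     G, D, _, evals = train_GPO_GAN(X, Y, num_batches, k=args.k, r=args.r_epochs)
#     print(torch.mean(evals))                                      # emittance of the distilled student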
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--latent', type=int, default=8, help="Latent representation size")
parser.add_argument('--g_input_size', type=int, default=8, help="Random noise dimension coming into generator, per output vector")
parser.add_argument('--g_hidden_size', type=int, default=32, help="Generator complexity")
parser.add_argument('--g_output_size', type=int, default=71, help="Size of generator output vector")
parser.add_argument('--d_input_size', type=int, default=71, help="Minibatch size - cardinality of distributions (change)")
parser.add_argument('--d_hidden_size', type=int, default=32, help="Discriminator complexity")
parser.add_argument('--d_output_size', type=int, default=1, help="Single dimension for real vs fake classification")
parser.add_argument('--p', type=float, default=0.2)
parser.add_argument('--dropout', type=float, default=0.3)
parser.add_argument('--d_learning_rate', type=float, default=1e-3)
parser.add_argument('--g_learning_rate', type=float, default=1e-3)
parser.add_argument('--sgd_momentum', type=float, default=0.9)
parser.add_argument('--num_epochs', type=int, default=200)
parser.add_argument('--print_interval', type=int, default=200)
parser.add_argument('--optim', type=str, default='SGD')
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--k', type=int, default=5, help="Number of GANs to select teacher from")
parser.add_argument('--num_particle_samples', type=int, default=100, help="Number of sample particles to aggregate fitness estimate over")
parser.add_argument('--r_epochs', type=int, default=3, help="Number of epochs of GPO")
parser.add_argument('--crossover_samples', type=int, default=1000, help="number of samples for crossover")
    parser.add_argument('--loss_fn', type=str, default='dflt') # Loss defaults
# File: jiant/ext/allennlp.py (from eric11eca/inference-information-probing)
from typing import Optional
import torch
import torch.nn as nn
# noinspection PyTypeChecker
# noinspection PyUnusedLocal
class SelfAttentiveSpanExtractor(nn.Module):
"""
Computes span representations by generating an unnormalized attention score for each
    word in the document. Span representations are computed with respect to these
scores by normalising the attention scores for words inside the span.
Given these attention distributions over every span, this module weights the
corresponding vector representations of the words in the span by this distribution,
returning a weighted representation of each span.
Parameters
----------
input_dim : ``int``, required.
The final dimension of the ``sequence_tensor``.
Returns
-------
attended_text_embeddings : ``torch.FloatTensor``.
        A tensor of shape (batch_size, num_spans, input_dim), where each span representation
is formed by locally normalising a global attention over the sequence. The only way
in which the attention distribution differs over different spans is in the set of words
over which they are normalized.
"""
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._input_dim
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.LongTensor = None,
span_indices_mask: torch.LongTensor = None,
) -> torch.FloatTensor:
# both of shape (batch_size, num_spans, 1)
span_starts, span_ends = span_indices.split(1, dim=-1)
# shape (batch_size, num_spans, 1)
# These span widths are off by 1, because the span ends are `inclusive`.
span_widths = span_ends - span_starts
# We need to know the maximum span width so we can
# generate indices to extract the spans from the sequence tensor.
# These indices will then get masked below, such that if the length
# of a given span is smaller than the max, the rest of the values
# are masked.
max_batch_span_width = span_widths.max().item() + 1
# shape (batch_size, sequence_length, 1)
global_attention_logits = self._global_attention(sequence_tensor)
# Shape: (1, 1, max_batch_span_width)
max_span_range_indices = get_range_vector(
max_batch_span_width, get_device_of(sequence_tensor)
).view(1, 1, -1)
# Shape: (batch_size, num_spans, max_batch_span_width)
# This is a broadcasted comparison - for each span we are considering,
# we are creating a range vector of size max_span_width, but masking values
# which are greater than the actual length of the span.
#
# We're using <= here (and for the mask below) because the span ends are
# inclusive, so we want to include indices which are equal to span_widths rather
# than using it as a non-inclusive upper bound.
span_mask = (max_span_range_indices <= span_widths).float()
raw_span_indices = span_ends - max_span_range_indices
# We also don't want to include span indices which are less than zero,
# which happens because some spans near the beginning of the sequence
# have an end index < max_batch_span_width, so we add this to the mask here.
span_mask = span_mask * (raw_span_indices >= 0).float()
span_indices = torch.nn.functional.relu(
raw_span_indices.float()).long()
# Shape: (batch_size * num_spans * max_batch_span_width)
flat_span_indices = flatten_and_batch_shift_indices(
span_indices, sequence_tensor.size(1))
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = batched_index_select(
sequence_tensor, span_indices, flat_span_indices)
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_logits = batched_index_select(
global_attention_logits, span_indices, flat_span_indices
).squeeze(-1)
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_weights = masked_softmax(
span_attention_logits, span_mask)
# Do a weighted sum of the embedded spans with
# respect to the normalised attention distributions.
# Shape: (batch_size, num_spans, embedding_dim)
attended_text_embeddings = weighted_sum(
span_embeddings, span_attention_weights)
if span_indices_mask is not None:
# Above we were masking the widths of spans with respect to the max
# span width in the batch. Here we are masking the spans which were
# originally passed in as padding.
return attended_text_embeddings * span_indices_mask.unsqueeze(-1).float()
return attended_text_embeddings
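# Minimal usage sketch (illustrative only; assumes the helper functions referenced above,
# e.g. get_range_vector and flatten_and_batch_shift_indices, are defined elsewhere in
# this module). Span ends are inclusive.
def _example_self_attentive_span_extractor():
    extractor = SelfAttentiveSpanExtractor(input_dim=16)
    sequence = torch.randn(2, 10, 16)             # (batch_size, sequence_length, input_dim)
    spans = torch.tensor([[[0, 3], [4, 6]],
                          [[1, 1], [2, 9]]])      # (batch_size, num_spans, 2)
    span_reps = extractor(sequence, spans)        # -> (2, 2, 16)
    return span_reps.shape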
class TimeDistributed(torch.nn.Module):
"""
Given an input shaped like ``(batch_size, time_steps, [rest])`` and a ``Module`` that takes
inputs like ``(batch_size, [rest])``, ``TimeDistributed`` reshapes the input to be
``(batch_size * time_steps, [rest])``, applies the contained ``Module``, then reshapes it back.
Note that while the above gives shapes with ``batch_size`` first, this ``Module`` also works if
``batch_size`` is second - we always just combine the first two dimensions, then split them.
"""
def __init__(self, module):
super(TimeDistributed, self).__init__()
self._module = module
def forward(self, *inputs): # pylint: disable=arguments-differ
reshaped_inputs = []
for input_tensor in inputs:
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError(
"No dimension to distribute: " + str(input_size))
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, input_size).
squashed_shape = [-1] + [x for x in input_size[2:]]
reshaped_inputs.append(
input_tensor.contiguous().view(*squashed_shape))
reshaped_outputs = self._module(*reshaped_inputs)
# Now get the output back into the right shape.
# (batch_size, time_steps, [hidden_size])
# noinspection PyUnboundLocalVariable
new_shape = [input_size[0], input_size[1]] + \
[x for x in reshaped_outputs.size()[1:]]
outputs = reshaped_outputs.contiguous().view(*new_shape)
return outputs
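# Minimal usage sketch (illustrative only): TimeDistributed applies the wrapped module
# independently at every time step by folding the first two dimensions together.
def _example_time_distributed():
    projection = TimeDistributed(torch.nn.Linear(16, 4))
    inputs = torch.randn(2, 10, 16)   # (batch_size, time_steps, features)
    outputs = projection(inputs)      # -> (2, 10, 4)
    return outputs.shape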
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
# We'll special-case a few settings here, where there are efficient (but poorly-named)
# operations in pytorch that already do the computation we need.
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
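# Minimal usage sketch (illustrative only): a batch of attention distributions over words
# collapses a (batch, num_words, embedding_dim) matrix into (batch, embedding_dim).
def _example_weighted_sum():
    matrix = torch.randn(2, 5, 8)
    attention = torch.nn.functional.softmax(torch.randn(2, 5), dim=-1)
    summed = weighted_sum(matrix, attention)  # -> (2, 8)
    return summed.shape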
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
that uses categorical cross-entropy loss.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
return result
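# Minimal usage sketch (illustrative only): masked positions receive zero probability and
# the remaining entries are renormalised to sum to one.
def _example_masked_softmax():
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])   # last position is padding
    return masked_softmax(scores, mask)      # third entry is 0, first two sum to ~1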
def batched_index_select(
target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
"""
The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence
dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,
embedding_size)``.
This function returns selected values in the target with respect to the provided indices, which
have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally
precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.
    An example use case of this function is looking up the start and end
    indices of spans in a sequence tensor.
    """
    if flattened_indices is None:
        # Shape: (batch_size * d_1 * ... * d_n)
        flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
    # Shape: (batch_size * sequence_length, embedding_size)
    flattened_target = target.view(-1, target.size(-1))
    # Shape: (batch_size * d_1 * ... * d_n, embedding_size)
    flattened_selected = flattened_target.index_select(0, flattened_indices)
    selected_shape = list(indices.size()) + [target.size(-1)]
    # Shape: (batch_size, d_1, ..., d_n, embedding_size)
    selected_targets = flattened_selected.view(*selected_shape)
    return selected_targets
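# Minimal usage sketch (illustrative only; assumes flatten_and_batch_shift_indices is
# defined elsewhere in this module): gathering per-index embeddings from a
# (batch_size, sequence_length, embedding_size) tensor.
def _example_batched_index_select():
    target = torch.randn(2, 10, 8)
    indices = torch.tensor([[0, 3], [5, 9]])          # e.g. span start/end positions
    selected = batched_index_select(target, indices)  # -> (2, 2, 8)
    return selected.shape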
        # Or passed a single word without wrapping it in quotes as an argument
# ex: p.inflect("I plural(see)") instead of p.inflect("I plural('see')")
raise NameError(f"name '{obj.id}' is not defined")
def _string_to_substitute(
self, mo: Match, methods_dict: Dict[str, Callable]
) -> str:
"""
Return the string to be substituted for the match.
"""
matched_text, f_name = mo.groups()
# matched_text is the complete match string. e.g. plural_noun(cat)
# f_name is the function name. e.g. plural_noun
# Return matched_text if function name is not in methods_dict
if f_name not in methods_dict:
return matched_text
# Parse the matched text
a_tree = ast.parse(matched_text)
# get the args and kwargs from ast objects
args_list = [
self._get_value_from_ast(a)
for a in a_tree.body[0].value.args # type: ignore[attr-defined]
]
kwargs_list = {
kw.arg: self._get_value_from_ast(kw.value)
for kw in a_tree.body[0].value.keywords # type: ignore[attr-defined]
}
# Call the corresponding function
return methods_dict[f_name](*args_list, **kwargs_list)
# 0. PERFORM GENERAL INFLECTIONS IN A STRING
def inflect(self, text: str) -> str:
"""
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj,
singular_noun, a, an, no, ordinal, number_to_words,
and prespart
"""
save_persistent_count = self.persistent_count
# Dictionary of allowed methods
methods_dict: Dict[str, Callable] = {
"plural": self.plural,
"plural_adj": self.plural_adj,
"plural_noun": self.plural_noun,
"plural_verb": self.plural_verb,
"singular_noun": self.singular_noun,
"a": self.a,
"an": self.a,
"no": self.no,
"ordinal": self.ordinal,
"number_to_words": self.number_to_words,
"present_participle": self.present_participle,
"num": self.num,
}
# Regular expression to find Python's function call syntax
output = FUNCTION_CALL.sub(
lambda mo: self._string_to_substitute(mo, methods_dict), text
)
self.persistent_count = save_persistent_count
return output
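    # Usage sketch (illustrative only):
    #
    #     p = engine()
    #     p.inflect("The plural of cat is plural('cat')")   # -> 'The plural of cat is cats'
    #     p.inflect("I saw num(3) plural('mouse')")         # -> 'I saw 3 mice'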
# ## PLURAL SUBROUTINES
def postprocess(self, orig: str, inflected) -> str:
inflected = str(inflected)
if "|" in inflected:
word_options = inflected.split("|")
# When two parts of a noun need to be pluralized
if len(word_options[0].split(" ")) == len(word_options[1].split(" ")):
result = inflected.split("|")[self.classical_dict["all"]].split(" ")
# When only the last part of the noun needs to be pluralized
else:
result = inflected.split(" ")
for index, word in enumerate(result):
if "|" in word:
result[index] = word.split("|")[self.classical_dict["all"]]
else:
result = inflected.split(" ")
# Try to fix word wise capitalization
for index, word in enumerate(orig.split(" ")):
if word == "I":
# Is this the only word for exceptions like this
# Where the original is fully capitalized
# without 'meaning' capitalization?
                # Also this fails to handle a capitalization in context
continue
if word.capitalize() == word:
result[index] = result[index].capitalize()
if word == word.upper():
result[index] = result[index].upper()
return " ".join(result)
def partition_word(self, text: str) -> Tuple[str, str, str]:
mo = PARTITION_WORD.search(text)
if mo:
return mo.group(1), mo.group(2), mo.group(3)
else:
return "", "", ""
def plural(self, text: str, count: Optional[Union[str, int]] = None) -> str:
"""
Return the plural of text.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(
word,
self._pl_special_adjective(word, count)
or self._pl_special_verb(word, count)
or self._plnoun(word, count),
)
return f"{pre}{plural}{post}"
def plural_noun(self, text: str, count: Optional[Union[str, int]] = None) -> str:
"""
Return the plural of text, where text is a noun.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._plnoun(word, count))
return f"{pre}{plural}{post}"
def plural_verb(self, text: str, count: Optional[Union[str, int]] = None) -> str:
"""
Return the plural of text, where text is a verb.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(
word,
self._pl_special_verb(word, count) or self._pl_general_verb(word, count),
)
return f"{pre}{plural}{post}"
    def plural_adj(self, text: str, count: Optional[Union[str, int]] = None) -> str:
"""
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_adjective(word, count) or word)
return f"{pre}{plural}{post}"
def compare(self, word1: str, word2: str) -> Union[str, bool]:
"""
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return (
self._plequal(word1, word2, self.plural_noun)
or self._plequal(word1, word2, self.plural_verb)
or self._plequal(word1, word2, self.plural_adj)
)
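    # Usage sketch (illustrative only):
    #
    #     p = engine()
    #     p.compare("cat", "cats")     # -> 's:p'  (word2 is the plural of word1)
    #     p.compare("cats", "cat")     # -> 'p:s'
    #     p.compare("index", "index")  # -> 'eq'
    #     p.compare("cat", "dog")      # -> False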
def compare_nouns(self, word1: str, word2: str) -> Union[str, bool]:
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as nouns
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_noun)
def compare_verbs(self, word1: str, word2: str) -> Union[str, bool]:
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as verbs
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_verb)
def compare_adjs(self, word1: str, word2: str) -> Union[str, bool]:
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_adj)
def singular_noun(
self,
text: str,
count: Optional[Union[int, str]] = None,
gender: Optional[str] = None,
) -> Union[str, bool]:
"""
Return the singular of text, where text is a plural noun.
If count supplied, then return the singular if count is one of:
1, a, an, one, each, every, this, that or if count is None
otherwise return text unchanged.
Whitespace at the start and end is preserved.
>>> p = engine()
>>> p.singular_noun('horses')
'horse'
>>> p.singular_noun('knights')
'knight'
Returns False when a singular noun is passed.
>>> p.singular_noun('horse')
False
>>> p.singular_noun('knight')
False
>>> p.singular_noun('soldier')
False
"""
pre, word, post = self.partition_word(text)
if not word:
return text
sing = self._sinoun(word, count=count, gender=gender)
if sing is not False:
plural = self.postprocess(word, sing)
return f"{pre}{plural}{post}"
return False
def _plequal(self, word1: str, word2: str, pl) -> Union[str, bool]: # noqa: C901
classval = self.classical_dict.copy()
self.classical_dict = all_classical.copy()
if word1 == word2:
return "eq"
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = no_classical.copy()
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = classval.copy()
if pl == self.plural or pl == self.plural_noun:
if self._pl_check_plurals_N(word1, word2):
return "p:p"
if self._pl_check_plurals_N(word2, word1):
return "p:p"
if pl == self.plural or pl == self.plural_adj:
if self._pl_check_plurals_adj(word1, word2):
return "p:p"
return False
def _pl_reg_plurals(self, pair: str, stems: str, end1: str, end2: str) -> bool:
pattern = fr"({stems})({end1}\|\1{end2}|{end2}\|\1{end1})"
return bool(re.search(pattern, pair))
def _pl_check_plurals_N(self, word1: str, word2: str) -> bool:
stem_endings = (
(pl_sb_C_a_ata, "as", "ata"),
(pl_sb_C_is_ides, "is", "ides"),
(pl_sb_C_a_ae, "s", "e"),
(pl_sb_C_en_ina, "ens", "ina"),
(pl_sb_C_um_a, "ums", "a"),
(pl_sb_C_us_i, "uses", "i"),
(pl_sb_C_on_a, "ons", "a"),
(pl_sb_C_o_i_stems, "os", "i"),
(pl_sb_C_ex_ices, "exes", "ices"),
(pl_sb_C_ix_ices, "ixes", "ices"),
(pl_sb_C_i, "s", "i"),
(pl_sb_C_im, "s", "im"),
(".*eau", "s", "x"),
            (".*ieu", "s", "x"),
# File: codes/dataloader.py
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import *
TIME_TYPE = {'no-time':0, 'point-in-time':1, 'only-begin':2, 'only-end':3, 'full-interval':4}
'''
(1) It is meaningless to sample time points for static statements or statements with missing temporal information;
(2) For temporal statements, we sample time points that are not within the validity time period
    * for partial statements, we sample times from before or after the known period, depending on the mention ('until ...', 'before ...');
      we need to ensure that a sampled time is still valid for some boxes but invalid for the intersection of the boxes;
    * for full-interval statements, we exclude the interval itself from the sampled time points;
    * it is also unknown whether the two kinds of negative samples are complementary or redundant;
    * try training with the two kinds of negative samples separately; if that does not work, then try training with them jointly.
'''
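# Worked example of the convention above (timestamps are hypothetical): for an 'only-begin'
# statement known to hold from timestamp 12 onwards, negative timestamps are sampled while
# excluding 12 and a short window after it; for a 'full-interval' statement valid over
# [12, 20], any timestamp outside [12, 20] may serve as a negative sample.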
class TrainDataset(Dataset):
def __init__(self, triples, nentity, nrelation, negative_sample_size, train_ans, mode):
# assert mode == 'tail-batch'
self.len = len(triples)
self.triples = triples
self.nentity = nentity
self.nrelation = nrelation
self.negative_sample_size = negative_sample_size
self.mode = mode
# self.count = self.count_frequency(triples, train_ans, mode)
self.true_missing = train_ans
self.qtype = self.triples[0][-1]
def __len__(self):
return self.len
def __getitem__(self, idx):
if self.mode == 'tail-batch':
positive_sample = self.triples[idx][0] # the data format (sub, (rel, ), fake_obj)
head, relations = positive_sample
            candidates = list(self.true_missing[positive_sample])
            # tail = np.random.choice(candidates)
            tail = self.triples[idx][1]
            subsampling_weight = len(candidates) + 4
subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))
negative_sample_list = []
negative_sample_size = 0
while negative_sample_size < self.negative_sample_size:
negative_sample = np.random.randint(self.nentity, size=self.negative_sample_size*2)
mask = np.in1d( ## mask is used here to filter out other true possibilities
negative_sample,
self.true_missing[positive_sample],
assume_unique=True,
invert=True
)
negative_sample = negative_sample[mask]
negative_sample_list.append(negative_sample)
negative_sample_size += negative_sample.size
negative_sample = np.concatenate(negative_sample_list)[:self.negative_sample_size]
negative_sample = torch.from_numpy(negative_sample)
positive_sample = torch.LongTensor([positive_sample[0], positive_sample[1], tail])
return positive_sample, negative_sample, subsampling_weight, self.mode
elif self.mode == 'head-batch':
positive_sample = self.triples[idx][0] # the data format (fake_sub, ((rel, ), obj))
head, relations, tail = positive_sample
head = np.random.choice(list(self.true_missing[(0,(relations, tail))]))
            subsampling_weight = len(self.true_missing[(0, (relations, tail))]) + 4
subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))
negative_sample_list = []
negative_sample_size = 0
while negative_sample_size < self.negative_sample_size:
negative_sample = np.random.randint(self.nentity, size=self.negative_sample_size*2)
mask = np.in1d(
negative_sample,
self.true_missing[(0,(relations, tail))],
assume_unique=True,
invert=True
)
negative_sample = negative_sample[mask]
negative_sample_list.append(negative_sample)
negative_sample_size += negative_sample.size
negative_sample = np.concatenate(negative_sample_list)[:self.negative_sample_size]
negative_sample = torch.from_numpy(negative_sample)
positive_sample = torch.LongTensor([head] + [i for i in relations] + [tail])
return positive_sample, negative_sample, subsampling_weight, self.mode
@staticmethod
def collate_fn(data):
positive_sample = torch.stack([_[0] for _ in data], dim=0)
negative_sample = torch.stack([_[1] for _ in data], dim=0)
subsample_weight = torch.cat([_[2] for _ in data], dim=0)
mode = data[0][3]
return positive_sample, negative_sample, None, subsample_weight, mode
    @staticmethod
    def get_subsampling_weight(new_query, true_missing, start=4):
return len(true_missing[new_query]) + start
@staticmethod
def count_frequency(triples, true_missing, mode, start=4):
count = {}
# if mode == 'tail-batch':
for triple in triples:
# head, relations = triple
if triple not in count:
count[triple] = start + len(true_missing[triple])
return count
# elif mode == 'head-batch':
# for triple, qtype in triples:
# head, relations, tail = triple
# assert (relations, tail) not in count
# count[(relations, tail)] = start + len(true_missing[(0,(relations, tail))])
# return count
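# Usage sketch (illustrative only; the exact triple/answer formats are assumptions based on
# __getitem__ above): wrap TrainDataset in a DataLoader using its custom collate_fn.
def _example_train_dataset_loader(triples, nentity, nrelation, train_ans):
    from torch.utils.data import DataLoader
    dataset = TrainDataset(triples, nentity, nrelation,
                           negative_sample_size=128, train_ans=train_ans, mode='tail-batch')
    return DataLoader(dataset, batch_size=512, shuffle=True,
                      collate_fn=TrainDataset.collate_fn)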
class TrainInterDataset(Dataset):
def __init__(self, triples, nentity, nrelation, ntimestamps, negative_sample_size, train_ans, mode, ans_t, use_one_sample=False, use_two_sample=False, add_hard_neg=False, double_point_in_time=False, num_time_negatives=0):
self.len = len(triples)
self.triples = triples
self.nentity = nentity
self.nrelation = nrelation
self.ntimestamps = ntimestamps
self.negative_sample_size = negative_sample_size - num_time_negatives
self.mode = mode
self.true_missing = train_ans
self.true_ans_t = ans_t
self.qtype = self.triples[0][-1]
self.use_one_sample = use_one_sample
self.use_two_sample = use_two_sample
self.add_hard_neg = add_hard_neg
self.num_time_negatives = num_time_negatives
if double_point_in_time:
self.qtype = '2-3-inter'
# if self.qtype == '2-inter' or ((~self.use_one_sample) and (~self.use_two_sample) and self.qtype=='3-inter'):
# self.count = self.count_frequency(triples, train_ans)
if self.use_one_sample:
self.qtype = '2-inter'
assert use_one_sample * use_two_sample != 1
def __len__(self):
return self.len
def __getitem__(self, idx):
query = self.triples[idx][0] #[entity, relation, start, end]
gold_tail = self.triples[idx][1]
time_type = self.triples[idx][2]
# flat_query = np.array(query) #
### deal with full interval
if time_type == TIME_TYPE['full-interval']:
start_date = query[2]
end_date = query[3]
if self.use_one_sample: # sample one time from the inside
assert start_date != end_date # we must make sure we do not consider point in time here, excluding tuples with point-in-time
assert start_date != -1 # we must make sure we do not consider point in time here, excluding tuples with point-in-time
assert end_date != -1 # we must make sure we do not consider point in time here, excluding tuples with point-in-time
date = (np.random.random(1)*(end_date-start_date)+start_date).round()
date = int(date[0])
query = (query[0], query[1], date)
elif self.use_two_sample:
assert start_date != end_date # we must make sure we do not consider point in time here, excluding tuples with point-in-time
date_1,date_2 = (np.random.random(2)*(end_date-start_date)+start_date).round()
while (date_1 == date_2):
date_2 = (np.random.random(1)*(end_date-start_date)+start_date).round()
s_date, e_date = sorted([date_1, date_2])
query = (query[0], query[1], int(s_date), int(e_date))
# elif time_type == TIME_TYPE['only-begin']:
# query = (query[0], query[1], query[2])
elif time_type in [TIME_TYPE['only-end'], TIME_TYPE['point-in-time'], TIME_TYPE['only-begin']]:
query = query
else:
print("test ", self.triples[idx])
raise NotImplementedError
# tail = np.random.choice(list(self.true_missing[query]))
positive_sample = torch.LongTensor(list(query)+[gold_tail]) # positive samples include the flat-query but the negative samples just have the tails
if 'tail-batch' in self.mode:
subsampling_weight = self.get_subsampling_weight(query, self.true_missing)
subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))
negative_sample_list = []
negative_sample_size = 0
## in this part, for a query, hard negatives are those not in (s,r,o,t) but in (s, r, o)
if self.add_hard_neg:
assert len(self.true_missing[(query[0], query[1])]) >= len(self.true_missing[query])
hard_neg = np.array(list(set(self.true_missing[(query[0], query[1])]) - set(self.true_missing[query])), dtype=np.int)
# if hard_neg.size != 0:
negative_sample_list.append(hard_neg)
negative_sample_size += hard_neg.size
#print('qtype:', self.qtype)
##
while negative_sample_size < self.negative_sample_size:
negative_sample = np.random.randint(self.nentity, size=self.negative_sample_size*2)
mask = np.in1d(
negative_sample,
self.true_missing[query],
assume_unique=True,
invert=True
)
negative_sample = negative_sample[mask]
negative_sample_list.append(negative_sample)
negative_sample_size += negative_sample.size
negative_sample = np.concatenate(negative_sample_list)[:self.negative_sample_size]
negative_sample = torch.from_numpy(negative_sample)
time_negative_sample = None
# return positive_sample, negative_sample, subsampling_weight, self.mode
if 'time-batch' in self.mode: ## negative timestamps;
negative_sample_list_time = []
negative_sample_size = 0
sro = (query[0], query[1], gold_tail)
# subsampling_weight = self.get_subsampling_weight(sro, self.true_missing)
# subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))
groundtruth_ts = self.true_ans_t[sro]
if time_type == TIME_TYPE['only-begin']:
groundtruth_ts.update(set(np.arange(query[2], min(query[2]+20, self.ntimestamps), 1)) )
elif time_type == TIME_TYPE['only-end']:
groundtruth_ts.update(set(np.arange(max(0, query[2]-20), query[2], 1)))
# elif time_type in [TIME_TYPE['point-in-time'], TIME_TYPE['full-interval']]:
# groundtruth_ts = self.true_ans_t[sro]
# else:
# raise ValueError
while negative_sample_size < self.num_time_negatives:
time_negative_sample = np.random.randint(self.ntimestamps, size=self.num_time_negatives*2)
mask = np.in1d(
time_negative_sample,
groundtruth_ts,
assume_unique=True,
invert=True
)
time_negative_sample = time_negative_sample[mask]
negative_sample_list_time.append(time_negative_sample)
negative_sample_size += time_negative_sample.size
time_negative_sample = np.concatenate(negative_sample_list_time)[:self.num_time_negatives]
time_negative_sample = torch.from_numpy(time_negative_sample)
return positive_sample, negative_sample, time_negative_sample, subsampling_weight, self.mode
@staticmethod
def collate_fn(data):
positive_sample = torch.stack([_[0] for _ in data], dim=0)
negative_sample = torch.stack([_[1] for _ in data], dim=0)
if data[0][2] == None:
time_negative_sample = None
else:
time_negative_sample = torch.stack([_[2] for _ in data], dim=0)
subsample_weight = torch.cat([_[3] for _ in data], dim=0)
mode = data[0][4]
return positive_sample, negative_sample, time_negative_sample, subsample_weight, mode
@staticmethod
def get_subsampling_weight(new_query, true_missing, start=4):
return len(true_missing[new_query]) + start
@staticmethod
def count_frequency(triples, true_missing, start=4):
count = {}
for triple,qtype in triples:
# query = triple[:-2]
if triple not in count:
                count[triple] = start + len(true_missing[triple])
return count
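# Usage sketch (illustrative only; the mode string and sample sizes are assumptions):
# TrainInterDataset additionally draws negative timestamps when 'time-batch' is part of
# the mode, reserving num_time_negatives of the negative slots for them.
def _example_train_inter_dataset_loader(triples, nentity, nrelation, ntimestamps,
                                        train_ans, ans_t):
    from torch.utils.data import DataLoader
    dataset = TrainInterDataset(triples, nentity, nrelation, ntimestamps,
                                negative_sample_size=128, train_ans=train_ans,
                                mode='tail-batch time-batch', ans_t=ans_t,
                                num_time_negatives=32)
    return DataLoader(dataset, batch_size=512, shuffle=True,
                      collate_fn=TrainInterDataset.collate_fn)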
class TestInterDataset(Dataset):
def __init__(self, triples, test_ans, test_ans_hard, nentity, nrelation, ntimestamps, mode, use_one_sample=False, use_two_sample=False, enumerate_time=False, double_point_in_time=False, predict_o=True, predict_t=False, predict_r=False):
self.len = len(triples)
self.triples = triples
self.nentity = nentity
self.nrelation = nrelation
self.ntimestamps = ntimestamps
self.mode = mode
self.test_ans = test_ans
self.test_ans_hard = test_ans_hard
self.qtype = self.triples[0][-1]
self.use_one_sample = use_one_sample
self.use_two_sample = use_two_sample
self.enumerate_time = enumerate_time
self.predict_o = predict_o
self.predict_t = predict_t
self.predict_r = predict_r
if double_point_in_time:
self.qtype = '2-3-inter'
if self.use_one_sample: ## Once datasets are fed here, must be 2-inter or 3-inter
self.qtype = '2-inter'
if self.enumerate_time: ## Once datasets are fed here, must be 2-inter or 3-inter
self.qtype = '2-inter'
def __len__(self):
return self.len
def __getitem__(self, idx):
query = self.triples[idx][0] # used as keys to search for the filtered set
gold_tail = self.triples[idx][1]
time_type = self.triples[idx][2]
if self.predict_o:
# tail = self.triples[idx][-2] # this is not the true tail, used for place holder
negative_sample = torch.LongTensor(range(self.nentity)) # negative samples: all the entities
# flat_query = np.array(query)
# File: openmdao/jacobians/tests/test_jacobian.py
""" Test the Jacobian objects."""
import itertools
import sys
import unittest
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from openmdao.api import IndepVarComp, Group, Problem, \
ExplicitComponent, ImplicitComponent, ExecComp, \
NewtonSolver, ScipyKrylov, \
LinearBlockGS, DirectSolver
from openmdao.utils.assert_utils import assert_rel_error
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.api import ScipyOptimizeDriver
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
class MyExplicitComp(ExplicitComponent):
def __init__(self, jac_type):
super(MyExplicitComp, self).__init__()
self._jac_type = jac_type
def setup(self):
self.add_input('x', val=np.zeros(2))
self.add_input('y', val=np.zeros(2))
self.add_output('f', val=np.zeros(2))
val = self._jac_type(np.array([[1., 1.], [1., 1.]]))
if isinstance(val, list):
self.declare_partials('f', ['x','y'], rows=val[1], cols=val[2], val=val[0])
else:
self.declare_partials('f', ['x','y'], val=val)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'][0] = (x[0]-3.0)**2 + x[0]*x[1] + (x[1]+4.0)**2 - 3.0 + \
y[0]*17. - y[0]*y[1] + 2.*y[1]
outputs['f'][1] = outputs['f'][0]*3.0
def compute_partials(self, inputs, partials):
x = inputs['x']
y = inputs['y']
jac1 = self._jac_type(np.array([
[2.0*x[0] - 6.0 + x[1], 2.0*x[1] + 8.0 + x[0]],
[(2.0*x[0] - 6.0 + x[1])*3., (2.0*x[1] + 8.0 + x[0])*3.]
]))
if isinstance(jac1, list):
jac1 = jac1[0]
partials['f', 'x'] = jac1
jac2 = self._jac_type(np.array([
[17.-y[1], 2.-y[0]],
[(17.-y[1])*3., (2.-y[0])*3.]
]))
if isinstance(jac2, list):
jac2 = jac2[0]
partials['f', 'y'] = jac2
class MyExplicitComp2(ExplicitComponent):
def __init__(self, jac_type):
super(MyExplicitComp2, self).__init__()
self._jac_type = jac_type
def setup(self):
self.add_input('w', val=np.zeros(3))
self.add_input('z', val=0.0)
self.add_output('f', val=0.0)
val = self._jac_type(np.array([[7.]]))
if isinstance(val, list):
self.declare_partials('f', 'z', rows=val[1], cols=val[2], val=val[0])
else:
self.declare_partials('f', 'z', val=val)
val = self._jac_type(np.array([[1., 1., 1.]]))
if isinstance(val, list):
self.declare_partials('f', 'w', rows=val[1], cols=val[2], val=val[0])
else:
self.declare_partials('f', 'w', val=val)
def compute(self, inputs, outputs):
w = inputs['w']
z = inputs['z']
outputs['f'] = (w[0]-5.0)**2 + (w[1]+1.0)**2 + w[2]*6. + z*7.
def compute_partials(self, inputs, partials):
w = inputs['w']
z = inputs['z']
jac = self._jac_type(np.array([[
2.0*w[0] - 10.0,
2.0*w[1] + 2.0,
6.
]]))
if isinstance(jac, list):
jac = jac[0]
partials['f', 'w'] = jac
class ExplicitSetItemComp(ExplicitComponent):
def __init__(self, dtype, value, shape, constructor):
self._dtype = dtype
self._shape = shape
self._value = value
self._constructor = constructor
super(ExplicitSetItemComp, self).__init__()
def setup(self):
if self._shape == 'scalar':
in_val = 1
out_val = 1
elif self._shape == '1D_array':
in_val = np.array([1])
out_val = np.array([1, 2, 3, 4, 5])
elif self._shape == '2D_array':
in_val = np.array([1, 2, 3])
out_val = np.array([1, 2, 3])
if self._dtype == 'int':
scale = 1
elif self._dtype == 'float':
scale = 1.
elif self._dtype == 'complex':
scale = 1j
self.add_input('in', val=in_val*scale)
self.add_output('out', val=out_val*scale)
self.declare_partials(of='*', wrt='*')
def compute_partials(self, inputs, partials):
partials['out', 'in'] = self._constructor(self._value)
class SimpleCompWithPrintPartials(ExplicitComponent):
def setup(self):
self.add_input('x', val=0.0)
self.add_input('y', val=0.0)
self.add_output('f_xy', val=0.0, upper=1.0)
self.declare_partials(of='*', wrt='*')
self.count = 0
self.partials_name_pairs = []
self.partials_values = []
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
def compute_partials(self, inputs, partials):
x = inputs['x']
y = inputs['y']
partials['f_xy', 'x'] = 2.0*x - 6.0 + y
partials['f_xy', 'y'] = 2.0*y + 8.0 + x
if self.count < 1: # Only want to save these this once for the test
for k in partials:
self.partials_name_pairs.append(k)
for k, v in partials.items():
self.partials_values.append((k,v))
self.count += 1
def arr2list(arr):
"""Convert a numpy array to a 'sparse' list."""
data = []
rows = []
cols = []
for row in range(arr.shape[0]):
for col in range(arr.shape[1]):
rows.append(row)
cols.append(col)
data.append(arr[row, col])
return [np.array(data), np.array(rows), np.array(cols)]
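# Illustrative example (not used by the tests themselves): for np.array([[1., 2.], [3., 4.]])
# arr2list returns [data, rows, cols] = [[1., 2., 3., 4.], [0, 0, 1, 1], [0, 1, 0, 1]],
# i.e. the (val, rows, cols) triplet form passed to declare_partials in the components above.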
def arr2revlist(arr):
"""Convert a numpy array to a 'sparse' list in reverse order."""
lst = arr2list(arr)
return [lst[0][::-1], lst[1][::-1], lst[2][::-1]]
def inverted_coo(arr):
"""Convert an ordered coo matrix into one with columns in reverse order
so we can test unsorted coo matrices.
"""
shape = arr.shape
arr = coo_matrix(arr)
return coo_matrix((arr.data[::-1], (arr.row[::-1], arr.col[::-1])), shape=shape)
def inverted_csr(arr):
"""Convert an ordered coo matrix into a csr with columns in reverse order
so we can test unsorted csr matrices.
"""
return inverted_coo(arr).tocsr()
def _test_func_name(func, num, param):
args = []
for p in param.args:
try:
arg = p.__name__
except:
arg = str(p)
args.append(arg)
return 'test_jacobian_src_indices_' + '_'.join(args)
class TestJacobian(unittest.TestCase):
@parameterized.expand(itertools.product(
['dense', 'csc'],
[np.array, coo_matrix, csr_matrix, inverted_coo, inverted_csr, arr2list, arr2revlist],
[False, True], # not nested, nested
[0, 1], # extra calls to linearize
), name_func=_test_func_name
)
def test_src_indices(self, assembled_jac, comp_jac_class, nested, lincalls):
self._setup_model(assembled_jac, comp_jac_class, nested, lincalls)
# if we multiply our jacobian (at x,y = ones) by our work vec of 1's,
# we get fwd_check
fwd_check = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, 24., 74., 8.])
# if we multiply our jacobian's transpose by our work vec of 1's,
# we get rev_check
rev_check = np.array([35., 5., -9., 63., 3., -1., 6., -1.])
self._check_fwd(self.prob, fwd_check)
# to catch issues with constant subjacobians, repeatedly call linearize
for i in range(lincalls):
self.prob.model.run_linearize()
self._check_fwd(self.prob, fwd_check)
self._check_rev(self.prob, rev_check)
def _setup_model(self, assembled_jac, comp_jac_class, nested, lincalls):
self.prob = prob = Problem()
if nested:
top = prob.model.add_subsystem('G1', Group())
else:
top = prob.model
indep = top.add_subsystem('indep', IndepVarComp())
indep.add_output('a', val=np.ones(3))
indep.add_output('b', val=np.ones(2))
top.add_subsystem('C1', MyExplicitComp(comp_jac_class))
top.add_subsystem('C2', MyExplicitComp2(comp_jac_class))
top.connect('indep.a', 'C1.x', src_indices=[2,0])
top.connect('indep.b', 'C1.y')
top.connect('indep.a', 'C2.w', src_indices=[0,2,1])
top.connect('C1.f', 'C2.z', src_indices=[1])
top.nonlinear_solver = NewtonSolver(solve_subsystems=False)
top.nonlinear_solver.linear_solver = ScipyKrylov(maxiter=100)
top.linear_solver = ScipyKrylov(
maxiter=200, atol=1e-10, rtol=1e-10, assemble_jac=True)
top.options['assembled_jac_type'] = assembled_jac
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
def _check_fwd(self, prob, check_vec):
d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()
work = d_outputs._clone()
work.set_const(1.0)
# fwd apply_linear test
d_outputs.set_const(1.0)
prob.model.run_apply_linear(['linear'], 'fwd')
d_residuals._data[:] = d_residuals._data - check_vec
self.assertAlmostEqual(d_residuals.get_norm(), 0)
# fwd solve_linear test
d_outputs.set_const(0.0)
d_residuals._data[:] = check_vec
prob.model.run_solve_linear(['linear'], 'fwd')
d_outputs -= work
self.assertAlmostEqual(d_outputs.get_norm(), 0, delta=1e-6)
def _check_rev(self, prob, check_vec):
d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()
work = d_outputs._clone()
work.set_const(1.0)
# rev apply_linear test
d_residuals.set_const(1.0)
prob.model.run_apply_linear(['linear'], 'rev')
d_outputs._data[:] = d_outputs._data - check_vec
self.assertAlmostEqual(d_outputs.get_norm(), 0)
# rev solve_linear test
d_residuals.set_const(0.0)
d_outputs._data[:] = check_vec
prob.model.run_solve_linear(['linear'], 'rev')
d_residuals -= work
self.assertAlmostEqual(d_residuals.get_norm(), 0, delta=1e-6)
dtypes = [
('int', 1),
('float', 2.1),
# ('complex', 3.2 + 1.1j), # TODO: enable when Vectors support complex entries.
]
shapes = [
('scalar', lambda x: x, (1, 1)),
('1D_array', lambda x: np.array([x + i for i in range(5)]), (5, 1)),
('2D_array', lambda x: np.array([[x + i + 2 * j for i in range(3)] for j in range(3)]),
(3, 3))
]
@parameterized.expand(itertools.product(dtypes, shapes), name_func=
lambda f, n, p: '_'.join(['test_jacobian_set_item', p.args[0][0], p.args[1][0]]))
def test_jacobian_set_item(self, dtypes, shapes):
shape, constructor, expected_shape = shapes
dtype, value = dtypes
prob = Problem()
comp = ExplicitSetItemComp(dtype, value, shape, constructor)
prob.model.add_subsystem('C1', comp)
prob.setup()
prob.set_solver_print(level=0)
prob.run_model()
prob.model.run_apply_nonlinear()
prob.model.run_linearize()
expected = constructor(value)
J = prob.model._subsystems_allprocs[0]._jacobian
jac_out = J['out', 'in']
self.assertEqual(len(jac_out.shape), 2)
expected_dtype = np.promote_types(dtype, float)
self.assertEqual(jac_out.dtype, expected_dtype)
assert_rel_error(self, jac_out, np.atleast_2d(expected).reshape(expected_shape), 1e-15)
def test_group_assembled_jac_with_ext_mat(self):
class TwoSellarDis1(ExplicitComponent):
"""
Component containing Discipline 1 -- no derivatives version.
"""
def setup(self):
self.add_input('z', val=np.zeros(2))
self.add_input('x', val=np.zeros(2))
self.add_input('y2', val=np.ones(2))
self.add_output('y1', val=np.ones(2))
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
z1 = inputs['z'][0]
z2 = inputs['z'][1]
x1 = inputs['x']
y2 = inputs['y2']
outputs['y1'][0] = z1**2 + z2 + x1[0] - 0.2*y2[0]
outputs['y1'][1] = z1**2 + z2 + x1[0] - 0.2*y2[0]
def compute_partials(self, inputs, partials):
"""
Jacobian for Sellar discipline 1.
"""
partials['y1', 'y2'] =np.array([[-0.2, 0.], [0., -0.2]])
partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0], [2.0 * inputs['z'][0], 1.0]])
partials['y1', 'x'] = np.eye(2)
class TwoSellarDis2(ExplicitComponent):
def setup(self):
self.add_input('z', val=np.zeros(2))
self.add_input('y1', val=np.ones(2))
self.add_output('y2', val=np.ones(2))
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
z1 = inputs['z'][0]
z2 = inputs['z'][1]
y1 = inputs['y1']
# Note: this may cause some issues. However, y1 is constrained to be
# above 3.16, so lets just let it converge, and the optimizer will
# throw it out
if y1[0].real < 0.0:
y1[0] *= -1
if y1[1].real < 0.0:
y1[1] *= -1
outputs['y2'][0] = y1[0]**.5 + z1 + z2
outputs['y2'][1] = y1[1]**.5 + z1 + z2
def compute_partials(self, inputs, J):
y1 = inputs['y1']
if y1[0].real < 0.0:
y1[0] *= -1
if y1[1].real < 0.0:
y1[1] *= -1
J['y2', 'y1'] = np.array([[.5*y1[0]**-.5, 0.], [0., .5*y1[1]**-.5]])
J['y2', 'z'] = np.array([[1.0, 1.0], [1.0, 1.0]])
prob = Problem()
model = prob.model
model.add_subsystem('px', IndepVarComp('x', np.array([1.0, 1.0])), promotes=['x'])
model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
sup = model.add_subsystem('sup', Group(), promotes=['*'])
sub1 = sup.add_subsystem('sub1', Group(), promotes=['*'])
sub2 = sup.add_subsystem('sub2', Group(), promotes=['*'])
d1 = sub1.add_subsystem('d1', TwoSellarDis1(), promotes=['x', 'z', 'y1', 'y2'])
sub2.add_subsystem('d2', TwoSellarDis2(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1[0] - y1[1]', y1=np.array([0.0, 0.0])),
promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', ExecComp('con2 = y2[0] + y2[1] - 24.0', y2=np.array([0.0, 0.0])),
promotes=['con2', 'y2'])
model.linear_solver = LinearBlockGS()
sup.linear_solver = LinearBlockGS()
sub1.linear_solver = DirectSolver(assemble_jac=True)
sub2.linear_solver = DirectSolver(assemble_jac=True)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
import discord
from discord.ext import commands
from cogs.utils import hypixel
from datetime import datetime
import json
import aiohttp
from concurrent.futures import ThreadPoolExecutor
import asyncio
import requests
class staff(commands.Cog, name="Staff"):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['req', 'requirement'])
async def requirements(self, ctx):
"""Lists the requirements
"""
embed = discord.Embed(title="Miscellaneous Guild Requirements",
description="These requirements are subject to change!",
color=0x8368ff)
embed.add_field(name="Active", value=f"• {format(self.bot.active, ',d')} Weekly Guild Experience",
inline=False)
embed.add_field(name="Do Not Kick List Eligibility",
value=f"• {format(self.bot.dnkl, ',d')} Weekly Guild Experience", inline=False)
embed.add_field(name="Resident", value=f"• {format(self.bot.resident_req, ',d')} Weekly Guild Experience",
inline=False)
embed.add_field(name="Member", value=f"• {format(self.bot.inactive, ',d')} Weekly Guild Experience",
inline=False)
embed.add_field(name="New Member", value=f"• {format(self.bot.new_member, ',d')} Daily Guild Experience",
inline=False)
embed.set_footer(text="You are considered a New Member for the first 7 days after joining the guild"
"\nIf you fail to meet the New Member/Member requirements, you will be kicked!")
await ctx.send(embed=embed)
@commands.command(aliases=['res'])
async def resident(self, ctx):
"""Lists the methods to get the resident rank
"""
embed = discord.Embed(title='How to get Resident?',
description='To be eligible for Resident, you must be one of the following',
color=0x8368ff)
embed.add_field(name="Veteran", value="Be in the guild for more than 1 year",
inline=False)
embed.add_field(name="Youtuber",
value="If you're a youtuber with more than 5,000 subscribers, you aren't subject to any guild requirements.",
inline=False)
embed.add_field(name="<NAME>", value="Spend Money on the guild by doing giveaways, sponsoring events!",
inline=False)
embed.add_field(name="GvG Team", value="Be an exceptional GvG player.",
inline=False)
embed.set_footer(
text=f"Everyone who has the resident rank must get {format(self.bot.resident_req, ',d')} weekly guild experience! (Except YouTubers)")
await ctx.send(embed=embed)
@commands.command(aliases=['ticket'])
async def tickets(self, ctx):
"""Explains the entire ticket system
"""
embed = discord.Embed(title="How to create a ticket?",
color=0x8368ff)
embed.add_field(name="Go to #🎟-tickets-🎟",
value="#🎟-tickets-🎟 is located in the noticeboard category",
inline=False)
embed.add_field(name="Tickets can be created for the following reasons",
value="> Discord Nick/Role & Tag Change/\n"
"> Do not kick list\n"
"> Problems/Queries/Complaint/Suggestion\n"
"> Reporting a player\n"
"> Milestone\n"
"> Staff Application\n"
"> Event\n"
"> Other",
inline=False)
embed.add_field(name="Click the button under the message sent by @Miscellaneous",
value="The following image shows you what you need to click.",
inline=False)
embed.set_image(
url=f"https://media.discordapp.net/attachments/813075018820222976/870799273892147230/unknown.png?width=1316&height=671")
await ctx.send(embed=embed)
@commands.command()
@commands.has_role(538015368782807040)
async def inactive(self, ctx):
"""Prints a list of users who need to be promoted, demoted, warned and kicked!
"""
msg = await ctx.send("**Please wait!**\n `Approximate wait time: Calculating`")
api = hypixel.get_api()
async with aiohttp.ClientSession() as session:
async with session.get(f'https://api.hypixel.net/guild?key={api}&name=Miscellaneous') as resp:
g = await resp.json()
await session.close()
with open('dnkl.json') as f:
data = json.load(f)
dnkl_names = data.keys()
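# Four buckets, filled while walking the guild member list below:
#   activearray       - Members above the weekly requirement (to be promoted)
#   activedemotearray - Active-ranked players below the requirement (to be demoted)
#   veteranarray      - Residents below the resident requirement (to be warned)
#   inactivearray     - Members below the inactive threshold (to be kicked)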
activearray = {}
activedemotearray = {}
inactivearray = {}
veteranarray = {}
exp = 0
await msg.edit(content=f"**Please wait!**\n `Approximate wait time: 15 seconds`")
async with ctx.channel.typing():
for i in range(len(g['guild']['members'])):
expHistory = sum(g['guild']['members'][i]['expHistory'].values())
rank = g['guild']['members'][i]['rank']
joined = g['guild']['members'][i]['joined']
if expHistory >= self.bot.active and rank == "Member":
uuid = g['guild']['members'][i]['uuid']
async with aiohttp.ClientSession() as session:
async with session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}') as resp:
a = await resp.json()
await session.close()
name = a['name']
time = str(datetime.fromtimestamp(int(str(joined)[:-3])))
dt = (time[0:10])
if name in dnkl_names:
name = name + f'[DNKL]\n{dt}'
else:
name = name + f'[{rank}]\n{dt}'
exp += expHistory
activearray[name] = exp
exp = 0
elif expHistory < self.bot.active and rank == "Active":
uuid = g['guild']['members'][i]['uuid']
async with aiohttp.ClientSession() as session:
async with session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}') as resp:
a = await resp.json()
await session.close()
name = a['name']
time = str(datetime.fromtimestamp(int(str(joined)[:-3])))
dt = (time[0:10])
if name in dnkl_names:
name = name + f'[DNKL]\n{dt}'
else:
name = name + f'[{rank}]\n{dt}'
exp += expHistory
activedemotearray[name] = exp
exp = 0
elif expHistory < self.bot.inactive:
if rank == "Member":
uuid = g['guild']['members'][i]['uuid']
async with aiohttp.ClientSession() as session:
async with session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}') as resp:
a = await resp.json()
time = str(datetime.fromtimestamp(int(str(joined)[:-3])))
name = a['name']
dt = (time[0:10])
if name in dnkl_names:
name = name + f'[DNKL]\n{dt}'
else:
name = name + f'[{rank}]\n{dt}'
exp += expHistory
inactivearray[name] = exp
exp = 0
elif rank == "Resident":
if expHistory < self.bot.resident_req:
uuid = g['guild']['members'][i]['uuid']
async with aiohttp.ClientSession() as session:
async with session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}') as resp:
a = await resp.json()
time = str(datetime.fromtimestamp(int(str(joined)[:-3])))
name = a['name']
dt = (time[0:10])
if name in dnkl_names:
name = name + f'[DNKL]\n{dt}'
else:
name = name + f'[{rank}]\n{dt}'
exp += expHistory
veteranarray[name] = exp
exp = 0
else:
pass
ActivesortedList = sorted(activearray.items(), key=lambda x: x[1], reverse=True)
ActiveDemoteSortedList = sorted(activedemotearray.items(), key=lambda x: x[1], reverse=True)
VeteransortedList = sorted(veteranarray.items(), key=lambda x: x[1], reverse=True)
InactivesortedList = sorted(inactivearray.items(), key=lambda x: x[1], reverse=True)
await msg.edit(content="**Please wait!**\n `The embeds are being sent!`")
# --------------------------------------------------------------- ACTIVE PROMOTION ---------------------------------------------------------------
embed = discord.Embed(title=f"Promote the following users:",
description=f"Total: {len(ActivesortedList)}", color=0x43b581)
y = 0
if len(ActivesortedList) <= 25:
for user in ActivesortedList:
embed.add_field(name=f"{user[0]}", value=f"```cs\n{format(user[1], ',d')}```", inline=True)
await ctx.send(embed=embed)
else:
for user in ActivesortedList:
y = y + 1
embed.add_field(name=f"{user[0]}", value=f"```cs\n{format(user[1], ',d')}```", inline=True)
if len(embed.fields) >= 25:
await ctx.send(embed=embed)
embed.clear_fields()
embed = discord.Embed(title="", color=0x43b581)
elif y == len(ActivesortedList):
await ctx.send(embed=embed)
# --------------------------------------------------------------- ACTIVE DEMOTION ----------------------------------------------------------------
embed = discord.Embed(title=f"Demote the following users:",
description=f"Total: {len(ActiveDemoteSortedList)}", color=0xf04747)
z = 0
if len(ActiveDemoteSortedList) <= 25:
for user in ActiveDemoteSortedList:
embed.add_field(name=f"{user[0]}", value=f"```cs\n{format(user[1], ',d')}```", inline=True)
await ctx.send(embed=embed)
else:
for user in ActiveDemoteSortedList:
z = z + 1
embed.add_field(name=f"{user[0]}", value=f"```cs\n{format(user[1], ',d')}```", inline=True)
if len(embed.fields) >= 25:
await ctx.send(embed=embed)
embed.clear_fields()
embed = discord.Embed(title="", color=0xf04747)
elif z == len(ActiveDemoteSortedList):
await ctx.send(embed=embed)
# --------------------------------------------------------------- VETERAN WARNING ----------------------------------------------------------------
embed = discord.Embed(
title=f"Following are the residents who don't meet the requirements:",
description=f"Total: {len(VeteransortedList)}", color=0xe5ba6c)
w = 0
if len(VeteransortedList) <= 25:
for user in VeteransortedList:
embed.add_field(name=f"{user[0]}", value=f"```\n{format(user[1], ',d')}```", inline=True)
await ctx.send(embed=embed)
else:
for user in VeteransortedList:
w = w + 1
embed.add_field(name=f"{user[0]}", value=f"```\n{format(user[1], ',d')}```", inline=True)
if len(embed.fields) >= 25:
await ctx.send(embed=embed)
embed.clear_fields()
embed = discord.Embed(title="", color=0xe5ba6c)
elif w == len(VeteransortedList):
await ctx.send(embed=embed)
# --------------------------------------------------------------- INACTIVE MEMBERS ---------------------------------------------------------------
embed = discord.Embed(title=f"The users to be kicked are as follows:",
description=f"Total: {len(InactivesortedList)}", color=0xf04747)
x = 0
if len(InactivesortedList) <= 25:
for user in InactivesortedList:
embed.add_field(name=f"{user[0]}", value=f"```cs\n{format(user[1], ',d')}```", inline=True)
await ctx.send(embed=embed)
else:
for user in InactivesortedList:
x = x + 1
embed.add_field(name=f"{user[0]}", value=f"```cs\n{format(user[1], ',d')}```", inline=True)
if len(embed.fields) >= 25:
await ctx.send(embed=embed)
embed.clear_fields()
embed = discord.Embed(title="", color=0xf04747)
elif x == len(InactivesortedList):
await ctx.send(embed=embed)
await msg.delete()
@commands.command()
@commands.has_role(538015368782807040)
async def rolecheck(self, ctx, send_ping=None):
"""Checks the roles of all the users and changes them on the basis of their guild
"""
msg = await ctx.send("**Processing all the prerequisites**")
misc_uuids = await hypixel.get_guild_members("Miscellaneous")
misc_members, ally_members, ally_uuids = [], [], []
for x in self.bot.misc_allies:
ally_uuids = ally_uuids + await hypixel.get_guild_members(x)
# Miscellaneous Member Names
await msg.edit(content="**Processing** - 1/2")
with ThreadPoolExecutor(max_workers=10) as executor:
with requests.Session() as session:
# Set any session parameters here before calling `fetch`
loop = asyncio.get_event_loop()
tasks = [
loop.run_in_executor(
executor,
hypixel.fetch,
*(session, individual_misc_uuid) # Allows us to pass in multiple arguments to `fetch`
)
for individual_misc_uuid in misc_uuids
]
for response in await asyncio.gather(*tasks): # Puts the result into a list
misc_members.append(response)
# Ally Member Names
await msg.edit(content="**Processing** - 2/2")
with ThreadPoolExecutor(max_workers=10) as executor:
with requests.Session() as session:
# Set any session parameters here before calling `fetch`
loop = asyncio.get_event_loop()
tasks = [
loop.run_in_executor(
executor,
hypixel.fetch,
*(session, individual_ally_uuid) # Allows us to pass in multiple arguments to `fetch`
)
for individual_ally_uuid in ally_uuids
]
for response in await asyncio.gather(*tasks): # Puts the result into a list
ally_members.append(response)
for guild in self.bot.guilds:
if str(guild) == "Miscellaneous [MISC]": # Check if the Discord is Miscellaneous
for member in guild.members:  # iterate over every member of the Discord server
if member.id not in self.bot.adminids and member.bot is False:
name = await hypixel.name_grabber(member)
message = await ctx.send(f"Checking {name}")
async with aiohttp.ClientSession() as session:
async with session.get(f'https://api.mojang.com/users/profiles/minecraft/{name}') as mojang:
if mojang.status != 200: # If the IGN is invalid
await member.remove_roles(self.bot.member_role, self.bot.guest)
await member.add_roles(self.bot.new_member_role)
await message.edit(content=
f"{name} ||{member}|| Player doesn't exist. **++New Member | --Member | -- Guest**")
continue
elif self.bot.guild_master not in member.roles:
mojang_json = await mojang.json()
ign = mojang_json["name"]
uuid = mojang_json['id']
await session.close()
# Miscellaneous
if ign in misc_members and ign not in (
"Rowdies", "PolarPowah", "LBROz", "Fantastic_Doge", "ElijahRus", "BotTyler"):
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://api.hypixel.net/guild?key={hypixel.get_api()}&player={uuid}") as resp:
req = await resp.json()
await session.close()
if self.bot.member_role not in member.roles:
await member.add_roles(self.bot.member_role)
await member.remove_roles(self.bot.new_member_role, self.bot.guest)
has_tag_perms = any(role in ctx.author.roles for role in self.bot.tag_allowed_roles)
for user in req['guild']["members"]:
if
#!/usr/bin/env python3
#
# "Adventure Game"
#
# This is a text-based, science-fiction-themed adventure game, written in Python 3.
# The database back end is created in SQLite3.
#
# Main authors <NAME> and <NAME>.
# Contributions welcome.
#
# Github page: https://github.com/colebrooke/adventure
#
# For setup instructions, see README.md
#
debug = 0
import sqlite3 as sqlitedb
import sys
import os
import time
import fnmatch # this is for pattern matching files
import re # for regex string matching
import random # for battles and npc moves
os.system('cls')
os.system('clear')
conection = sqlitedb.connect('game.db')
def p(debugstring):
if (debug == 1):
print ("%s" % debugstring)
#conection.text_factory = str
def db(sqlstring):
conection = sqlitedb.connect('game.db')
#---------------------------------------------------------
with conection:
cursor = conection.cursor()
cursor.execute("%s" % sqlstring)
row = cursor.fetchone()
cursor.close()
return ("%s" % (row))
#---------------------------------------------------------
def db_print_rows(sqlstring, colour='none'):
#---------------------------------------------------------
with conection:
cursor = conection.cursor()
cursor.execute("%s" % sqlstring)
for row in cursor:
if colour == 'none': print ("%s" % (row))
if colour == 'yellow': print ('\033[1;33m%s\033[1;m' % (row))
if colour == 'magenta': print ('\033[1;35m%s\033[1;m' % (row))
if colour == 'grey': print ('\033[1;30m%s\033[1;m' % (row))
if colour == 'bold': print ('\033[1;37m%s\033[1;m' % (row))
cursor.close()
#---------------------------------------------------------
def db_print_rows_numbered(sqlstring):
#---------------------------------------------------------
with conection:
cursor = conection.cursor()
cursor.execute("%s" % sqlstring)
a = 1
for row in cursor:
print ( " ", row[0], " ", row[1])
cursor.close()
#---------------------------------------------------------
def db_return_rows(sqlstring):
#---------------------------------------------------------
with conection:
cursor = conection.cursor()
cursor.execute("%s" % sqlstring)
row = cursor.fetchall()
cursor.close()
return row
#---------------------------------------------------------
def db_query(sqlstring):
#---------------------------------------------------------
with conection:
cursor = conection.cursor()
cursor.execute("%s" % sqlstring)
rows = cursor.fetchall()
cursor.close()
# First need to convert each tuple in the list to a list within a list:
row_list = [list(row) for row in rows ]
#This gets the length of the list of items:
#print ("number of items: %s" % len(item_list) )
# Now need to convert each list within the list to a normal string...
for i, row_text in enumerate(row_list):
row_list[i] = row_text[0]
return row_list
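# Hypothetical usage sketch (item names are illustrative, not from the real game.db):
#   db_query("select lower(itemname) from item where currentroom=1")
# returns a flat list of first-column strings such as ['torch', 'keycard'], which is
# the shape the membership tests in examine() and drop() below rely on.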
#---------------------------------------------------------
def scroll(lines):
#----------------------------------------------------------------------
for x in range (0, lines):
print ("")
time.sleep(0.1)
#----------------------------------------------------------------------
def where_am_i(userid):
#----------------------------------------------------------------------
current_room = db("select location from user where userid=%s" % ( userid ))
current_room_name = db("select roomname from rooms where roomid=%s" % current_room)
print ("Your current location is: %s" % current_room_name )
time.sleep(0.3)
print ("")
#----------------------------------------------------------------------
def print_current_room(userid):
#---------------------------------------------------------
current_room = db("select location from user where userid=%s" % ( userid ))
#print ("current location for this user is %s" % (current_room))
#current_room = get_current_room ( userid )
print ("")
print ("*********************************************************************")
print ("")
# print room name...
db_print_rows("select roomname from rooms where roomid=%s" % current_room, 'bold')
print ("")
time.sleep(0.3)
# print room description...
db_print_rows("select roomdesc from rooms where roomid=%s" % current_room, 'grey' )
print ("")
time.sleep(0.3)
# print available routes to other rooms...
db_print_rows("select route_desc from route where from_id=%s" % current_room )
time.sleep(0.3)
print ("")
# print any NPCs that may be in the room...
db_print_rows("select npcshortdesc from npc where npcroom=%s and npcstatus=1" % current_room, 'yellow' )
print ("")
# print any non-living npcs in the room...
db_print_rows("select npcdeaddesc from npc where npcroom=%s and npcstatus=0" % current_room, 'yellow' )
print ("")
time.sleep(0.3)
# print any objects in the room
#db_print_rows("select objectdesc_short from object where roomid=%s" % current_room )
#print ("")
#time.sleep(0.3)
# print any items in the room, if they are not in the users inventory...
db_print_rows( "select itemdesc_short from item where currentroom = %s \
and itemid not in ( select distinct itemid from inventory \
where userid = %s ) order by static desc" % ( current_room, userid ), 'magenta')
time.sleep(0.3)
#---------------------------------------------------------
def get_current_room( userid ):
#----------------------------------------------------------------------
current_room = db("select location from user where userid=%s" % ( userid ))
return current_room
#----------------------------------------------------------------------
def move( direction, userid ):
#---------------------------------------------------------
room = get_current_room ( userid )
p("current room %s " % room)
p("chosen direciton %s " % direction)
p("userid %s " % userid)
#illegal = 0
next_room = db("select to_id from route where from_id=%s and direction='%s'" % ( room, direction ))
p ("next room query result: %s " % next_room )
#time.sleep(3)
# current_room = next_room
if (next_room == "None") or (not next_room):
#current_room = room
#illegal = 1
print("You can't go in that direction!")
else:
# add one to the user moves
db("update user set moves = moves + 1 where userid = %s" % ( userid ))
# set the users current location in the db
db("update user set location = %s where userid = %s" % ( next_room, userid ))
# print the room description for the user
scroll (5)
print_current_room( userid )
# return (current_room,illegal)
#--------------------------------------------------------
def examine ():
#---------------------------------------------------------
current_room = db("select location from user where userid=%s" % ( userid ))
available_items = db_query("select lower(itemname) from item where currentroom=%s" % current_room )
# available_objects = db_query("select lower(objectname) from object where roomid=%s" % current_room )
available_npcs = db_query("select lower(npcname) from npc where npcroom =%s" % current_room )
available_inventory = ( "select lower(itemname) from user as U join \
inventory as I on U.userid=I.userid join \
item as D on I.itemid=D.itemid \
where I.userid=%s" % ( userid ))
# strip the action term from the user input to leave the item your examining.
if re.match (r'look at', userinput ):
thing_to_examine = userinput.replace('look at ', '')
else:
thing_to_examine = userinput.replace('examine ', '')
if thing_to_examine in available_items:
print ("")
print ("You examine the %s..." % thing_to_examine )
time.sleep(1.3)
print ("")
db_print_rows("select itemdesc from item where lower(itemname)='%s'" % thing_to_examine)
print ("")
time.sleep(0.8)
elif thing_to_examine in available_inventory:
print ("")
print ("You examine the %s..." % thing_to_examine )
time.sleep(1.3)
print ("")
db_print_rows("select objectdesc from object where lower(objectname)='%s'" % thing_to_examine)
elif thing_to_examine in available_npcs:
npc_id = db("select npcid from npc where lower(npcname)='%s'" % thing_to_examine)
npc_alive = db("select npcstatus from npc where lower(npcname)='%s'" % thing_to_examine)
print ("")
print ("You examine the %s..." % thing_to_examine )
time.sleep(1.3)
print ("")
if int(npc_alive) == 1:
db_print_rows("select npclongdesc from npc where lower(npcname)='%s'" % thing_to_examine)
else:
db_print_rows("select npcdeaddesc from npc where lower(npcname)='%s'" % thing_to_examine)
print ("The %s is carrying the following items:" % thing_to_examine)
db_print_rows( "select lower(itemname) from npc as U join npc_inventory as I on U.npcid=I.npcid join \
item as D on I.itemid=D.itemid \
where I.npcid=%s" % ( npc_id ))
else:
print ("You can't do that.")
#---------------------------------------------------------
def inventory ():
#---------------------------------------------------------
print ("You have the following items in your inventory:-")
db_print_rows( "select itemname from user as U join \
inventory as I on U.userid=I.userid join \
item as D on I.itemid=D.itemid \
where I.userid=%s" % (userid))
#---------------------------------------------------------
def take ():
#---------------------------------------------------------
current_room = db("select location from user where userid=%s" % ( userid ))
available_items = db_return_rows("select itemname from item where currentroom=%s" % current_room )
inventory = db_return_rows("select itemname from user as U join \
inventory as I on U.userid=I.userid join \
item as D on I.itemid=D.itemid \
where I.userid=%s" % (userid))
# For the inventory...
# First need to convert each tuple in the list to a list within a list:
inventory_list = [list(i) for i in inventory ]
# Now need to convert each list within the list to a normal string...
for i, item_name in enumerate(inventory_list):
inventory_list[i] = item_name[0]
# For the items in the room...
# First need to convert each tuple in the list to a list within a list:
item_list = [list(i) for i in available_items ]
# Now need to convert each list within the list to a normal string...
for i, item_name in enumerate(item_list):
item_list[i] = item_name[0]
# So that the list can be searched with the 'in' command...
# strip the action term from the user input to leave the item you want to take.
if re.match (r'take', userinput ):
thing_to_take = userinput.replace('take ', '')
elif re.match (r'pick up', userinput ):
thing_to_take = userinput.replace('pick up ', '')
elif re.match (r'get', userinput ):
thing_to_take = userinput.replace('get ', '')
if re.match (r'the', thing_to_take ):
thing_to_take = thing_to_take.replace('the ', '')
if thing_to_take in item_list:
if thing_to_take in inventory_list:
print ("The %s looks similar to one you already have." % thing_to_take )
print ("You decide to leave it for another adventurer to find.")
else:
thing_to_take_id = db("select itemid from item where itemname = '%s'" % thing_to_take )
item_is_static = db("select static from item where itemid = %s" % thing_to_take_id )
if item_is_static == '1':
print ("The %s is too heavy for you to take with you!" % thing_to_take )
else:
print ("You take the %s." % thing_to_take )
db("insert into inventory (userid, itemid) values ( '%s', '%s' )" % (userid, thing_to_take_id) )
else:
print ("You can't do that!")
#--------------------------------------------------------
def drop ():
#--------------------------------------------------------
inventory = db_query("select itemname from user as U join \
inventory as I on U.userid=I.userid join \
item as D on I.itemid=D.itemid \
where I.userid=%s" % (userid))
if re.match (r'drop', userinput ):
thing_to_drop = userinput.replace('drop ', '')
elif re.match (r'put down', userinput ):
thing_to_drop = userinput.replace('put down ', '')
if thing_to_drop in inventory:
print ("You drop the %s." % thing_to_drop )
# find the id of the item...
thing_to_drop_id = db("select itemid from item where itemname = '%s'" % thing_to_drop )
# delete the item from our inventory...
db("delete from inventory where userid = %s and itemid = %s" % (userid, thing_to_drop_id) )
current_room = db("select location from user where userid=%s" % ( userid ))
db("update item set currentroom = %s where itemid = %s" % (current_room, thing_to_drop_id) )
else:
print ("You can't do that!")
def open ():
#--------------------------------------------------------
current_room = db("select location from user where userid=%s" % ( userid ))
if current_room == "1":
# you must be in the apartment, so you must be opening the trophy cabinet
# TODO: do you have the key?
time.sleep(0.3)
print ("You | |
# pylint: disable=missing-function-docstring
"""Tests for '_continuous.py' file"""
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import scipy.stats.distributions as distrs
from scipy.stats.kde import gaussian_kde
from scipy.integrate import quad
import pytest
from randomvars._continuous import Cont
from randomvars.tests.commontests import (
DECIMAL,
_test_equal_rand,
_test_equal_seq,
_test_from_rv_rand,
_test_from_sample_rand,
_test_input_coercion,
_test_log_fun,
_test_one_value_input,
_test_rvs_method,
declass,
h,
)
from randomvars.options import config
DISTRIBUTIONS_COMMON = {
"beta": distrs.beta(a=10, b=20),
"chi_sq": distrs.chi2(df=10),
"expon": distrs.expon(),
"f": distrs.f(dfn=20, dfd=20),
"gamma": distrs.gamma(a=10),
"laplace": distrs.laplace(),
"lognorm": distrs.lognorm(s=0.5),
"norm": distrs.norm(),
"norm2": distrs.norm(loc=10),
"norm3": distrs.norm(scale=0.1),
"norm4": distrs.norm(scale=10),
"norm5": distrs.norm(loc=10, scale=0.1),
"t": distrs.t(df=10),
"uniform": distrs.uniform(),
"uniform2": distrs.uniform(loc=10, scale=0.1),
"weibull_max": distrs.weibull_max(c=2),
"weibull_min": distrs.weibull_min(c=2),
}
DISTRIBUTIONS_INF_DENSITY = {
"inf_beta_both": distrs.beta(a=0.4, b=0.6),
"inf_beta_left": distrs.beta(a=0.5, b=2),
"inf_beta_right": distrs.beta(a=2, b=0.5),
"inf_chi_sq": distrs.chi2(df=1),
"inf_weibull_max": distrs.weibull_max(c=0.5),
"inf_weibull_min": distrs.weibull_min(c=0.5),
}
DISTRIBUTIONS_HEAVY_TAILS = {
"heavy_cauchy": distrs.cauchy(),
"heavy_lognorm": distrs.lognorm(s=1),
"heavy_t": distrs.t(df=2),
}
DISTRIBUTIONS = {
**DISTRIBUTIONS_COMMON,
**DISTRIBUTIONS_HEAVY_TAILS,
**DISTRIBUTIONS_INF_DENSITY,
}
def augment_grid(x, n_inner_points):
test_arr = [
np.linspace(x[i], x[i + 1], n_inner_points + 1, endpoint=False)
for i in np.arange(len(x) - 1)
]
test_arr.append([x[-1]])
return np.concatenate(test_arr)
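# Worked example (follows directly from the code above): augment_grid([0, 1, 2], 2)
# keeps every original grid point and adds 2 evenly spaced inner points per interval,
# giving [0, 1/3, 2/3, 1, 4/3, 5/3, 2].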
def from_sample_cdf_max_error(x):
rv = Cont.from_sample(x)
density = config.estimator_cont(x)
x_grid = augment_grid(rv.x, 10)
# Efficient way of computing `quad(density, -np.inf, x_grid)`
x_grid_ext = np.concatenate([[-np.inf], x_grid])
cdf_intervals = np.array(
[
quad(density, x_l, x_r)[0]
for x_l, x_r in zip(x_grid_ext[:-1], x_grid_ext[1:])
]
)
cdf_grid = np.cumsum(cdf_intervals)
err = cdf_grid - rv.cdf(x_grid)
return np.max(np.abs(err))
def circle_fun(x, low, high):
x = np.array(x)
center = 0.5 * (high + low)
radius = 0.5 * (high - low)
res = np.zeros_like(x)
center_dist = np.abs(x - center)
is_in = center_dist <= radius
res[is_in] = np.sqrt(radius ** 2 - center_dist[is_in] ** 2)
return res
def make_circ_density(intervals):
"""Construct circular density
Density looks like half-circles with diameters lying in elements of
`intervals`. Total integral is equal to 1.
Parameters
----------
intervals : iterable with elements being 2-element iterables
Iterable of intervals with non-zero density.
Returns
-------
density : callable
Function which returns density values.
"""
def density(x):
res = np.zeros_like(x)
tot_integral = 0
for low, high in intervals:
res += circle_fun(x, low, high)
# There is only half of circle
tot_integral += np.pi * (high - low) ** 2 / 8
return res / tot_integral
return density
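# Minimal usage sketch (only relies on numpy, already imported in this module):
# the returned density should integrate to ~1 over the union of the intervals.
#
#   density = make_circ_density([(0, 1), (2, 3)])
#   grid = np.linspace(-1, 4, 100001)
#   np.trapz(density(grid), grid)  # ~= 1.0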
class TestCont:
"""Regression tests for `Cont` class"""
def test_init_errors(self):
def check_one_input(def_args, var):
with pytest.raises(TypeError, match=f"`{var}`.*numpy array"):
def_args[var] = {"a": None}
Cont(**def_args)
with pytest.raises(TypeError, match=f"`{var}`.*float"):
def_args[var] = ["a", "a"]
Cont(**def_args)
with pytest.raises(TypeError, match=f"`{var}`.*finite values"):
def_args[var] = [0, np.nan]
Cont(**def_args)
with pytest.raises(TypeError, match=f"`{var}`.*finite values"):
def_args[var] = [0, np.inf]
Cont(**def_args)
with pytest.raises(ValueError, match=f"`{var}`.*1d array"):
def_args[var] = [[0, 1]]
Cont(**def_args)
check_one_input({"y": [1, 1]}, "x")
check_one_input({"x": [0, 1]}, "y")
with pytest.raises(ValueError, match="[Ll]engths.*match"):
Cont([0, 1], [1, 1, 1])
with pytest.raises(ValueError, match="two"):
Cont([1], [1])
with pytest.warns(UserWarning, match="`x`.*not sorted.*`x` and `y`"):
rv = Cont([1, 0], [0, 2])
rv_ref = Cont([0, 1], [2, 0])
_test_equal_rand(rv, rv_ref)
with pytest.raises(ValueError, match="`y`.*negative"):
Cont([0, 1], [1, -1])
with pytest.raises(ValueError, match="`y`.*no positive"):
Cont([0, 1], [0, 0])
def test_init(self):
x_ref = np.array([0, 1, 2])
y_ref = np.array([0, 1, 0])
rv_ref = Cont(x_ref, y_ref)
# Simple case with non-numpy input
rv_1 = Cont(x=x_ref.tolist(), y=y_ref.tolist())
_test_equal_rand(rv_1, rv_ref)
# Check if `y` is normalized
rv_2 = Cont(x=x_ref, y=10 * y_ref)
_test_equal_rand(rv_2, rv_ref)
# Check if `x` and `y` are rearranged if not sorted
with pytest.warns(UserWarning, match="`x`.*not sorted"):
rv_3 = Cont(x=x_ref[[1, 0, 2]], y=y_ref[[1, 0, 2]])
_test_equal_rand(rv_3, rv_ref)
# Check if duplicated values are removed from `x`
with pytest.warns(UserWarning, match="duplicated"):
# First pair of xy-grid is taken among duplicates
rv_4 = Cont(x=x_ref[[0, 1, 1, 2]], y=y_ref[[0, 1, 2, 2]])
_test_equal_rand(rv_4, rv_ref)
def test_str(self):
rv = Cont([0, 2, 4], [0, 1, 0])
assert str(rv) == "Continuous RV with 2 intervals (support: [0.0, 4.0])"
# Uses singular noun with one interval
rv = Cont([0, 1], [1, 1])
assert str(rv) == "Continuous RV with 1 interval (support: [0.0, 1.0])"
def test_properties(self):
x = np.arange(11)
y = np.repeat(0.1, 11)
rv = Cont(x, y)
assert list(rv.params.keys()) == ["x", "y"]
assert_array_equal(rv.params["x"], x)
assert_array_equal(rv.params["y"], y)
assert_array_equal(rv.x, x)
assert_array_equal(rv.y, y)
assert rv.a == 0.0
assert rv.b == 10.0
def test_support(self):
rv = Cont([0.5, 1.5, 4.5], [0, 0.5, 0])
assert rv.support() == (0.5, 4.5)
def test_compress(self):
# Zero tails
## Left tail
_test_equal_rand(
Cont([0, 1, 2, 3], [0, 0, 0, 2]).compress(), Cont([2, 3], [0, 2])
)
_test_equal_rand(
Cont([0, 1, 2, 3], [0, 0, 1, 0]).compress(), Cont([1, 2, 3], [0, 1, 0])
)
## Right tail
_test_equal_rand(
Cont([0, 1, 2, 3], [2, 0, 0, 0]).compress(), Cont([0, 1], [2, 0])
)
_test_equal_rand(
Cont([0, 1, 2, 3], [0, 1, 0, 0]).compress(), Cont([0, 1, 2], [0, 1, 0])
)
## Both tails
_test_equal_rand(
Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0]).compress(),
Cont([1, 2, 3], [0, 1, 0]),
)
# Extra linearity
## Non-zero slope
_test_equal_rand(
Cont([0, 1, 2, 3, 4], [0.5, 0.25, 0, 0.25, 0.5]).compress(),
Cont([0, 2, 4], [0.5, 0, 0.5]),
)
## Zero slope, non-zero y
_test_equal_rand(
Cont([0, 1, 2], [0.5, 0.5, 0.5]).compress(), Cont([0, 2], [0.5, 0.5])
)
## Zero slope, zero y, outside of tails
_test_equal_rand(
Cont([0, 1, 2, 3, 4], [1, 0, 0, 0, 1]).compress(),
Cont([0, 1, 3, 4], [1, 0, 0, 1]),
)
# All features
_test_equal_rand(
Cont(np.arange(14), [0, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 1, 0, 0]).compress(),
Cont([2, 4, 6, 8, 10, 11, 12], [0, 2, 2, 0, 0, 1, 0]),
)
# If nothing to compress, self should be returned
rv = Cont([0, 1], [1, 1])
assert rv.compress() is rv
def test_ground(self):
w = config.small_width
# Basic usage
rv = Cont([0, 1], [1, 1])
_test_equal_rand(
rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])
)
# Argument `direction`
_test_equal_rand(
rv.ground(direction="both"),
Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0]),
)
_test_equal_rand(
rv.ground(direction="left"), Cont([-w, 0, w, 1], [0, 0.5, 1, 1])
)
_test_equal_rand(
rv.ground(direction="right"), Cont([0, 1 - w, 1, 1 + w], [1, 1, 0.5, 0])
)
_test_equal_rand(rv.ground(direction="none"), rv)
# Argument `w`
w2 = 0.1
_test_equal_rand(
rv.ground(w=w2, direction="both"),
Cont([-w2, 0, w2, 1 - w2, 1, 1 + w2], [0, 0.5, 1, 1, 0.5, 0]),
)
# Close neighbors
rv2 = Cont([0, 0.25 * w, 0.5, 1 - 0.1 * w, 1], [1, 1, 1, 1, 1])
rv2_grounded = rv2.ground(direction="both")
## Check that only outer points were added
assert_array_equal(rv2_grounded.x[1:-1], rv2.x)
## Check that grounding actually happened
assert_array_equal(rv2_grounded.y[[0, -1]], 0.0)
## Check that non-edge x-values have the same y-values
assert_array_equal(rv2_grounded.pdf(rv2.x[1:-1]), rv2.pdf(rv2.x[1:-1]))
def test_ground_options(self):
rv = Cont([0, 1], [1, 1])
with config.context({"small_width": 0.1}):
w = config.small_width
_test_equal_rand(
rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])
)
def test_ground_errors(self):
rv = Cont([0, 1], [1, 1])
with pytest.raises(ValueError, match="one of"):
rv.ground(direction="aaa")
def test__coeffs_by_ind(self):
# All coefficients are returned if no `ind` is specified
rv = Cont([0, 1, 2], [0, 1, 0])
inter, slope = rv._coeffs_by_ind()
assert_array_equal(inter, [0, 2])
assert_array_equal(slope, [1, -1])
def test__grid_by_ind(self):
# All grid elements are returned if no `ind` is specified
rv = Cont([0, 1, 2], [0, 1, 0])
x_out, y_out, p_out = rv._grid_by_ind()
x_ref, y_ref = rv.x, rv.y
assert_array_equal(x_out, x_ref)
assert_array_equal(y_out, y_ref)
def test_pdf_coeffs(self):
rv = Cont([0, 1, 2], [0, 1, 0])
x = np.array([-1, 0, 0.5, 1, 1.5, 2, 2.5])
with pytest.raises(ValueError, match="one of"):
rv.pdf_coeffs(x, side="a")
_test_equal_seq(
rv.pdf_coeffs(x),
(np.array([0, 0, 0, 2, 2, 2, 0]), np.array([0, 1, 1, -1, -1, -1, 0])),
)
_test_equal_seq(
rv.pdf_coeffs(x, side="left"),
(np.array([0, 0, 0, 0, 2, 2, 0]), np.array([0, 1, 1, 1, -1, -1, 0])),
)
_test_equal_seq(
rv.pdf_coeffs(np.array([-np.inf, np.nan, np.inf])),
(np.array([0, np.nan, 0]), np.array([0, np.nan, 0])),
)
def test_from_rv_basic(self):
uniform = distrs.uniform
norm = distrs.norm
# Basic usage
rv_unif = Cont.from_rv(uniform)
rv_unif_test = Cont(x=[0, 1], y=[1, 1])
_test_equal_rand(rv_unif, rv_unif_test, decimal=DECIMAL)
# Objects of `Rand` class should be `convert()`ed
_test_from_rv_rand(cls=Cont, to_class="Cont")
# Forced support edges
rv_right = Cont.from_rv(uniform, supp=(0.5, None))
rv_right_test = Cont([0.5, 1], [2, 2])
_test_equal_rand(rv_right, rv_right_test, decimal=DECIMAL)
rv_left = Cont.from_rv(uniform, supp=(None, 0.5))
rv_left_test = Cont([0, 0.5], [2, 2])
_test_equal_rand(rv_left, rv_left_test, decimal=DECIMAL)
rv_mid = Cont.from_rv(uniform, supp=(0.25, 0.75))
rv_mid_test = Cont([0.25, 0.75], [2, 2])
_test_equal_rand(rv_mid, rv_mid_test, decimal=DECIMAL)
def test_from_rv_errors(self):
# Absence of either `cdf` or `ppf` method should result intro error
class Tmp:
pass
tmp1 = Tmp()
| |
__init__(SimTK::Mat<(3,3)> self, double const & e0, double const & e1, double const & e2, double const & e3, double const & e4, double const & e5, double const & e6, double const & e7, double const & e8, double const & e9, double const & e10, double const & e11, double const & e12, double const & e13, double const & e14, double const & e15) -> Mat33
Parameters
----------
e0: double const &
e1: double const &
e2: double const &
e3: double const &
e4: double const &
e5: double const &
e6: double const &
e7: double const &
e8: double const &
e9: double const &
e10: double const &
e11: double const &
e12: double const &
e13: double const &
e14: double const &
e15: double const &
"""
this = _simbody.new_Mat33(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def setToNaN(self):
"""
setToNaN(Mat33 self)
Parameters
----------
self: SimTK::Mat< 3,3 > *
"""
return _simbody.Mat33_setToNaN(self)
def setToZero(self):
"""
setToZero(Mat33 self)
Parameters
----------
self: SimTK::Mat< 3,3 > *
"""
return _simbody.Mat33_setToZero(self)
def isNaN(self):
"""
isNaN(Mat33 self) -> bool
Parameters
----------
self: SimTK::Mat< 3,3 > const *
"""
return _simbody.Mat33_isNaN(self)
def isInf(self):
"""
isInf(Mat33 self) -> bool
Parameters
----------
self: SimTK::Mat< 3,3 > const *
"""
return _simbody.Mat33_isInf(self)
def isFinite(self):
"""
isFinite(Mat33 self) -> bool
Parameters
----------
self: SimTK::Mat< 3,3 > const *
"""
return _simbody.Mat33_isFinite(self)
def getDefaultTolerance():
"""getDefaultTolerance() -> double"""
return _simbody.Mat33_getDefaultTolerance()
getDefaultTolerance = staticmethod(getDefaultTolerance)
def isNumericallyEqual(self, *args):
"""
isNumericallyEqual(Mat33 self, double const & e, double tol) -> bool
Parameters
----------
e: double const &
tol: double
isNumericallyEqual(Mat33 self, double const & e) -> bool
Parameters
----------
e: double const &
"""
return _simbody.Mat33_isNumericallyEqual(self, *args)
def isNumericallySymmetric(self, *args):
"""
isNumericallySymmetric(Mat33 self, double tol) -> bool
Parameters
----------
tol: double
isNumericallySymmetric(Mat33 self) -> bool
Parameters
----------
self: SimTK::Mat< 3,3 > const *
"""
return _simbody.Mat33_isNumericallySymmetric(self, *args)
def isExactlySymmetric(self):
"""
isExactlySymmetric(Mat33 self) -> bool
Parameters
----------
self: SimTK::Mat< 3,3 > const *
"""
return _simbody.Mat33_isExactlySymmetric(self)
def toString(self):
"""
toString(Mat33 self) -> std::string
Parameters
----------
self: SimTK::Mat< 3,3 > const *
"""
return _simbody.Mat33_toString(self)
def get(self, i, j):
"""
get(Mat33 self, int i, int j) -> double const &
Parameters
----------
i: int
j: int
"""
return _simbody.Mat33_get(self, i, j)
def set(self, i, j, value):
"""
set(Mat33 self, int i, int j, double const & value)
Parameters
----------
i: int
j: int
value: double const &
"""
return _simbody.Mat33_set(self, i, j, value)
__swig_destroy__ = _simbody.delete_Mat33
__del__ = lambda self: None
Mat33_swigregister = _simbody.Mat33_swigregister
Mat33_swigregister(Mat33)
def Mat33_size():
"""Mat33_size() -> int"""
return _simbody.Mat33_size()
def Mat33_nrow():
"""Mat33_nrow() -> int"""
return _simbody.Mat33_nrow()
def Mat33_ncol():
"""Mat33_ncol() -> int"""
return _simbody.Mat33_ncol()
def Mat33_getDefaultTolerance():
"""Mat33_getDefaultTolerance() -> double"""
return _simbody.Mat33_getDefaultTolerance()
class CoordinateAxis(_object):
"""Proxy of C++ SimTK::CoordinateAxis class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CoordinateAxis, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CoordinateAxis, name)
__repr__ = _swig_repr
def __init__(self, i):
"""
__init__(SimTK::CoordinateAxis self, int i) -> CoordinateAxis
Parameters
----------
i: int
"""
this = _simbody.new_CoordinateAxis(i)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def getNextAxis(self):
"""
getNextAxis(CoordinateAxis self) -> CoordinateAxis
Parameters
----------
self: SimTK::CoordinateAxis const *
"""
return _simbody.CoordinateAxis_getNextAxis(self)
def getPreviousAxis(self):
"""
getPreviousAxis(CoordinateAxis self) -> CoordinateAxis
Parameters
----------
self: SimTK::CoordinateAxis const *
"""
return _simbody.CoordinateAxis_getPreviousAxis(self)
def getThirdAxis(self, axis2):
"""
getThirdAxis(CoordinateAxis self, CoordinateAxis axis2) -> CoordinateAxis
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_getThirdAxis(self, axis2)
def isXAxis(self):
"""
isXAxis(CoordinateAxis self) -> bool
Parameters
----------
self: SimTK::CoordinateAxis const *
"""
return _simbody.CoordinateAxis_isXAxis(self)
def isYAxis(self):
"""
isYAxis(CoordinateAxis self) -> bool
Parameters
----------
self: SimTK::CoordinateAxis const *
"""
return _simbody.CoordinateAxis_isYAxis(self)
def isZAxis(self):
"""
isZAxis(CoordinateAxis self) -> bool
Parameters
----------
self: SimTK::CoordinateAxis const *
"""
return _simbody.CoordinateAxis_isZAxis(self)
def isNextAxis(self, axis2):
"""
isNextAxis(CoordinateAxis self, CoordinateAxis axis2) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_isNextAxis(self, axis2)
def isPreviousAxis(self, axis2):
"""
isPreviousAxis(CoordinateAxis self, CoordinateAxis axis2) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_isPreviousAxis(self, axis2)
def isSameAxis(self, axis2):
"""
isSameAxis(CoordinateAxis self, CoordinateAxis axis2) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_isSameAxis(self, axis2)
def areAllSameAxes(self, axis2, axis3):
"""
areAllSameAxes(CoordinateAxis self, CoordinateAxis axis2, CoordinateAxis axis3) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
axis3: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_areAllSameAxes(self, axis2, axis3)
def isDifferentAxis(self, axis2):
"""
isDifferentAxis(CoordinateAxis self, CoordinateAxis axis2) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_isDifferentAxis(self, axis2)
def areAllDifferentAxes(self, axis2, axis3):
"""
areAllDifferentAxes(CoordinateAxis self, CoordinateAxis axis2, CoordinateAxis axis3) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
axis3: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_areAllDifferentAxes(self, axis2, axis3)
def isForwardCyclical(self, axis2):
"""
isForwardCyclical(CoordinateAxis self, CoordinateAxis axis2) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_isForwardCyclical(self, axis2)
def isReverseCyclical(self, axis2):
"""
isReverseCyclical(CoordinateAxis self, CoordinateAxis axis2) -> bool
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_isReverseCyclical(self, axis2)
def dotProduct(self, axis2):
"""
dotProduct(CoordinateAxis self, CoordinateAxis axis2) -> int
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_dotProduct(self, axis2)
def crossProductSign(self, axis2):
"""
crossProductSign(CoordinateAxis self, CoordinateAxis axis2) -> int
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_crossProductSign(self, axis2)
def crossProductAxis(self, axis2):
"""
crossProductAxis(CoordinateAxis self, CoordinateAxis axis2) -> CoordinateAxis
Parameters
----------
axis2: SimTK::CoordinateAxis const &
"""
return _simbody.CoordinateAxis_crossProductAxis(self, axis2)
def crossProduct(self, axis2, sign):
"""
crossProduct(CoordinateAxis self, CoordinateAxis axis2, int & sign) -> CoordinateAxis
Parameters
----------
axis2: SimTK::CoordinateAxis const &
sign: int &
"""
return _simbody.CoordinateAxis_crossProduct(self, axis2, sign)
def getCoordinateAxis(i):
"""
getCoordinateAxis(int i) -> CoordinateAxis
Parameters
----------
i: int
"""
return _simbody.CoordinateAxis_getCoordinateAxis(i)
getCoordinateAxis = staticmethod(getCoordinateAxis)
def isIndexInRange(i):
"""
isIndexInRange(int i) -> bool
Parameters
----------
i: int
"""
return _simbody.CoordinateAxis_isIndexInRange(i)
isIndexInRange = staticmethod(isIndexInRange)
__swig_destroy__ = _simbody.delete_CoordinateAxis
__del__ = lambda self: None
CoordinateAxis_swigregister = _simbody.CoordinateAxis_swigregister
CoordinateAxis_swigregister(CoordinateAxis)
def CoordinateAxis_getCoordinateAxis(i):
"""
CoordinateAxis_getCoordinateAxis(int i) -> CoordinateAxis
Parameters
----------
i: int
"""
return _simbody.CoordinateAxis_getCoordinateAxis(i)
def CoordinateAxis_isIndexInRange(i):
"""
CoordinateAxis_isIndexInRange(int i) -> bool
Parameters
----------
i: int
"""
return _simbody.CoordinateAxis_isIndexInRange(i)
def __eq__(a1, a2):
"""
__eq__(CoordinateAxis a1, CoordinateAxis a2) -> bool
Parameters
----------
a1: SimTK::CoordinateAxis const &
a2: SimTK::CoordinateAxis const &
"""
return _simbody.__eq__(a1, a2)
def __ne__(a1, a2):
"""
__ne__(CoordinateAxis a1, CoordinateAxis a2) -> bool
Parameters
----------
a1: SimTK::CoordinateAxis const &
a2: SimTK::CoordinateAxis const &
"""
return _simbody.__ne__(a1, a2)
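# Illustrative sketch only (assumes the compiled _simbody extension is importable and
# that axis indices 0/1/2 follow the X/Y/Z convention implied by isXAxis/isYAxis/isZAxis):
#
#   x, y = CoordinateAxis(0), CoordinateAxis(1)
#   x.isXAxis()            # True
#   x.crossProductAxis(y)  # the Z axis, i.e. CoordinateAxis(2)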
class CoordinateDirection(_object):
"""Proxy of C++ SimTK::CoordinateDirection class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CoordinateDirection, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CoordinateDirection, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SimTK::CoordinateDirection self, CoordinateAxis axis) -> CoordinateDirection
Parameters
----------
axis: SimTK::CoordinateAxis const &
__init__(SimTK::CoordinateDirection self, CoordinateAxis axis, int direction) -> CoordinateDirection
Parameters
----------
axis: SimTK::CoordinateAxis const &
direction: int
"""
this = _simbody.new_CoordinateDirection(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def getAxis(self):
"""
getAxis(CoordinateDirection self) -> CoordinateAxis
Parameters
----------
self: SimTK::CoordinateDirection const *
"""
return _simbody.CoordinateDirection_getAxis(self)
def getDirection(self):
"""
getDirection(CoordinateDirection self) -> int
Parameters
----------
self: SimTK::CoordinateDirection const *
"""
return _simbody.CoordinateDirection_getDirection(self)
def hasSameAxis(self, dir2):
"""
hasSameAxis(CoordinateDirection self, CoordinateDirection dir2) -> bool
Parameters
----------
dir2: SimTK::CoordinateDirection const &
"""
return _simbody.CoordinateDirection_hasSameAxis(self, dir2)
def isSameAxisAndDirection(self, dir2):
"""
isSameAxisAndDirection(CoordinateDirection self, CoordinateDirection dir2) -> bool
Parameters
----------
dir2: SimTK::CoordinateDirection const &
"""
return _simbody.CoordinateDirection_isSameAxisAndDirection(self, dir2)
def dotProduct(self, dir2):
"""
dotProduct(CoordinateDirection self, CoordinateDirection dir2) -> int
Parameters
----------
dir2: SimTK::CoordinateDirection const &
"""
return _simbody.CoordinateDirection_dotProduct(self, dir2)
def crossProductSign(self, dir2):
"""
crossProductSign(CoordinateDirection self, CoordinateDirection dir2) -> int
Parameters
----------
dir2: SimTK::CoordinateDirection const &
"""
return _simbody.CoordinateDirection_crossProductSign(self, dir2)
def crossProductAxis(self, dir2):
"""
crossProductAxis(CoordinateDirection self, CoordinateDirection dir2) -> CoordinateAxis
Parameters
----------
dir2: SimTK::CoordinateDirection const &
"""
return _simbody.CoordinateDirection_crossProductAxis(self, dir2)
def crossProduct(self, dir2, sign):
"""
crossProduct(CoordinateDirection self, CoordinateDirection dir2, int & sign) -> CoordinateAxis
Parameters
----------
dir2: SimTK::CoordinateDirection const &
sign: int &
"""
return _simbody.CoordinateDirection_crossProduct(self, dir2, sign)
__swig_destroy__ = _simbody.delete_CoordinateDirection
__del__ = lambda self: None
CoordinateDirection_swigregister = _simbody.CoordinateDirection_swigregister
CoordinateDirection_swigregister(CoordinateDirection)
class UnitVec3(Vec3):
"""Proxy of C++ SimTK::UnitVec<(double,1)> class."""
__swig_setmethods__ = {}
for _s in [Vec3]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, UnitVec3, name, value)
__swig_getmethods__ = {}
for _s in [Vec3]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, UnitVec3, name)
__repr__ = _swig_repr
def asVec3(self):
"""
asVec3(UnitVec3 self) -> Vec3
Parameters
----------
self: SimTK::UnitVec< double,1 | |
= s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
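# Small illustration of the coercion helper above:
#   _cast(int, "3")   -> 3
#   _cast(None, "3")  -> "3"   (no type given, value passed through unchanged)
#   _cast(int, None)  -> None  (missing values stay None)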
#
# Data representation classes.
#
class qcMLType(GeneratedsSuper):
"""The type of the root elementVersion number of qcML document. Pattern
is \d+\.\d+\.\d+"""
subclass = None
superclass = None
def __init__(self, version=None, runQuality=None, setQuality=None, cvList=None, embeddedStylesheetList=None):
self.original_tagname_ = None
self.version = _cast(None, version)
if runQuality is None:
self.runQuality = []
else:
self.runQuality = runQuality
if setQuality is None:
self.setQuality = []
else:
self.setQuality = setQuality
self.cvList = cvList
self.embeddedStylesheetList = embeddedStylesheetList
def factory(*args_, **kwargs_):
if qcMLType.subclass:
return qcMLType.subclass(*args_, **kwargs_)
else:
return qcMLType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_runQuality(self): return self.runQuality
def set_runQuality(self, runQuality): self.runQuality = runQuality
def add_runQuality(self, value): self.runQuality.append(value)
def insert_runQuality_at(self, index, value): self.runQuality.insert(index, value)
def replace_runQuality_at(self, index, value): self.runQuality[index] = value
def get_setQuality(self): return self.setQuality
def set_setQuality(self, setQuality): self.setQuality = setQuality
def add_setQuality(self, value): self.setQuality.append(value)
def insert_setQuality_at(self, index, value): self.setQuality.insert(index, value)
def replace_setQuality_at(self, index, value): self.setQuality[index] = value
def get_cvList(self): return self.cvList
def set_cvList(self, cvList): self.cvList = cvList
def get_embeddedStylesheetList(self): return self.embeddedStylesheetList
def set_embeddedStylesheetList(self, embeddedStylesheetList): self.embeddedStylesheetList = embeddedStylesheetList
def get_version(self): return self.version
def set_version(self, version): self.version = version
def hasContent_(self):
if (
self.runQuality or
self.setQuality or
self.cvList is not None or
self.embeddedStylesheetList is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='qcMLType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<?xml-stylesheet type="text/xml" href="#to_html"?>%s' % (eol_, ))
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='qcMLType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='qcMLType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='qcMLType'):
if self.version is not None and 'version' not in already_processed:
already_processed.add('version')
outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version), input_name='version'), ))
def exportChildren(self, outfile, level, namespace_='', name_='qcMLType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for runQuality_ in self.runQuality:
runQuality_.export(outfile, level, namespace_, name_='runQuality', pretty_print=pretty_print)
for setQuality_ in self.setQuality:
setQuality_.export(outfile, level, namespace_, name_='setQuality', pretty_print=pretty_print)
if self.cvList is not None:
self.cvList.export(outfile, level, namespace_, name_='cvList', pretty_print=pretty_print)
if self.embeddedStylesheetList is not None:
self.embeddedStylesheetList.export(outfile, level, namespace_, name_='embeddedStylesheetList', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.add('version')
self.version = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'runQuality':
obj_ = RunQualityAssessmentType.factory()
obj_.build(child_)
self.runQuality.append(obj_)
obj_.original_tagname_ = 'runQuality'
elif nodeName_ == 'setQuality':
obj_ = SetQualityAssessmentType.factory()
obj_.build(child_)
self.setQuality.append(obj_)
obj_.original_tagname_ = 'setQuality'
elif nodeName_ == 'cvList':
obj_ = CVListType.factory()
obj_.build(child_)
self.cvList = obj_
obj_.original_tagname_ = 'cvList'
elif nodeName_ == 'embeddedStylesheetList':
obj_ = embeddedStylesheetListType.factory()
obj_.build(child_)
self.embeddedStylesheetList = obj_
obj_.original_tagname_ = 'embeddedStylesheetList'
# end class qcMLType
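# Editorial usage sketch (not part of the generated bindings): building a small
# qcML document in memory and serialising it through the export() method of the
# qcMLType class above. It assumes the usual generateDS runtime helpers
# (GeneratedsSuper, showIndent, quote_attrib) defined elsewhere in this module,
# and the version string used here is only an illustrative value.
def _example_build_and_export_qcml(outfile):
    doc = qcMLType(version='0.0.11')   # version must match \d+\.\d+\.\d+
    # runQuality / setQuality entries would be RunQualityAssessmentType and
    # SetQualityAssessmentType instances; they are left empty in this sketch.
    doc.export(outfile, level=0, name_='qcML', pretty_print=True)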
class QualityAssessmentType(GeneratedsSuper):
"""The abstract base type for qualityParameter and attachment container"""
subclass = None
superclass = None
def __init__(self, metaDataParameter=None, qualityParameter=None, attachment=None, extensiontype_=None):
self.original_tagname_ = None
if metaDataParameter is None:
self.metaDataParameter = []
else:
self.metaDataParameter = metaDataParameter
if qualityParameter is None:
self.qualityParameter = []
else:
self.qualityParameter = qualityParameter
if attachment is None:
self.attachment = []
else:
self.attachment = attachment
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if QualityAssessmentType.subclass:
return QualityAssessmentType.subclass(*args_, **kwargs_)
else:
return QualityAssessmentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_metaDataParameter(self): return self.metaDataParameter
def set_metaDataParameter(self, metaDataParameter): self.metaDataParameter = metaDataParameter
def add_metaDataParameter(self, value): self.metaDataParameter.append(value)
def insert_metaDataParameter_at(self, index, value): self.metaDataParameter.insert(index, value)
def replace_metaDataParameter_at(self, index, value): self.metaDataParameter[index] = value
def get_qualityParameter(self): return self.qualityParameter
def set_qualityParameter(self, qualityParameter): self.qualityParameter = qualityParameter
def add_qualityParameter(self, value): self.qualityParameter.append(value)
def insert_qualityParameter_at(self, index, value): self.qualityParameter.insert(index, value)
def replace_qualityParameter_at(self, index, value): self.qualityParameter[index] = value
def get_attachment(self): return self.attachment
def set_attachment(self, attachment): self.attachment = attachment
def add_attachment(self, value): self.attachment.append(value)
def insert_attachment_at(self, index, value): self.attachment.insert(index, value)
def replace_attachment_at(self, index, value): self.attachment[index] = value
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.metaDataParameter or
self.qualityParameter or
self.attachment
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='QualityAssessmentType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='QualityAssessmentType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='QualityAssessmentType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='QualityAssessmentType'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
pass
error_norm = self._estimate_error_norm(self.E1, h, scale)
# reject step if needed
if error_norm > 1:
step_rejected = True
h_abs *= max(MIN_FACTOR, SAFETY*error_norm**self.error_exponent)
continue
# calculate solution
self._rk_stage(h, 6) # stage 6
y_new = y + self.K[:-1].T @ self.B * h
# calculate second error estimate
# now use y_new for scale
f_new = self.fun(t_new, y_new)
self.K[7] = f_new # stage 7 (FSAL)
scale = atol + rtol * np.maximum(np.abs(y), np.abs(y_new))
error_norm = self._estimate_error_norm(self.E2, h, scale)
# continue as usual
if error_norm < 1:
step_accepted = True
if error_norm == 0.0:
factor = MAX_FACTOR
else:
factor = min(MAX_FACTOR,
SAFETY * error_norm**self.error_exponent)
if step_rejected:
factor = min(1.0, factor)
h_abs *= factor
else:
step_rejected = True
h_abs *= max(MIN_FACTOR,
SAFETY * error_norm**self.error_exponent)
# after successful step; as usual
self.h_previous = h
self.y_old = y
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.f = f_new
return True, None
def _rk_stage(self, h, i):
dy = self.K[:i,:].T @ self.A[i,:i] * h
self.K[i] = self.fun(self.t + self.C[i]*h, self.y + dy)
def _estimate_error(self, E, h):
# pass E instead of K
return self.K[:E.size,:].T @ E * h
def _estimate_error_norm(self, E, h, scale):
# pass E instead of K
return norm(self._estimate_error(E, h) / scale)
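# Editorial note: _estimate_error returns h * K[:m].T @ E, i.e. the difference
# between the two embedded solutions for the given error-weight vector E, and
# _estimate_error_norm divides it by scale = atol + rtol*|y| before applying
# norm() (an RMS-style norm in scipy's RungeKutta machinery). A step is
# accepted when this weighted norm is <= 1, which is the acceptance test used
# above.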
def _dense_output_impl(self):
if self.dense_output_order=='high': # default
h = self.h_previous
K = self.K_extended
# calculate the required extra stages
for s, (a, c) in enumerate(zip(self.A_extra, self.C_extra),
start=self.n_stages+1):
dy = K[:s,:].T @ a[:s] * h
K[s] = self.fun(self.t_old + c * h, self.y_old + dy)
# form Q. Usually: Q = K.T @ self.P
# but rksuite recommends grouping summations to mitigate roundoff:
Q = np.empty((K.shape[1], self.P.shape[1]), dtype=K.dtype)
Q[:,0] = K[7,:] # term for t**1
KP = K*self.P[:,1,np.newaxis] # term for t**2
Q[:,1] = ( KP[4] + ((KP[5]+KP[7]) + KP[0])
+ ((KP[2]+KP[8]) + KP[9]) + ((KP[3]+KP[10]) + KP[6]) )
KP = K*self.P[:,2,np.newaxis] # term for t**3
Q[:,2] = ( KP[4] + KP[5]
+ ((KP[2]+KP[8]) + (KP[9]+KP[7]) + KP[0])
+ ((KP[3]+KP[10]) + KP[6]) )
KP = K*self.P[:,3,np.newaxis] # term for t**4
Q[:,3] = ( ((KP[3]+KP[7]) + (KP[6]+KP[5]) + KP[4])
+ ((KP[9]+KP[8]) + (KP[2]+KP[10]) + KP[0]) )
KP = K*self.P[:,4,np.newaxis] # term for t**5
Q[:,4] = ( (KP[9]+KP[8]) + ((KP[6]+KP[5]) + KP[4])
+ ((KP[3]+KP[7]) + (KP[2]+KP[10]) + KP[0]) )
KP = K*self.P[:,5,np.newaxis] # term for t**6
Q[:,5] = ( KP[4] + ((KP[9]+KP[7]) + (KP[6]+KP[5]))
+ ((KP[3]+KP[8]) + (KP[2]+KP[10]) + KP[0]) )
# this is almost the same as Q usual
# Rksuite uses Horner's rule to evaluate the polynomial. Moreover,
# the polynomial definition is different: looking back from the end
# of the step instead of forward from the start.
# The call is modified accordingly:
return HornerDenseOutput(self.t, self.t+h, self.y, Q)
else: # self.dense_output_order=='low'
# for BS45_i
# as usual:
Q = self.K.T @ self.Pfree
return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class BS45_i(BS45):
"""As BS45, but with free 4th order interpolant for dense output. Suffix _i
for interpolant.
The source [1]_ refers to the thesis of Bogacki for a free interpolant, but
this could not be found. Instead, the interpolant is constructed following
the steps in [3]_.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e., each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian. Is always 0 for this solver as
it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
.. [1] <NAME>, <NAME>, "An efficient Runge-Kutta (4,5) pair",
Computers & Mathematics with Applications, Vol. 32, No. 6, 1996,
pp. 15-28, ISSN 0898-1221.
https://doi.org/10.1016/0898-1221(96)00141-1
.. [2] RKSUITE: https://www.netlib.org/ode/rksuite/
.. [3] <NAME>, "Runge-Kutta pairs of order 5(4) satisfying only the
first column simplifying assumption", Computers & Mathematics with
Applications, Vol. 62, No. 2, pp. 770 - 775, 2011.
https://doi.org/10.1016/j.camwa.2011.06.002
"""
dense_output_order = 'low'
class HornerDenseOutput(RkDenseOutput):
"""use Horner's rule for the evaluation of the polynomials"""
def _call_impl(self, t):
# scaled time
x = (t - self.t_old) / self.h
# Horner's rule:
y = np.zeros((self.Q.shape[0], x.size), dtype=self.Q.dtype)
for q in reversed(self.Q.T):
y += q[:,np.newaxis]
y *= x
# finish:
y *= self.h
y += self.y_old[:,np.newaxis]
# need this `if` to pass scipy's unit tests. I'm not sure why.
if t.shape:
return y
else:
return y[:,0]
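# Editorial usage sketch: scipy.integrate.solve_ivp accepts an OdeSolver
# subclass as its `method` argument, so the BS45 / BS45_i classes above can be
# passed directly. This assumes BS45 derives from scipy's RungeKutta base
# class, which the attribute names used above (A, B, C, E1, E2, K, n_stages,
# error_exponent) suggest. The test problem y' = -y is only illustrative.
def _example_solve_with_bs45():
    from scipy.integrate import solve_ivp

    def rhs(t, y):
        return -y

    sol = solve_ivp(rhs, (0.0, 5.0), [1.0], method=BS45,
                    rtol=1e-6, atol=1e-9, dense_output=True)
    return sol.t, sol.y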
if __name__ == '__main__':
"""Construction of a free interpolant of the BS45 pair. The approach from
"Runge-Kutta pairs of order 5(4) satisfying only the first column
simplifying assumption" by <NAME> is followed.
Bogacki has derived an interpolant for this method as well, but I was not
able to find a copy of his thesis that contains this interpolant.
"""
import numpy as np
import matplotlib.pyplot as plt
import sympy
from sympy.solvers.solveset import linsolve
from sympy import Rational as R
from pprint import pprint
n_stages = 8 # including derivative evaluation at end of step
order = 5 # of interpolation in t (not h)
T5_method4 = 1.06e-4 # error of embedded fourth order method
t = sympy.symbols('t', real=True)
bi = sympy.symbols(f'bi0:{n_stages}', real=True)
bi_vec = sympy.Matrix(bi)
# Method
A = sympy.Matrix([ # full A matrix, including last line
[0, 0, 0, 0, 0, 0, 0, 0],
[R(1,6), 0, 0, 0, 0, 0, 0, 0],
[R(2,27), R(4,27), 0, 0, 0, 0, 0, 0],
[R(183,1372), R(-162,343), R(1053,1372), 0, 0, 0, 0, 0],
[R(68,297), R(-4,11), R(42,143), R(1960,3861), 0, 0, 0, 0],
[R(597,22528), R(81,352), R(63099,585728), R(58653,366080),
R(4617,20480), 0, 0, 0],
[R(174197,959244), R(-30942,79937), R(8152137,19744439),
R(666106,1039181), R(-29421,29068), R(482048,414219), 0, 0],
[R(587,8064), 0, R(4440339,15491840), R(24353,124800), R(387,44800),
R(2152,5985), R(7267,94080), 0]])
c = sympy.Matrix([0, R(1,6), R(2,9), R(3,7),
"3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['speed']
stat_stats = loaddata.non_legendary_ghost_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['attack']
stat_stats = loaddata.non_legendary_ghost_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['defense']
stat_stats = loaddata.non_legendary_ghost_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['sp_attack']
stat_stats = loaddata.non_legendary_ghost_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_ghost_types['sp_defense']
stat_stats = loaddata.non_legendary_ghost_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_ghost_types['height_m']
stat_stats = loaddata.non_legendary_ghost_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_ghost_types['weight_kg']
stat_stats = loaddata.non_legendary_ghost_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# steel pokemon
elif type_set == "18":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_steel_types['total_points']
stat_stats = loaddata.non_legendary_steel_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['hp']
stat_stats = loaddata.non_legendary_steel_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['speed']
stat_stats = loaddata.non_legendary_steel_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['attack']
stat_stats = loaddata.non_legendary_steel_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['defense']
stat_stats = loaddata.non_legendary_steel_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['sp_attack']
stat_stats = loaddata.non_legendary_steel_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_steel_types['sp_defense']
stat_stats = loaddata.non_legendary_steel_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_steel_types['height_m']
stat_stats = loaddata.non_legendary_steel_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_steel_types['weight_kg']
stat_stats = loaddata.non_legendary_steel_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# all pokemon (trimmed h & w)
elif type_set == "19":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.non_legendary_total_points
stat_stats = loaddata.non_legendary_total_points_stats
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_hp
stat_stats = loaddata.non_legendary_hp_stats
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_speed
stat_stats = loaddata.non_legendary_speed_stats
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_attack
stat_stats = loaddata.non_legendary_attack_stats
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_defense
stat_stats = loaddata.non_legendary_defense_stats
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_sp_attack
stat_stats = loaddata.non_legendary_sp_attack_stats
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.non_legendary_sp_defense
stat_stats = loaddata.non_legendary_sp_defense_stats
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.non_legendary_height
stat_stats = loaddata.non_legendary_height_stats
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.non_legendary_weight
stat_stats = loaddata.non_legendary_weight_stats
unit = '(kg)'
else:
return
else:
return
elif data_set == "4": # legendary pokemon
set_name = "Legendary Pokemon"
modifier = '(legendary)'
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_grass_types['total_points']
stat_stats = loaddata.legendary_grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['hp']
stat_stats = loaddata.legendary_grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['speed']
stat_stats = loaddata.legendary_grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['attack']
stat_stats = loaddata.legendary_grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['defense']
stat_stats = loaddata.legendary_grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['sp_attack']
stat_stats = loaddata.legendary_grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_grass_types['sp_defense']
stat_stats = loaddata.legendary_grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_grass_types['height_m']
stat_stats = loaddata.legendary_grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_grass_types['weight_kg']
stat_stats = loaddata.legendary_grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_fire_types['total_points']
stat_stats = loaddata.legendary_fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['hp']
stat_stats = loaddata.legendary_fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['speed']
stat_stats = loaddata.legendary_fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['attack']
stat_stats = loaddata.legendary_fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['defense']
stat_stats = loaddata.legendary_fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['sp_attack']
stat_stats = loaddata.legendary_fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_fire_types['sp_defense']
stat_stats = loaddata.legendary_fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_fire_types['height_m']
stat_stats = loaddata.legendary_fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_fire_types['weight_kg']
stat_stats = loaddata.legendary_fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_water_types['total_points']
stat_stats = loaddata.legendary_water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['hp']
stat_stats = loaddata.legendary_water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['speed']
stat_stats = loaddata.legendary_water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['attack']
stat_stats = loaddata.legendary_water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['defense']
stat_stats = loaddata.legendary_water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['sp_attack']
stat_stats = loaddata.legendary_water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.legendary_water_types['sp_defense']
stat_stats = loaddata.legendary_water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.legendary_water_types['height_m']
stat_stats = loaddata.legendary_water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.legendary_water_types['weight_kg']
stat_stats = loaddata.legendary_water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.legendary_electric_types['total_points']
stat_stats = loaddata.legendary_electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.legendary_electric_types['hp']
stat_stats = loaddata.legendary_electric_types['hp'].describe()
unit = ''
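# Editorial sketch: every branch above repeats the same pattern -- pick a stat
# name, its test bounds, a dataframe column and a unit. A hypothetical
# table-driven helper like the one below could replace each if/elif ladder; the
# bounds and column names are copied from the branches above, and `dataframe`
# stands in for the loaddata.<subset> frames (e.g. legendary_fire_types).
_STAT_TABLE = {
    "1": ("Stat Total", (100, 600), "total_points", ""),
    "2": ("HP", (20, 256), "hp", ""),
    "3": ("Speed", (20, 256), "speed", ""),
    "4": ("Attack", (20, 256), "attack", ""),
    "5": ("Defense", (20, 256), "defense", ""),
    "6": ("Special Attack", (20, 256), "sp_attack", ""),
    "7": ("Special Defense", (20, 256), "sp_defense", ""),
    "8": ("Height(m)", (0, 20), "height_m", "(m)"),
    "9": ("Weight(kg)", (1, 800), "weight_kg", "(kg)"),
}

def _select_stat(dataframe, stat_set):
    """Return (stat_name, test_bounds, stat_values, stat_stats, unit) or None."""
    if stat_set not in _STAT_TABLE:
        return None
    stat_name, test_bounds, column, unit = _STAT_TABLE[stat_set]
    values = dataframe[column]
    return stat_name, test_bounds, values, values.describe(), unit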
cons.idtabla = nodo.idnuevo
consola += "La tabla " + nodo.idactual + \
" se cambio a " + nodo.idnuevo + " exitosamente \n"
elif lib == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "Error en la operacion."))
elif lib == 2:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La base de datos " + useActual + " no existe"))
elif lib == 3:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La tabla " + nodo.idactual + " no existe"))
elif lib == 4:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La tabla " + nodo.idnuevo + " ya existe"))
elif op == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La tabla con nombre " + nodo.idnuevo + " ya existe"))
elif op == 2:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La tabla con nombre " + nodo.idactual + " no existe"))
def AlterTableCheck(nodo, tablaSimbolos):
global useActual
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
condicion = nodo.expresion
opIzq = condicion.opIzq
idcol = opIzq.valor
result = False
global consola
if nodo.idcons == None:
result = tabla.modificarCheck(idcol, condicion, idcol + "_check")
listaConstraint.append(TS.Constraints(
useActual, nodo.idtabla, idcol + "_check", idcol, "check"))
consola += "Se agrego el check a la columna " + idcol + " exitosamente \n"
else:
result = tabla.modificarCheck(idcol, condicion, nodo.idcons)
listaConstraint.append(TS.Constraints(
useActual, nodo.idtabla, nodo.idcons, idcol, "check"))
consola += "Se agrego el check a la columna " + idcol + " exitosamente \n"
if result != True:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "No se encontró la columna con id " + idcol))
def AlterTableUnique(nodo, tablaSimbolos):
global consola
global useActual
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
if tabla.modificarUnique(nodo.idcolumna, True, nodo.idconstraint):
listaConstraint.append(TS.Constraints(
useActual, nodo.idtabla, nodo.idconstraint, nodo.idcolumna, "unique"))
consola += "Se agrego el unique a la columna " + \
nodo.idcolumna + " exitosamente \n"
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna con id " + nodo.idcolumna))
def AlterTableFK(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
for i in range(len(nodo.idlocal)):
idlocal = nodo.idlocal[i].valor
idfk = nodo.idfk[i].valor
columnafk = tablaSimbolos.getColumna(useActual, nodo.idtablafk, idfk)
columnalocal = tabla.getColumna(idlocal)
if columnafk != None and columnalocal != None:
if columnafk.tipo.tipo == columnalocal.tipo.tipo:
tabla.modificarFk(idlocal, nodo.idtablafk, idfk)
if nodo.idconstraint != None:
listaConstraint.append(
TS.Constraints(useActual, nodo.idtabla, nodo.idconstraint, columnalocal, "FK"))
listaFK.append(TS.llaveForanea(
useActual, nodo.idtabla, nodo.idtablafk, idlocal, idfk))
consola += "Se agrego la llave foranea a " + idlocal + " exitosamente \n"
else:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"La columna %s y la columna %s no tienen el mismo tipo" % (
idlocal, idfk)))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna"))
def AlterTableDropColumn(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
for col in nodo.listaColumnas:
if jBase.alterDropColumn(useActual, nodo.idtabla, tabla.getIndex(col.idcolumna)) == 0:
if tabla.deleteColumn(col.idcolumna):
consola += "Se eliminó con exito la columna " + col.idcolumna + "\n"
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La columna " + col.idcolumna + " no existe"))
def AlterTableDropConstraint(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
bandera = False
for cons in listaConstraint:
if cons.idconstraint == nodo.listaColumnas:
bandera = True
if cons.tipo == "unique":
if tabla.deleteUnique(cons.idcol):
consola += "Se eliminó con éxito el constraint " + nodo.listaColumnas + "\n"
else:
consola += "Error no se pudo eliminar el constraint " + nodo.listaColumnas + "\n"
elif cons.tipo == "check":
if tabla.deleteCheck(cons.idcol):
consola += "Se eliminó con éxito el constraint " + nodo.listaColumnas + "\n"
else:
consola += "Error no se pudo eliminar el constraint " + nodo.listaColumnas + "\n"
elif cons.tipo == "FK":
if tabla.deleteFk(cons.idcol):
consola += "Se eliminó con éxito el constraint " + nodo.listaColumnas + "\n"
else:
consola += "Error no se pudo eliminar el constraint " + nodo.listaColumnas + "\n"
if bandera == False:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "No se encontro el constraint " + nodo.listaColumnas))
def AlterColumnNotNull(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
for col in nodo.columnas:
if tabla.modificarNull(col.idcolumna):
consola += "Se cambió a not null con exito la columna " + col.idcolumna + " \n"
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "No se encontro la columna" + col.idcolumna))
def AlterColumnCTipo(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
for col in nodo.columnas:
b = tabla.modificarTipo(
col.idcolumna, col.valcambio.tipo, col.valcambio.cantidad)
if b == 0:
consola += "Se modificó el tipo exitosamente a la columna " + col.idcolumna + " \n"
elif b == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "El valor es menor al actual"))
elif b == 2:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "Los tipos no coinciden"))
elif b == 3:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "la columna no existe " + col.idcolumna))
def InsertTable(nodo, tablaSimbolos):
global consola
flag = False
base = tablaSimbolos.get(useActual)
if base != None:
tabla = base.getTabla(nodo.id)
if tabla != None:
if nodo.listaColumnas != None:
if len(nodo.listaColumnas) == len(nodo.listValores):
result = False
# check the number of columns and which ones have null values
b = tabla.comprobarNulas(nodo.listaColumnas)
if b["cod"] == 0:
# validate types
for i in range(len(nodo.listaColumnas)):
col = tabla.getColumna(nodo.listaColumnas[i].valor)
val = Interpreta_Expresion(nodo.listValores[i], tablaSimbolos, tabla)
if col.tipo.tipo == TipoDato.NUMERICO:
result = validarTiposNumericos(
col.tipo.dato.lower(), val)
elif col.tipo.tipo == TipoDato.CHAR:
if val.tipo == Expresion.CADENA:
result = validarTiposChar(col.tipo, val)
else:
result = False
listaSemanticos.append(Error.ErrorS(
"Error Semantico",
"Error de tipos: tipo " + col.tipo.dato + " columna " + col.nombre + " valor a insertar " + str(
val.tipo)))
elif col.tipo.tipo == TipoDato.FECHA:
result = validarTiposFecha(
col.tipo.dato.lower(), val)
elif col.tipo.tipo == TipoDato.BOOLEAN:
if val.tipo == Expresion.BOOLEAN:
result = True
if not result:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error de tipos: tipo " + col.tipo.dato + " columna " + col.nombre + " valor a insertar " + str(
val.tipo)))
flag = False
break
else:
bas1 = validaCheck(
col, val, nodo.listaColumnas, nodo.listValores)
if (bas1 == 0):
if validarUnique(col, val.valor, tabla):
if validarPK(col, val.valor, tabla):
if validarFK(col, val.valor, tabla, tablaSimbolos):
flag = True
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "El valor " + str(
val.valor) + " no corresponde a ningún valor de llave foránea"))
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "El valor " + str(
val.valor) + " infringe la condición de llave primaria"))
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico",
"El valor " + val.valor + " infringe la condición de columna única"))
elif bas1 == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico",
"La columna " + col.nombre + " no superó la condición CHECK"))
return
elif bas1 == 2:
flag = False
listaSemanticos.append(Error.ErrorS("Error Semantico", "La columna " + col.nombre +
" en su condición CHECK contienen un operario inexistente dentro de la tabla actual "))
return
if flag:
flag = False
tupla = validarDefault(nodo.listaColumnas, nodo.listValores, tabla, tablaSimbolos)
rs = jBase.insert(useActual, tabla.nombre, tupla)
if rs == 0:
consola += "Se insertó con éxito la tupla" + str(tupla) + "\n"
elif rs == 1:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "Fallo al insertar la tupla: " + str(tupla)))
elif rs == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Fallo al insertar, la base de datos '%s' no existe " % useActual))
elif rs == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Fallo al insertar, la tabla '%s' no existe" % tabla.nombre))
elif rs == 4:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "Fallo al insertar, Llaves duplicadas"))
elif rs == 5:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Fallo al insertar, La tupla excede el número de columnas"))
elif b["cod"] == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La columna " + b["col"] + "no existe en la tabla"))
elif b["cod"] == 2:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La columna " + b["col"] + " no puede ser nula"))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "El numero de columnas a insertar no coincide"))
else:
if (len(nodo.listValores) == len(tabla.columnas)):
result = False
# check the number of columns and which ones have null values
columnas = list(tabla.columnas.keys())
b = tabla.comprobarNulas2(columnas)
if b["cod"] == 0:
# validate types
for i in range(len(columnas)):
col = tabla.getColumna(columnas[i])
val = Interpreta_Expresion(nodo.listValores[i], tablaSimbolos, tabla)
if col.tipo.tipo == TipoDato.NUMERICO:
result = validarTiposNumericos(
col.tipo.dato.lower(), val)
elif col.tipo.tipo == TipoDato.CHAR:
if val.tipo == Expresion.CADENA:
result = validarTiposChar(col.tipo, val)
else:
result = False
listaSemanticos.append(Error.ErrorS(
"Error Semantico",
"Error de tipos: tipo " + col.tipo.dato + " columna " + col.nombre + " valor a insertar " + str(
val.tipo)))
elif col.tipo.tipo == TipoDato.FECHA:
result = validarTiposFecha(
col.tipo.dato.lower(), val)
elif col.tipo.tipo == TipoDato.BOOLEAN:
if val.tipo == Expresion.BOOLEAN:
result = True
if not result:
from enum import Enum
import numpy as np
from cvxopt import matrix, solvers
from processing import vector_of_quants
import math
class KnowledgePatternManager:
@staticmethod
def checkConsistency(knowledgePattern):
return KnowledgePatternManager.__getConsistencyChecker(knowledgePattern.type) \
.isConsistent(knowledgePattern)
@staticmethod
def __getConsistencyChecker(type):
if type == KnowledgePatternType.QUANTS:
return QuantConsistencyChecker()
elif type == KnowledgePatternType.DISJUNCTS:
return DisjunctConsistencyChecker()
elif type == KnowledgePatternType.CONJUNCTS:
return ConjunctConsistencyChecker()
else:
raise TypeError("Correct type of knowledge pattern")
@staticmethod
def getProbabilityFormula(knowledgePattern, formulaPattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = FormulaManager.getQuantsVector(formulaPattern, int(math.log(size, 2)))
return LinearProgrammingProblemSolver.findOptimalFormulaValues(matrix, intervals, size, vector)
@staticmethod
def __getEvidenceCorrector(type):
if type == EvidencePatternType.DETERMINISTIC:
return DeterministicEvidenceCorrector()
elif type == EvidencePatternType.STOCHASTIC:
return StochasticEvidenceCorrector()
elif type == EvidencePatternType.INACCURATE:
return InaccurateEvidenceCorrector()
@staticmethod
def correctEvidenceData(knowledgePattern, evidencePattern):
return KnowledgePatternManager.__getEvidenceCorrector(evidencePattern.type).getCorrectData(knowledgePattern, evidencePattern)
class FormulaManager:
@staticmethod
def getQuantsVector(formulaPattern, size):
return vector_of_quants(formulaPattern.string, size)
@staticmethod
def getFormulaForOptimise(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2**(evidencePattern.size)
result_formula = np.zeros(size)
vector = EvidenceManager.getSubIdealProbability(evidencePattern)
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
for i in range(0, 2**evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
formula = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
formula = np.dot(formula, np.dot(I, vector)[i])
result_formula += formula
return result_formula
@staticmethod
def getConjunctstoQuantsVector(vector):
return np.dot(MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(len(vector), 2))), vector)
@staticmethod
def getFormulaForOptimiseIn(knowledgePattern, evidencePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
matrix_for_opt = FormulaManager.getSubIdealtoIdealMatrix(evidencePattern, knowledgePattern)
size_evidence = 2 ** (evidencePattern.size)
result_formula_min = np.zeros(2 **evidencePattern.size)
result_formula_max = np.zeros(2 **evidencePattern.size)
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
for i in range(0, 2**evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
formula = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
prob = LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, formula).array
result_formula_min += I[i]*prob[0]
result_formula_max += I[i]*prob[1]
result = np.vstack([result_formula_min, result_formula_max])
return result
@staticmethod
def getSubIdealtoIdealMatrix(evidencePattern, knowledgePattern):
I = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
Matrix = np.zeros((2 ** evidencePattern.size, knowledgePattern.size), dtype = np.double)
for i in range(0, 2 ** evidencePattern.size):
for j in range(0, 2 **evidencePattern.size):
Matrix[i][int(ideal[j])] = I[i][j]
return Matrix
class EvidenceManager:
@staticmethod
def getConjunctsVector(evidencePattern):
arr_conj = []
num_conj = 0
p_arr = evidencePattern.p_array
for i in range(len(p_arr)):
if p_arr[i] == 0: continue #?
num_conj += pow(2, p_arr[i] - 1)
arr_conj.append(num_conj)
num_conj = 0
m_arr = evidencePattern.m_array
for i in range(len(m_arr)):
num_conj += pow(2, m_arr[i] - 1)
arr_conj.append(num_conj)
return np.array(arr_conj)
@staticmethod
def getProbabilityOfDeterministicEvidence(knowledgePattern, mas):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = MatrixProducer.getTMatrix(mas, int(math.log(size, 2)))[0].tolist()
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, vector)
@staticmethod
def getProbabilityofStochasticEvidence(knowledgePattern, evidencePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
vector = FormulaManager.getFormulaForOptimise(knowledgePattern, evidencePattern)
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValues(matrix, intervals, size, vector)
@staticmethod
def getProbabilityofInaccurateEvidence(knowledgePattern, evidencePattern):
size = evidencePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
vectors = FormulaManager.getFormulaForOptimiseIn(knowledgePattern, evidencePattern)
intervals = EvidenceManager.getSubIdealIntervalProbability(evidencePattern)
return LinearProgrammingProblemSolver.findOptimalConjunctsFormulaValuesIn(matrix, intervals, size, vectors)
@staticmethod
def getSubIdealProbability(evidencePattern):
vector = np.ones(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector[i+1] = array[i][1]
return vector
@staticmethod
def getSubIdealIntervalProbability(evidencePattern):
vector_min = np.ones(2 ** evidencePattern.size)
vector_max = np.ones(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector_min[i+1] = array[i][1]
vector_max[i+1] = array[i][2]
vector = []
vector.append(vector_min)
vector.append(vector_max)
return vector
@staticmethod
def getSubIdeal(evidencePattern):
vector = np.zeros(2 ** evidencePattern.size)
array = evidencePattern.arr
for i in range(0, 2**evidencePattern.size-1):
vector[i+1] = array[i][0]
return vector
class EvidencePatternType(Enum):
DETERMINISTIC = 'deterministic',
STOCHASTIC = 'stochastic',
INACCURATE = 'inaccurate'
class KnowledgePatternType(Enum):
QUANTS = 'quants',
DISJUNCTS = 'disjuncts',
CONJUNCTS = 'conjuncts'
class ConsistencyChecker:
@staticmethod
def isConsistent(knowledgePattern):
raise NotImplementedError("It's a method of abstract class, use appropriate implementation")
class EvidenceCorrector:
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
raise NotImplementedError("It's a method of abstract class, use appropriate implementation")
class DeterministicEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
# TODO: sort out the handling of 1 and 0
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalEvidenceValues(matrix, intervals, size, MatrixProducer.getEvidencevector(evidencePattern.arr, int(math.log(size, 2))), intervals, MatrixProducer.getTMatrix(evidencePattern.arr, int(math.log(size, 2))))
class StochasticEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2 ** (evidencePattern.size)
result = [[0, 0] for i in range(knowledgePattern.size)]
vector = EvidenceManager.getSubIdealProbability(evidencePattern) #p_ca
I = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(knowledgePattern.size, 2)))
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
vector_quants = np.dot(I_1, vector)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
intervals = np.array(knowledgePattern.array, dtype=np.double)
for i in range(0, 2 ** evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
divider = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
numerator = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))
ideal_ = LinearProgrammingProblemSolver.findOptimalStochasticEvidenceValues(I, intervals, size, numerator, divider)
if len(ideal_) == 0:
return EvidenceCorrectorResult(False, [])
for j in range(size):
result[j][0] += round(vector_quants[i] * ideal_[j][0], 3)
result[j][1] += round(vector_quants[i] * ideal_[j][1], 3)
if result[0][0] == 0: return EvidenceCorrectorResult(False, [])
return EvidenceCorrectorResult(True, result)
class InaccurateEvidenceCorrector(EvidenceCorrector):
@staticmethod
def getCorrectData(knowledgePattern, evidencePattern):
size = knowledgePattern.size
size_evidence = 2 ** (evidencePattern.size)
result_formula_min = np.zeros((size, size_evidence))
result_formula_max = np.zeros((size, size_evidence))
I = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(knowledgePattern.size, 2)))
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(evidencePattern.size)
#vector_quants = np.dot(I_1, vector)
ideal = EvidenceManager.getSubIdeal(evidencePattern)
intervals = np.array(knowledgePattern.array, dtype=np.double)
for i in range(0, 2 ** evidencePattern.size):
array = [[ideal[i]], [ideal[size_evidence - 1 - i]]]
divider = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))[0]
numerator = MatrixProducer.getTMatrix(array, int(math.log(size, 2)))
ideal_ = LinearProgrammingProblemSolver.findOptimalStochasticEvidenceValues(I, intervals, size, numerator, divider)
if len(ideal_) == 0:
return EvidenceCorrectorResult(False, [])
for j in range(size):
result_formula_min[j] += I_1[i] * ideal_[j][0]
result_formula_max[j] += I_1[i] * ideal_[j][1]
return LinearProgrammingProblemSolver.findOptimalInaccurateEvidenceValues(I_1, EvidenceManager.getSubIdealIntervalProbability(evidencePattern),size, size_evidence, result_formula_min, result_formula_max)
class QuantConsistencyChecker(ConsistencyChecker):
@staticmethod
def isConsistent(knowledgePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getIdentityMatrix(size)
intervals = np.array(knowledgePattern.array, dtype=np.double)
result = LinearProgrammingProblemSolver.findOptimalValues(matrix, intervals, size)
if result.consistent:
result = LinearProgrammingProblemSolver.findNormalizedOptimalValues(np.array(result.array, dtype=np.double),
size)
return result
class ConjunctConsistencyChecker(ConsistencyChecker):
@staticmethod
def isConsistent(knowledgePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalValues(matrix, intervals, size)
class DisjunctConsistencyChecker(ConsistencyChecker):
@staticmethod
def isConsistent(knowledgePattern):
size = knowledgePattern.size
matrix = MatrixProducer.getDisjunctsToQuantsMatrix(int(math.log(size, 2)))
intervals = np.array(knowledgePattern.array, dtype=np.double)
return LinearProgrammingProblemSolver.findOptimalValues(matrix, intervals, size)
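# Editorial note: each checker above reduces consistency to linear-programming
# feasibility -- there must exist a non-negative quant (constituent) probability
# vector whose induced conjunct/disjunct probabilities stay inside every given
# interval. findOptimalValues in LinearProgrammingProblemSolver below also
# tightens the intervals by minimising and maximising each coordinate subject
# to those constraints.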
class MatrixProducer:
@staticmethod
def getDisjunctsToQuantsMatrix(n):
return np.linalg.inv(MatrixProducer.getQuantsToDisjunctsMatrix(n))
@staticmethod
def getQuantsToDisjunctsMatrix(n):
if n == 0:
return np.array([1], dtype=np.double)
elif n == 1:
return np.array([[1, 1], [0, 1]], dtype=np.double)
else:
k = MatrixProducer.getQuantsToDisjunctsMatrix(n - 1)
i = np.ones((2 ** (n - 1), 2 ** (n - 1)), dtype=np.double)
k_o = k.copy()
k_o[0] = [0] * 2 ** (n - 1)
return np.block([[k, k], [k_o, i]])
@staticmethod
def getConjunctsToQuantsMatrix(n):
if n == 0:
return np.array([1], dtype=np.double)
elif n == 1:
return np.array([[1, -1], [0, 1]], dtype=np.double)
else:
i = MatrixProducer.getConjunctsToQuantsMatrix(n - 1)
o = np.zeros((2 ** (n - 1), 2 ** (n - 1)), dtype=np.double)
return np.block([[i, (-1) * i], [o, i]])
@staticmethod
def getIdentityMatrix(size):
return np.eye(size, dtype=np.double)
@staticmethod
def getTMatrix(mas, size):
matrix = np.array([1])
I_1 = MatrixProducer.getConjunctsToQuantsMatrix(1)
J_1 = np.linalg.inv(I_1)
H_p = np.array([[0, 0], [0, 1]])
H_m = np.array([[1, 0], [0, 0]])
H = MatrixProducer.getIdentityMatrix(2)
for i in range(size, 0, -1):
if i == 0:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H_p), I_1))
continue
if i in mas[0]:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H_p), I_1))
elif i in mas[1]:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H_m), I_1))
else:
matrix = np.kron(matrix, np.dot(np.dot(J_1, H), I_1))
return matrix
@staticmethod
def getEvidencevector(mas, size):
return MatrixProducer.getTMatrix(mas, size)[0]
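# Editorial worked example: for two atoms the conjunct-to-quant matrix built by
# MatrixProducer.getConjunctsToQuantsMatrix(2) is the block matrix
# [[I1, -I1], [0, I1]] with I1 = [[1, -1], [0, 1]]. Applied to the conjunct
# vector (1, p(x1), p(x2), p(x1&x2)) it yields the four quant probabilities by
# inclusion-exclusion; the numbers below describe two independent events with
# probability 0.5, so every quant gets probability 0.25.
def _example_conjuncts_to_quants():
    I2 = MatrixProducer.getConjunctsToQuantsMatrix(2)
    conjuncts = np.array([1.0, 0.5, 0.5, 0.25])
    return np.dot(I2, conjuncts)   # -> [0.25, 0.25, 0.25, 0.25]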
class LinearProgrammingProblemSolver:
@staticmethod
def findOptimalFormulaValues(matrixs, array, size, vector):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.dot(np.array(MatrixProducer.getConjunctsToQuantsMatrix(int(math.log(len(vector), 2))).transpose()), vector)
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForFormula(a, b, c)
@staticmethod
def findOptimalConjunctsFormulaValues(matrixs, array, size, vector):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.array(vector)
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForFormula(a, b, c)
@staticmethod
def findOptimalValues(matrixs, array, size):
a = np.vstack(((-1) * matrixs, (-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack((np.zeros(size, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.array(np.zeros(size, dtype=np.double))
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForMatrices(a, b, c, size, array)
@staticmethod
def findNormalizedOptimalValues(array, size):
a = np.vstack(((-1) * np.ones(size, dtype=np.double), np.ones(size, dtype=np.double),
(-1) * np.eye(size, dtype=np.double), np.eye(size, dtype=np.double)))
a = matrix(a)
b = np.hstack(
((-1) * np.ones(1, dtype=np.double), np.ones(1, dtype=np.double), (-1) * array[:, 0], array[:, 1]))
b = matrix(b)
c = np.array(np.zeros(size, dtype=np.double))
c = matrix(c)
return LinearProgrammingProblemSolver.optimizeForMatrices(a, b, c, size, array)
@staticmethod
def optimizeForMatrices(a, b, c, size, intervals):
solvers.options['show_progress'] = False
_intervals = intervals.copy()
for i in range(size):
c[i] = 1
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
return ConsistencyResult(False, [])
_intervals[i][0] = round(sol['x'][i], 3)
c[i] = -1
sol = solvers.lp(c, a, b)
if sol['status'] != 'optimal':
return ConsistencyResult(False, [])
_intervals[i][1] = round(sol['x'][i], 3)
c[i] = 0
return ConsistencyResult(True, _intervals.tolist())
@staticmethod
def optimizeForFormula(a, b, c):
answer =
# coding=utf-8
import os
import torch
import mpu
import torch.nn.functional as F
from collections import defaultdict
from tokenization_eva import EVATokenizer
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping, tokenizer=None):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.length_fact = []
self.beams = []
self.worst_score = 1e9
self.raw_worst_score = 1e9
self.tokenizer = tokenizer
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
self.length_fact.append(len(hyp) ** self.length_penalty)
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx, _) for idx, (s, _) in enumerate(self.beams)])
del self.beams[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
self.raw_worst_score = self.worst_score * (len(sorted_scores[1][2]) ** self.length_penalty)
else:
self.worst_score = min(score, self.worst_score)
self.raw_worst_score = sum_logprobs
def is_done(self, best_sum_logprobs, cur_len):
"""
If there are enough hypotheses and none of the hypotheses still being generated
can become better than the worst one in the heap, we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
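# Editorial usage sketch: BeamHypotheses keeps at most `num_beams` finished
# hypotheses ranked by length-penalised score. The token ids below are
# arbitrary illustrative values.
def _example_beam_hypotheses():
    hyps = BeamHypotheses(num_beams=2, max_length=16,
                          length_penalty=1.0, early_stopping=False)
    hyps.add([5, 7, 9], sum_logprobs=-1.2)       # score -1.2/3 = -0.4
    hyps.add([5, 7, 11, 13], sum_logprobs=-1.0)  # score -1.0/4 = -0.25
    hyps.add([5, 8], sum_logprobs=-3.5)          # worse than current worst; ignored
    # is_done: the best remaining score -2.0/4 = -0.5 cannot beat the stored
    # worst (-0.4), so decoding of this sentence can stop.
    return len(hyps), hyps.is_done(best_sum_logprobs=-2.0, cur_len=4)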
def construct_antonym_dict(args):
if args.rule_path is None:
return None
with open(os.path.join(args.rule_path, './antonym/antonym.txt'), 'r') as f:
data = f.read().split("\n")
data = [eval(item) for item in data if item]
antonym_dict = defaultdict(list)
for first, second in data:
antonym_dict[first].append(second)
antonym_dict[second].append(first)
return antonym_dict
def calc_banned_antonym_words_ids(input_tokens, tokenizer, antonym_dict):
if antonym_dict is None:
return []
antonym_words = [set() for _ in range(len(input_tokens))]  # one independent set per sequence
# only consider tokens occurring in current sentence
for idx, tokens in enumerate(input_tokens):
for word in tokenizer.convert_ids_to_tokens(reversed(tokens.tolist())):
if word == '<sep>':
break
antonym_words[idx].update(tokenizer.convert_tokens_to_ids(antonym_dict[word]))
return [list(tokens) for tokens in antonym_words]
def calc_banned_ngram_tokens(prev_input_ids, num_hypos: int, no_repeat_ngram_size: int, tokenizer: EVATokenizer) -> list:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
generated_ngrams = [{} for _ in range(num_hypos)]
prev_input_words = []
for ids in prev_input_ids:
tokens = tokenizer.convert_ids_to_tokens(ids.tolist())
words = []
for token in tokens:
if token == '<sep>':
words.append(token)
else:
words += list(token)
prev_input_words.append(words)
for idx in range(num_hypos):
gen_words = prev_input_words[idx]
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_words[i:] for i in range(no_repeat_ngram_size)]):
for prefix_len in range(no_repeat_ngram_size):
prev_ngram = ''.join(ngram[:prefix_len])
suffix_ngram = ''.join(ngram[prefix_len:])
if tokenizer.check(suffix_ngram): # suffix is in the vocabulary
generated_ngram[prev_ngram] = generated_ngram.get(prev_ngram, set()) | set([suffix_ngram])
def _get_generated_ngrams(hypo_idx):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
cur_len = len(prev_input_words[hypo_idx])
generated_ngram_idx = []
'''
For a 3-gram ban, the already-generated prefix can have length 2, 1, or 0.
'''
for prefix_len in range(no_repeat_ngram_size):
ngram_words = ''.join(prev_input_words[hypo_idx][cur_len-prefix_len:])
generated_ngram_words = generated_ngrams[hypo_idx].get(ngram_words, [])
generated_ngram_idx += tokenizer.convert_tokens_to_ids(generated_ngram_words)
if prev_input_words[hypo_idx][-1] in [',', ',', '。']:
generated_ngram_idx.append(tokenizer.convert_token_to_id('但'))
generated_ngram_idx.append(tokenizer.convert_token_to_id('不过'))
return generated_ngram_idx
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-10000):
# This function has been mostly taken from huggingface conversational ai code at
# https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
batch_size = logits.size()[0]
if top_p > 0.0:
# logits : (batch_size, vocab_size)
logits=logits.view(batch_size, -1).contiguous()
# logits : (batch_size, vocab_size)
for logit in logits:
# logit: (vocab_size)
sorted_logits, sorted_indices = torch.sort(logit, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right so that the first token above the threshold is also kept
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logit[indices_to_remove] = filter_value
logits=logits.view(batch_size, -1).contiguous()
return logits
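# Illustrative usage sketch of top_k_logits (hypothetical values; not called by
# the generation code below).
def _top_k_filter_demo():
    toy_logits = torch.tensor([[2.0, 1.0, 0.5, -1.0, -3.0]])
    filtered = top_k_logits(toy_logits.clone(), top_k=2, top_p=0.0)
    # Only the two largest logits keep their values; the rest sit at -10000,
    # so softmax puts essentially all mass on the kept tokens.
    probs = F.softmax(filtered, dim=-1)
    return torch.multinomial(probs, num_samples=1)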
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids):
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# a bad word consisting of a single token is always banned
return True
if len(tokens) > len(prev_input_ids):
# if the bad word sequence is longer than prev_input_ids they can't be equal
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in bad_words_ids:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
bad_words_ids
)
if _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
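# Worked example of the matcher above (hypothetical token ids, not from the real
# vocabulary): bad_words_ids = [[42], [7, 8]] bans token 42 unconditionally and
# bans 8 only when a hypothesis currently ends in 7.
def _bad_words_demo():
    # Expected result: [[42, 8], [42]] — the first row ends in 7, so 8 is banned too.
    prev = torch.tensor([[5, 7], [5, 9]])
    return calc_banned_bad_words_ids(prev, bad_words_ids=[[42], [7, 8]])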
def enforce_repetition_penalty_(tokenizer, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
if previous_token != tokenizer.sep_id:
# if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
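# Numeric sketch of the CTRL-style update above (hypothetical values): with
# repetition_penalty = 1.2, a previously generated token becomes less likely in
# either sign regime:
#   lprobs = -2.0  ->  -2.0 * 1.2 = -2.4   (more negative)
#   lprobs =  0.5  ->   0.5 / 1.2 ≈ 0.417  (smaller positive)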
def postprocess_next_token_scores(
tokenizer: EVATokenizer,
scores,
input_ids,
no_repeat_ngram_size,
bad_words_ids,
cur_len,
min_length,
max_length,
eos_token_id,
repetition_penalty,
batch_size,
num_beams,
antonym_dict,
):
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
enforce_repetition_penalty_(
tokenizer, scores, batch_size, num_beams, input_ids, repetition_penalty,
)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -10000
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(input_ids, num_batch_hypotheses, no_repeat_ngram_size, tokenizer=tokenizer)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -10000
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -10000
# add antonym banned list
banned_tokens = calc_banned_antonym_words_ids(input_ids, tokenizer, antonym_dict)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -10000
scores[:, 0] = -50000
return scores
def generate_no_beam(model_batch, full_context, model, tokenizer: EVATokenizer, args, device):
target_length = args.max_generation_length
dec_init_length = 1 # +1 for s_0
enc_input_ids = model_batch['enc_input_ids']
enc_attention_mask = model_batch['enc_attention_mask']
enc_outputs = model(
enc_input_ids=enc_input_ids,
enc_attention_mask=enc_attention_mask,
only_encoder=True
)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"]
batch_size = enc_input_ids.size(0)
# for generating responses
# we only use the <go> token, so truncate other tokens
dec_input_ids = model_batch['dec_input_ids'][..., :dec_init_length]
dec_attention_mask = model_batch['dec_attention_mask'][..., :dec_init_length, :dec_init_length]
# we use past_key_values, so only the current token mask is needed
cross_attention_mask = model_batch['cross_attention_mask'][..., :dec_init_length, :]
unfinished_sents = enc_input_ids.new(enc_input_ids.size(0)).fill_(1)
output_ids = enc_input_ids.new_zeros([enc_input_ids.size(0), 0]) # not include the prompt
prob_idx = torch.arange(batch_size)
past_key_values = None
gen_len = 0
# construct antonym dict
antonym_dict = construct_antonym_dict(args)
while gen_len < target_length:
if unfinished_sents.max() == 0:
tokens_to_add = tokenizer.sep_id * (1 - unfinished_sents)
output_ids = torch.cat([output_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
else:
dec_outputs = model(
dec_input_ids=dec_input_ids,
dec_attention_mask=dec_attention_mask,
cross_attention_mask=cross_attention_mask,
enc_hidden_states=enc_hidden_states,
past_key_values=past_key_values,
)
past_key_values = dec_outputs['past_key_values']
lm_logits = dec_outputs['lm_logits']
gathered_lm_logits = [torch.zeros_like(lm_logits).to(device) for _ in range(mpu.get_model_parallel_world_size())]
torch.distributed.all_gather(gathered_lm_logits, lm_logits.data, mpu.get_model_parallel_group())
lm_logits = torch.cat(gathered_lm_logits, dim=-1)
logits = lm_logits[:, -1, :] / args.temperature
prev_output_tokens = torch.cat([full_context, output_ids], dim=-1)
logits = postprocess_next_token_scores(
tokenizer=tokenizer,
scores=logits,
input_ids=prev_output_tokens,
no_repeat_ngram_size=args.no_repeat_ngram_size,
bad_words_ids=[[0]],
cur_len=gen_len,
min_length=args.min_generation_length,
max_length=args.max_generation_length,
eos_token_id=tokenizer.sep_id,
repetition_penalty=args.repetition_penalty,
batch_size=batch_size,
num_beams=1,
antonym_dict=antonym_dict
)
logits = top_k_logits(logits, top_k=args.top_k, top_p=args.top_p)
# next_token = torch.argmax(logits, dim=-1)
probs = F.softmax(logits.float(), dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
tokens_to_add = next_token * unfinished_sents + tokenizer.sep_id * (1 - unfinished_sents)
dec_input_ids = tokens_to_add.unsqueeze(-1)
output_ids = torch.cat([output_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
# let the current token attend to all previous tokens
dec_attention_mask = torch.cat([dec_attention_mask[:, :, -1:, :], dec_attention_mask[:, :, -1:, -1:]], dim=-1)
cross_attention_mask = cross_attention_mask[:, :, -1:, :]
gen_len += 1
unfinished_sents.mul_(tokens_to_add.ne(tokenizer.sep_id).long())
output_ids = output_ids.cpu().tolist()
generation_token_ids_list = []
generation_str_list = []
for e in output_ids:
generation_token_ids = e[:e.index(tokenizer.sep_id)] if tokenizer.sep_id in e else e
generation_token_ids_list.append(generation_token_ids)
generation_str_list.append(tokenizer.decode(generation_token_ids))
return generation_str_list, generation_token_ids_list
def generate_beam(model_batch, full_context, model, tokenizer: EVATokenizer, args, device):
'''
Since the context in model batch is truncated, we need full_context to store the tokens in the entire context.
'''
num_beams = args.num_beams
target_length = args.max_generation_length
do_sample = args.top_p > 0 or args.top_k > 0
vocab_size = tokenizer.vocab_size
enc_input_ids = model_batch['enc_input_ids']
enc_attention_mask = model_batch['enc_attention_mask']
enc_input_length = enc_input_ids.size(-1)
batch_size = enc_input_ids.size(0)
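# Tile every encoder-side tensor num_beams times: [batch, ...] -> [batch * num_beams, ...]
# so that each beam hypothesis works on its own copy of the encoded context.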
enc_input_ids = enc_input_ids.unsqueeze(1).expand(batch_size, num_beams, enc_input_length)
enc_attention_mask = enc_attention_mask.unsqueeze(1).expand(batch_size, num_beams, 1, enc_input_length, enc_input_length)
enc_input_ids = enc_input_ids.contiguous().view(batch_size * num_beams, enc_input_length)
enc_attention_mask = enc_attention_mask.contiguous().view(batch_size * num_beams, 1, enc_input_length, enc_input_length)
full_context = full_context.unsqueeze(1).expand(batch_size, num_beams, full_context.size(-1))
full_context = full_context.contiguous().view(batch_size * num_beams, full_context.size(-1))
enc_outputs = model(
enc_input_ids=enc_input_ids,
enc_attention_mask=enc_attention_mask,
only_encoder=True
)
| |
class TestSequence(Enum):
_init_ = 'sequence'
_order_ = lambda member: member.sequence
item_id = 'An$(1,6)', 0 # Item Code
warehouse_no = 'An$(9,4)', 2 # Warehouse Number
company = 'Hn$(13,6)', 3 # 4 SPACES + COMPANY
company_id = 'An$(7,2)', 1 # Company Code
inv_units = 'Qn$(7,2)', 10 # Inv Units
available = 'Zn$(1,1)', 5 # Available?
contract_item = 'Bn(2,1)', 6 # Contract Item?
sales_category = 'Fn', 7 # Sales Category
key_type = 'Cn$(19,3)', 4 # Key Type = '1**'
gl_category = 'Rn$(5,1)', 8 # G/L Category
warehouse_category = 'Sn$(6,1)', 9 # Warehouse Category
def test_order_as_function_in_subclass(self):
#
class Parent(Enum):
_init_ = 'value sequence'
_order_ = lambda m: m.sequence
#
class Child(Parent):
item_id = 'An$(1,6)', 0 # Item Code
company_id = 'An$(7,2)', 1 # Company Code
warehouse_no = 'An$(9,4)', 2 # Warehouse Number
company = 'Hn$(13,6)', 3 # 4 SPACES + COMPANY
key_type = 'Cn$(19,3)', 4 # Key Type = '1**'
available = 'Zn$(1,1)', 5 # Available?
contract_item = 'Bn(2,1)', 6 # Contract Item?
sales_category = 'Fn', 7 # Sales Category
gl_category = 'Rn$(5,1)', 8 # G/L Category
warehouse_category = 'Sn$(6,1)', 9 # Warehouse Category
inv_units = 'Qn$(7,2)', 10 # Inv Units
#
for i, member in enumerate(Child):
self.assertEqual(i, member.sequence)
#
ts = Child
self.assertEqual(ts.item_id.name, 'item_id')
self.assertEqual(ts.item_id.value, 'An$(1,6)')
self.assertEqual(ts.item_id.sequence, 0)
self.assertEqual(ts.company_id.name, 'company_id')
self.assertEqual(ts.company_id.value, 'An$(7,2)')
self.assertEqual(ts.company_id.sequence, 1)
self.assertEqual(ts.warehouse_no.name, 'warehouse_no')
self.assertEqual(ts.warehouse_no.value, 'An$(9,4)')
self.assertEqual(ts.warehouse_no.sequence, 2)
self.assertEqual(ts.company.name, 'company')
self.assertEqual(ts.company.value, 'Hn$(13,6)')
self.assertEqual(ts.company.sequence, 3)
self.assertEqual(ts.key_type.name, 'key_type')
self.assertEqual(ts.key_type.value, 'Cn$(19,3)')
self.assertEqual(ts.key_type.sequence, 4)
self.assertEqual(ts.available.name, 'available')
self.assertEqual(ts.available.value, 'Zn$(1,1)')
self.assertEqual(ts.available.sequence, 5)
self.assertEqual(ts.contract_item.name, 'contract_item')
self.assertEqual(ts.contract_item.value, 'Bn(2,1)')
self.assertEqual(ts.contract_item.sequence, 6)
self.assertEqual(ts.sales_category.name, 'sales_category')
self.assertEqual(ts.sales_category.value, 'Fn')
self.assertEqual(ts.sales_category.sequence, 7)
self.assertEqual(ts.gl_category.name, 'gl_category')
self.assertEqual(ts.gl_category.value, 'Rn$(5,1)')
self.assertEqual(ts.gl_category.sequence, 8)
self.assertEqual(ts.warehouse_category.name, 'warehouse_category')
self.assertEqual(ts.warehouse_category.value, 'Sn$(6,1)')
self.assertEqual(ts.warehouse_category.sequence, 9)
self.assertEqual(ts.inv_units.name, 'inv_units')
self.assertEqual(ts.inv_units.value, 'Qn$(7,2)')
self.assertEqual(ts.inv_units.sequence, 10)
pass
if StdlibEnumMeta is not None:
def test_stdlib_inheritence(self):
self.assertTrue(isinstance(self.Season, StdlibEnumMeta))
self.assertTrue(issubclass(self.Season, StdlibEnum))
def test_multiple_mixin(self):
class MaxMixin(object):
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin(object):
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue', '%r is not %r' % (str(Color.BLUE), 'blue'))
class Color(StrMixin, MaxMixin, Enum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue', '%r is not %r' % (str(Color.BLUE), 'blue'))
class CoolColor(StrMixin, SomeEnum, Enum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue', '%r is not %r' % (str(CoolColor.BLUE), 'blue'))
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue', '%r is not %r' % (str(CoolerColor.BLUE), 'blue'))
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue', '%r is not %r' % (str(CoolestColor.BLUE), 'blue'))
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue', '%r is not %r' % (str(ConfusedColor.BLUE), 'blue'))
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
_order_ = 'RED GREEN BLUE'
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue', '%r is not %r' % (str(ReformedColor.BLUE), 'blue'))
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ReformedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_enum_of_types(self):
"""Support using Enum to refer to types deliberately."""
class MyTypes(Enum):
i = int
f = float
s = str
self.assertEqual(MyTypes.i.value, int)
self.assertEqual(MyTypes.f.value, float)
self.assertEqual(MyTypes.s.value, str)
class Foo:
pass
class Bar:
pass
class MyTypes2(Enum):
a = Foo
b = Bar
self.assertEqual(MyTypes2.a.value, Foo)
self.assertEqual(MyTypes2.b.value, Bar)
class SpamEnumNotInner:
pass
class SpamEnum(Enum):
spam = SpamEnumNotInner
self.assertEqual(SpamEnum.spam.value, SpamEnumNotInner)
if pyver < 3.0:
def test_nested_classes_in_enum_do_become_members(self):
# manually set __qualname__ to remove testing framework noise
class Outer(Enum):
_order_ = 'a b Inner'
__qualname__ = "Outer"
a = 1
b = 2
class Inner(Enum):
__qualname__ = "Outer.Inner"
foo = 10
bar = 11
self.assertTrue(isinstance(Outer.Inner, Outer))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.value.foo.value, 10)
self.assertEqual(
list(Outer.Inner.value),
[Outer.Inner.value.foo, Outer.Inner.value.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b, Outer.Inner],
)
def test_really_nested_classes_in_enum_do_become_members(self):
class Outer(Enum):
_order_ = 'a b Inner'
a = 1
b = 2
class Inner(Enum):
foo = 10
bar = 11
self.assertTrue(isinstance(Outer.Inner, Outer))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.value.foo.value, 10)
self.assertEqual(
list(Outer.Inner.value),
[Outer.Inner.value.foo, Outer.Inner.value.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b, Outer.Inner],
)
def test_nested_classes_in_enum_are_skipped_with_skip(self):
"""Support locally-defined nested classes using @skip"""
# manually set __qualname__ to remove testing framework noise
class Outer(Enum):
__qualname__ = "Outer"
a = 1
b = 2
@skip
class Inner(Enum):
__qualname__ = "Outer.Inner"
foo = 10
bar = 11
self.assertTrue(isinstance(Outer.Inner, type))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.foo.value, 10)
self.assertEqual(
list(Outer.Inner),
[Outer.Inner.foo, Outer.Inner.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b],
)
def test_really_nested_classes_in_enum_are_skipped_with_skip(self):
"""Support locally-defined nested classes using @skip"""
class Outer(Enum):
a = 1
b = 2
@skip
class Inner(Enum):
foo = 10
bar = 11
self.assertTrue(isinstance(Outer.Inner, type))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.foo.value, 10)
self.assertEqual(
list(Outer.Inner),
[Outer.Inner.foo, Outer.Inner.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b],
)
def test_enum_call_without_arg(self):
class Color(Enum):
black = 0
red = 1
green = 2
blue = 3
#
@classmethod
def _missing_value_(cls, value):
if value is no_arg:
return cls.black
self.assertTrue(Color.red is Color(1))
self.assertTrue(Color.black is Color())
def test_init_subclass(self):
class MyEnum(Enum):
def __init_subclass__(cls, **kwds):
super(MyEnum, cls).__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 'one'
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
class TestStrEnum(TestCase):
def test_set_name(self):
class Descriptor(object):
name = None
def __get__(self, instance, owner_class=None):
if instance is None:
return self
else:
return instance.__dict__[self.name]
def __set__(self, instance, value):
instance.__dict__[self.name] = value
def __set_name__(self, owner, name):
self.name = name
#
class AnEnum(Enum):
ONE = 'one'
two = Descriptor()
#
self.assertEqual(list(AnEnum), [AnEnum.ONE])
self.assertEqual(AnEnum.two.name, 'two')
AnEnum.ONE.two = 'three'
self.assertEqual(AnEnum.ONE.two, 'three')
self.assertEqual(AnEnum.ONE.__dict__['two'], 'three')
def test_private_names(self):
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(len(Private), 0)
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertFalse(isinstance(Private._Private__corporal, Enum))
self.assertEqual(Private._Private__major_, 'Hoolihan')
self.assertFalse(isinstance(Private._Private__major_, Enum))
def test_strenum_inherited_methods(self):
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
self.assertEqual(phy.pi.upper(), 'PI')
self.assertEqual(phy.tau.count('a'), 1)
def test_strict_strenum(self):
with self.assertRaisesRegex(TypeError, r'too many arguments for str'):
class Huh(StrEnum):
huh = 'this', 'is', 'too', 'many'
for uhoh in (object, object(), [], Enum, 9):
with self.assertRaisesRegex(TypeError, r'values must be str'):
class Huh(StrEnum):
huh = uhoh
#
class Either(StrEnum):
_order_ = 'this that Those lower upper'
this = auto()
that = 'That'
Those = auto()
lower = 'lower'
upper = 'UPPER'
self.assertEqual([m.value for m in Either], ['this', 'That', 'those', 'lower', 'UPPER'])
#
with self.assertRaisesRegex(ValueError, r' is not lower-case'):
class Huh(LowerStrEnum):
huh = 'What'
#
class Lower(LowerStrEnum):
_order_ = 'this that Those lower upper'
this = auto()
that = 'that'
Those = auto()
lower = 'lower'
upper = 'upper'
self.assertEqual([m.value for m in Lower], ['this', 'that', 'those', 'lower', 'upper'])
#
with self.assertRaisesRegex(ValueError, r' is not upper-case'):
class Huh(UpperStrEnum):
huh = 'What'
#
class Upper(UpperStrEnum):
_order_ = 'this that Those lower upper'
this = auto()
that = 'THAT'
Those = auto()
lower = 'LOWER'
upper = 'UPPER'
self.assertEqual([m.value for m in Upper], ['THIS', 'THAT', 'THOSE', 'LOWER', 'UPPER'])
def test_init_subclass(self):
class MyEnum(StrEnum):
def __init_subclass__(cls, **kwds):
super(MyEnum, cls).__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
| |
121.4*m.x682 == 0)
m.c1643 = Constraint(expr= m.x143 - m.x148 + 728.4*m.x673 + 364.2*m.x678 + 121.4*m.x683 == 0)
m.c1644 = Constraint(expr= m.x144 - m.x149 + 728.4*m.x674 + 364.2*m.x679 + 121.4*m.x684 == 0)
m.c1645 = Constraint(expr= m.x145 - m.x150 + 728.4*m.x675 + 364.2*m.x680 + 121.4*m.x685 == 0)
m.c1646 = Constraint(expr= m.x146 - m.x151 + 728.4*m.x686 + 364.2*m.x691 + 121.4*m.x696 == 0)
m.c1647 = Constraint(expr= m.x147 - m.x152 + 728.4*m.x687 + 364.2*m.x692 + 121.4*m.x697 == 0)
m.c1648 = Constraint(expr= m.x148 - m.x153 + 728.4*m.x688 + 364.2*m.x693 + 121.4*m.x698 == 0)
m.c1649 = Constraint(expr= m.x149 - m.x154 + 728.4*m.x689 + 364.2*m.x694 + 121.4*m.x699 == 0)
m.c1650 = Constraint(expr= m.x150 - m.x155 + 728.4*m.x690 + 364.2*m.x695 + 121.4*m.x700 == 0)
m.c1651 = Constraint(expr= m.x151 - m.x156 + 728.4*m.x701 + 364.2*m.x706 + 121.4*m.x711 == 0)
m.c1652 = Constraint(expr= m.x152 - m.x157 + 728.4*m.x702 + 364.2*m.x707 + 121.4*m.x712 == 0)
m.c1653 = Constraint(expr= m.x153 - m.x158 + 728.4*m.x703 + 364.2*m.x708 + 121.4*m.x713 == 0)
m.c1654 = Constraint(expr= m.x154 - m.x159 + 728.4*m.x704 + 364.2*m.x709 + 121.4*m.x714 == 0)
m.c1655 = Constraint(expr= m.x155 - m.x160 + 728.4*m.x705 + 364.2*m.x710 + 121.4*m.x715 == 0)
m.c1656 = Constraint(expr= m.x156 - m.x161 + 728.4*m.x716 + 364.2*m.x721 + 121.4*m.x726 == 0)
m.c1657 = Constraint(expr= m.x157 - m.x162 + 728.4*m.x717 + 364.2*m.x722 + 121.4*m.x727 == 0)
m.c1658 = Constraint(expr= m.x158 - m.x163 + 728.4*m.x718 + 364.2*m.x723 + 121.4*m.x728 == 0)
m.c1659 = Constraint(expr= m.x159 - m.x164 + 728.4*m.x719 + 364.2*m.x724 + 121.4*m.x729 == 0)
m.c1660 = Constraint(expr= m.x160 - m.x165 + 728.4*m.x720 + 364.2*m.x725 + 121.4*m.x730 == 0)
m.c1661 = Constraint(expr= m.x161 - m.x166 + 728.4*m.x731 + 364.2*m.x736 + 121.4*m.x741 == 0)
m.c1662 = Constraint(expr= m.x162 - m.x167 + 728.4*m.x732 + 364.2*m.x737 + 121.4*m.x742 == 0)
m.c1663 = Constraint(expr= m.x163 - m.x168 + 728.4*m.x733 + 364.2*m.x738 + 121.4*m.x743 == 0)
m.c1664 = Constraint(expr= m.x164 - m.x169 + 728.4*m.x734 + 364.2*m.x739 + 121.4*m.x744 == 0)
m.c1665 = Constraint(expr= m.x165 - m.x170 + 728.4*m.x735 + 364.2*m.x740 + 121.4*m.x745 == 0)
m.c1666 = Constraint(expr= m.x166 - m.x171 + 728.4*m.x746 + 364.2*m.x751 + 121.4*m.x756 == 0)
m.c1667 = Constraint(expr= m.x167 - m.x172 + 728.4*m.x747 + 364.2*m.x752 + 121.4*m.x757 == 0)
m.c1668 = Constraint(expr= m.x168 - m.x173 + 728.4*m.x748 + 364.2*m.x753 + 121.4*m.x758 == 0)
m.c1669 = Constraint(expr= m.x169 - m.x174 + 728.4*m.x749 + 364.2*m.x754 + 121.4*m.x759 == 0)
m.c1670 = Constraint(expr= m.x170 - m.x175 + 728.4*m.x750 + 364.2*m.x755 + 121.4*m.x760 == 0)
m.c1671 = Constraint(expr= m.x171 - m.x176 + 728.4*m.x761 + 364.2*m.x766 + 121.4*m.x771 == 0)
m.c1672 = Constraint(expr= m.x172 - m.x177 + 728.4*m.x762 + 364.2*m.x767 + 121.4*m.x772 == 0)
m.c1673 = Constraint(expr= m.x173 - m.x178 + 728.4*m.x763 + 364.2*m.x768 + 121.4*m.x773 == 0)
m.c1674 = Constraint(expr= m.x174 - m.x179 + 728.4*m.x764 + 364.2*m.x769 + 121.4*m.x774 == 0)
m.c1675 = Constraint(expr= m.x175 - m.x180 + 728.4*m.x765 + 364.2*m.x770 + 121.4*m.x775 == 0)
m.c1676 = Constraint(expr= m.x176 - m.x181 + 728.4*m.x776 + 364.2*m.x781 + 121.4*m.x786 == 0)
m.c1677 = Constraint(expr= m.x177 - m.x182 + 728.4*m.x777 + 364.2*m.x782 + 121.4*m.x787 == 0)
m.c1678 = Constraint(expr= m.x178 - m.x183 + 728.4*m.x778 + 364.2*m.x783 + 121.4*m.x788 == 0)
m.c1679 = Constraint(expr= m.x179 - m.x184 + 728.4*m.x779 + 364.2*m.x784 + 121.4*m.x789 == 0)
m.c1680 = Constraint(expr= m.x180 - m.x185 + 728.4*m.x780 + 364.2*m.x785 + 121.4*m.x790 == 0)
m.c1681 = Constraint(expr= m.x181 - m.x186 + 728.4*m.x791 + 364.2*m.x796 + 121.4*m.x801 == 0)
m.c1682 = Constraint(expr= m.x182 - m.x187 + 728.4*m.x792 + 364.2*m.x797 + 121.4*m.x802 == 0)
m.c1683 = Constraint(expr= m.x183 - m.x188 + 728.4*m.x793 + 364.2*m.x798 + 121.4*m.x803 == 0)
m.c1684 = Constraint(expr= m.x184 - m.x189 + 728.4*m.x794 + 364.2*m.x799 + 121.4*m.x804 == 0)
m.c1685 = Constraint(expr= m.x185 - m.x190 + 728.4*m.x795 + 364.2*m.x800 + 121.4*m.x805 == 0)
m.c1686 = Constraint(expr= m.x186 - m.x191 + 728.4*m.x806 + 364.2*m.x811 + 121.4*m.x816 == 0)
m.c1687 = Constraint(expr= m.x187 - m.x192 + 728.4*m.x807 + 364.2*m.x812 + 121.4*m.x817 == 0)
m.c1688 = Constraint(expr= m.x188 - m.x193 + 728.4*m.x808 + 364.2*m.x813 + 121.4*m.x818 == 0)
m.c1689 = Constraint(expr= m.x189 - m.x194 + 728.4*m.x809 + 364.2*m.x814 + 121.4*m.x819 == 0)
m.c1690 = Constraint(expr= m.x190 - m.x195 + 728.4*m.x810 + 364.2*m.x815 + 121.4*m.x820 == 0)
m.c1691 = Constraint(expr= m.x191 - m.x196 + 728.4*m.x821 + 364.2*m.x826 + 121.4*m.x831 == 0)
m.c1692 = Constraint(expr= m.x192 - m.x197 + 728.4*m.x822 + 364.2*m.x827 + 121.4*m.x832 == 0)
m.c1693 = Constraint(expr= m.x193 - m.x198 + 728.4*m.x823 + 364.2*m.x828 + 121.4*m.x833 == 0)
m.c1694 = Constraint(expr= m.x194 - m.x199 + 728.4*m.x824 + 364.2*m.x829 + 121.4*m.x834 == 0)
m.c1695 = Constraint(expr= m.x195 - m.x200 + 728.4*m.x825 + 364.2*m.x830 + 121.4*m.x835 == 0)
m.c1696 = Constraint(expr= m.x196 - m.x201 + 728.4*m.x836 + 364.2*m.x841 + 121.4*m.x846 == 0)
m.c1697 = Constraint(expr= m.x197 - m.x202 + 728.4*m.x837 + 364.2*m.x842 + 121.4*m.x847 == 0)
m.c1698 = Constraint(expr= m.x198 - m.x203 + 728.4*m.x838 + 364.2*m.x843 + 121.4*m.x848 == 0)
m.c1699 = Constraint(expr= m.x199 - m.x204 + 728.4*m.x839 + 364.2*m.x844 + 121.4*m.x849 == 0)
m.c1700 = Constraint(expr= m.x200 - m.x205 + 728.4*m.x840 + 364.2*m.x845 + 121.4*m.x850 == 0)
m.c1701 = Constraint(expr= m.x201 - m.x206 + 728.4*m.x851 + 364.2*m.x856 + 121.4*m.x861 == 0)
m.c1702 = Constraint(expr= m.x202 - m.x207 + 728.4*m.x852 + 364.2*m.x857 + 121.4*m.x862 == 0)
m.c1703 = Constraint(expr= m.x203 - m.x208 + 728.4*m.x853 + 364.2*m.x858 + 121.4*m.x863 == 0)
m.c1704 = Constraint(expr= m.x204 - m.x209 + 728.4*m.x854 + 364.2*m.x859 + 121.4*m.x864 == 0)
m.c1705 = Constraint(expr= m.x205 - m.x210 + 728.4*m.x855 + 364.2*m.x860 + 121.4*m.x865 == 0)
m.c1706 = Constraint(expr= m.x206 - m.x211 + 728.4*m.x866 + 364.2*m.x871 + 121.4*m.x876 == 0)
m.c1707 = Constraint(expr= m.x207 - m.x212 + 728.4*m.x867 + 364.2*m.x872 + 121.4*m.x877 == 0)
m.c1708 = Constraint(expr= m.x208 - m.x213 + 728.4*m.x868 + 364.2*m.x873 + 121.4*m.x878 == 0)
m.c1709 = Constraint(expr= m.x209 - m.x214 + 728.4*m.x869 + 364.2*m.x874 + 121.4*m.x879 == 0)
m.c1710 = Constraint(expr= m.x210 - m.x215 + 728.4*m.x870 + 364.2*m.x875 + 121.4*m.x880 == 0)
m.c1711 = Constraint(expr= m.x211 - m.x216 + 728.4*m.x881 + 364.2*m.x886 + 121.4*m.x891 == 0)
m.c1712 = Constraint(expr= m.x212 - m.x217 + 728.4*m.x882 + 364.2*m.x887 + 121.4*m.x892 == 0)
m.c1713 = Constraint(expr= m.x213 - m.x218 + 728.4*m.x883 + 364.2*m.x888 + 121.4*m.x893 == 0)
m.c1714 = Constraint(expr= m.x214 - m.x219 + 728.4*m.x884 + 364.2*m.x889 + 121.4*m.x894 == 0)
m.c1715 = Constraint(expr= m.x215 - m.x220 + 728.4*m.x885 + 364.2*m.x890 + 121.4*m.x895 == 0)
m.c1716 = Constraint(expr= m.x216 - m.x221 + 728.4*m.x896 + 364.2*m.x901 + 121.4*m.x906 == 0)
m.c1717 = Constraint(expr= m.x217 - m.x222 + 728.4*m.x897 + 364.2*m.x902 + 121.4*m.x907 == 0)
m.c1718 = Constraint(expr= m.x218 - m.x223 + 728.4*m.x898 + 364.2*m.x903 + 121.4*m.x908 == 0)
m.c1719 = Constraint(expr= m.x219 - m.x224 + 728.4*m.x899 + 364.2*m.x904 + 121.4*m.x909 == 0)
m.c1720 = Constraint(expr= m.x220 - m.x225 + 728.4*m.x900 + 364.2*m.x905 + 121.4*m.x910 == 0)
m.c1721 = Constraint(expr= m.x221 - m.x226 + 728.4*m.x911 + 364.2*m.x916 + 121.4*m.x921 == 0)
m.c1722 = Constraint(expr= m.x222 - m.x227 + 728.4*m.x912 + 364.2*m.x917 + 121.4*m.x922 == 0)
m.c1723 = Constraint(expr= m.x223 - m.x228 + 728.4*m.x913 + 364.2*m.x918 + 121.4*m.x923 == 0)
m.c1724 = Constraint(expr= m.x224 - m.x229 + 728.4*m.x914 + 364.2*m.x919 + 121.4*m.x924 == 0)
m.c1725 = Constraint(expr= m.x225 - m.x230 + 728.4*m.x915 + 364.2*m.x920 + 121.4*m.x925 == 0)
m.c1726 = Constraint(expr= m.x226 - m.x231 + 728.4*m.x926 + 364.2*m.x931 + 121.4*m.x936 == 0)
m.c1727 = Constraint(expr= m.x227 - m.x232 + 728.4*m.x927 + 364.2*m.x932 + 121.4*m.x937 == 0)
m.c1728 = Constraint(expr= m.x228 - m.x233 + 728.4*m.x928 + 364.2*m.x933 + 121.4*m.x938 == 0)
m.c1729 = Constraint(expr= m.x229 - m.x234 + 728.4*m.x929 + 364.2*m.x934 + 121.4*m.x939 == 0)
m.c1730 = Constraint(expr= m.x230 - m.x235 + 728.4*m.x930 + 364.2*m.x935 + 121.4*m.x940 == 0)
m.c1731 = Constraint(expr= m.x231 - m.x236 + 728.4*m.x941 + 364.2*m.x946 + 121.4*m.x951 == 0)
m.c1732 = Constraint(expr= m.x232 - m.x237 + 728.4*m.x942 + 364.2*m.x947 + 121.4*m.x952 == 0)
m.c1733 = Constraint(expr= m.x233 - m.x238 + 728.4*m.x943 + 364.2*m.x948 + 121.4*m.x953 == 0)
m.c1734 = Constraint(expr= m.x234 - m.x239 + 728.4*m.x944 + 364.2*m.x949 + 121.4*m.x954 == 0)
m.c1735 = Constraint(expr= m.x235 - m.x240 + 728.4*m.x945 + 364.2*m.x950 + 121.4*m.x955 == 0)
m.c1736 = Constraint(expr= m.x236 - m.x241 + 728.4*m.x956 + 364.2*m.x961 + 121.4*m.x966 == 0)
m.c1737 = Constraint(expr= m.x237 - m.x242 + 728.4*m.x957 + 364.2*m.x962 + 121.4*m.x967 == 0)
m.c1738 = Constraint(expr= m.x238 - m.x243 + 728.4*m.x958 + 364.2*m.x963 + 121.4*m.x968 == 0)
m.c1739 = Constraint(expr= m.x239 - m.x244 + 728.4*m.x959 + 364.2*m.x964 + 121.4*m.x969 == 0)
m.c1740 = Constraint(expr= m.x240 - m.x245 + 728.4*m.x960 + 364.2*m.x965 + 121.4*m.x970 == 0)
m.c1741 = Constraint(expr= m.x241 | |
'black'
`markerfacecolor` = 'white'
`marker` = 'o',
`markersize` = 4.0.
Returns
-------
:c:`matplotlib.axes.Axes`
The axes that the :a:`nodes` were plotted on.
Examples
--------
>>> # initialize a mesh and plot the nodes
>>> import matplotlib.pyplot as plt
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D('test mesh')
>>> msh.add_vertices([[0, 0], [0, 1], [1, 1], [1, 0]])
>>> msh.insert_boundary_vertices(0, [0, 1, 2, 3])
>>> rock = vcfempy.materials.Material('rock', color='xkcd:greenish')
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh, msh.boundary_vertices,
... rock, 'rock region')
>>> msh.mesh_scale = 0.2
>>> msh.mesh_rand = 0.2
>>> msh.generate_mesh()
>>> fig = plt.figure()
>>> ax = msh.plot_mesh()
>>> ax = msh.plot_vertices()
>>> ax = msh.plot_nodes()
>>> xmin, xmax, ymin, ymax = ax.axis('equal')
>>> xtext = ax.set_xlabel('x')
>>> ytext = ax.set_ylabel('y')
>>> ttext = ax.set_title('PolyMesh2D Nodes Test Plot')
>>> plt.savefig('PolyMesh2D_nodes_test_plot.png')
"""
if ax is None or not isinstance(ax, plt.Axes):
ax = plt.gca()
if 'linewidth' not in kwargs.keys():
kwargs['linewidth'] = 0.0
if 'markeredgecolor' not in kwargs.keys():
kwargs['markeredgecolor'] = 'black'
if 'markerfacecolor' not in kwargs.keys():
kwargs['markerfacecolor'] = 'white'
if 'marker' not in kwargs.keys():
kwargs['marker'] = 'o'
if 'markersize' not in kwargs.keys():
kwargs['markersize'] = 4.0
ax.plot(self.nodes[:, 0], self.nodes[:, 1], **kwargs)
return ax
class MaterialRegion2D():
"""A class for defining material regions and their attributes for meshes
generated by a :c:`PolyMesh2D`.
Parameters
----------
mesh : :c:`PolyMesh2D`
The parent mesh. Sets :a:`mesh`.
vertices : list[int], optional
Initial list of vertices defining the :c:`MaterialRegion2D`. Passed
to :m:`insert_vertices`.
material : :c:`vcfempy.materials.Material`, optional
The material type of the :c:`MaterialRegion2D`. Sets :a:`material`.
name : str, optional
A descriptive name for the :c:`MaterialRegion2D`. If not provided,
will be set to a default 'Unnamed Material Region {`k`}' where `k` is
a counter for how many :c:`MaterialRegion2D` have been created.
Other Parameters
----------------
add_to_mesh : bool, optional, default=True
Flag for whether to add the :c:`MaterialRegion2D` to its parent mesh.
This is done by default when the :c:`MaterialRegion2D` is created.
Examples
--------
>>> # initialize a mesh, no material regions added
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D('test mesh')
>>> msh.add_vertices([[0, 0], [0, 1], [1, 1], [1, 0]])
>>> msh.insert_boundary_vertices(0, [0, 1, 2, 3])
>>> print(msh.num_material_regions)
0
>>> # create a material region, this will add it to its parent mesh
>>> import vcfempy.materials
>>> rock_material = vcfempy.materials.Material('rock material')
>>> rock_region = vcfempy.meshgen.MaterialRegion2D(msh, [0, 1, 2, 3],
... rock_material,
... 'rock region')
>>> print(msh.num_material_regions)
1
>>> print(rock_region in msh.material_regions)
True
>>> print(rock_region.name)
rock region
>>> print(rock_region.material.name)
rock material
>>> print(rock_region.vertices)
[0, 1, 2, 3]
>>> # generate a mesh, then change material region material
>>> # this clears the mesh
>>> msh.mesh_scale = 0.4
>>> msh.add_seed_points([0.5, 0.5])
>>> msh.generate_mesh()
>>> print(msh.mesh_valid)
True
>>> rock_region.material = None
>>> print(rock_region.material)
None
>>> print(msh.mesh_valid)
False
>>> # regenerate the mesh, then change the material region vertices
>>> # this also clears the mesh
>>> # note that the material region need not be fully inside the
>>> # mesh boundaries
>>> msh.generate_mesh()
>>> print(msh.mesh_valid)
True
>>> msh.add_vertices([0.5, 1.5])
>>> print(msh.mesh_valid)
True
>>> rock_region.insert_vertices(2, 4)
>>> print(rock_region.vertices)
[0, 1, 4, 2, 3]
>>> print(msh.mesh_valid)
False
"""
_num_created = 0
def __init__(self, mesh, vertices=None, material=None, name=None,
add_to_mesh=True):
if not isinstance(mesh, PolyMesh2D):
raise TypeError('type(mesh) must be vcfempy.meshgen.PolyMesh2D')
self._mesh = mesh
if add_to_mesh:
self.mesh.add_material_region(self)
if name is None:
name = ('Unnamed Material Region '
+ f'{MaterialRegion2D._num_created}')
self.name = name
MaterialRegion2D._num_created += 1
self._vertices = []
self.insert_vertices(0, vertices)
self.material = material
@property
def name(self):
"""A descriptive name for the :c:`MaterialRegion2D`.
Parameters
----------
name : str
The name of the :c:`MaterialRegion2D`. Will be cast to `str`
regardless of type.
Returns
-------
`str`
The :a:`name` of the :c:`MaterialRegion2D`.
Examples
--------
>>> # create a blank material region without a name (reset counter)
>>> import vcfempy.meshgen
>>> vcfempy.meshgen.MaterialRegion2D._num_created = 0
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> print(mr.name)
Unnamed Material Region 0
>>> # setting the name
>>> mr.name = 'Rock region'
>>> print(mr.name)
Rock region
>>> # changing the name property to non-str
>>> # will be cast to str
>>> mr.name = 1
>>> print(mr.name)
1
>>> print(type(mr.name).__name__)
str
>>> # initialize a material region with a name
>>> mr = vcfempy.meshgen.MaterialRegion2D(mesh=msh, name='new region')
>>> print(mr.name)
new region
>>> # initialize another material region without a name
>>> # notice that the "Unnamed" counter increases for every region
>>> # created (including those that were assigned an initial name)
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> print(mr.name)
Unnamed Material Region 2
"""
return self._name
@name.setter
def name(self, name):
self._name = str(name)
@property
def mesh(self):
"""The parent :c:`PolyMesh2D` of the :c:`MaterialRegion2D`.
Returns
-------
:c:`PolyMesh2D`
The parent mesh object
Note
----
This property is immutable to ensure connection between a
:c:`PolyMesh2D` and a :c:`MaterialRegion2D`.
Examples
--------
>>> # create a mesh and a material region
>>> # note that creating the material region requires a parent mesh
>>> # and the material region will add itself to the list of parent
>>> # mesh material regions by default
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D('test mesh')
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> print(mr.mesh.name)
test mesh
>>> print(mr in msh.material_regions)
True
>>> # try to set parent mesh (immutable)
>>> new_mesh = vcfempy.meshgen.PolyMesh2D()
>>> mr.mesh = new_mesh
Traceback (most recent call last):
...
AttributeError: can't set attribute
"""
return self._mesh
@property
def num_vertices(self):
"""Number of vertices defining the :c:`MaterialRegion2D` geometry.
Returns
-------
`int`
The number of :a:`vertices` in the :c:`MaterialRegion2D`.
Examples
--------
>>> # creating a material region, no initial vertices provided
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> print(mr.num_vertices)
0
>>> # creating a material region, providing initial vertices
>>> # these are indices referencing vertices in the parent mesh
>>> new_verts = [[0, 0], [0, 1], [1, 1], [1, 0]]
>>> msh.add_vertices(new_verts)
>>> mr.insert_vertices(0, [k for k, _ in enumerate(new_verts)])
>>> print(mr.num_vertices)
4
>>> # add a vertex and check num_vertices
>>> msh.add_vertices([1.5, 0.5])
>>> mr.insert_vertices(3, 4)
>>> print(mr.num_vertices)
5
"""
return len(self.vertices)
@property
def vertices(self):
"""List of vertex indices defining the boundary of the
:c:`MaterialRegion2D`.
Returns
-------
`list[int]`
The list of vertex indices referencing :a:`PolyMesh2D.vertices`
of :a:`mesh`.
Examples
--------
>>> # creating a material region, no initial vertices provided
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> print(mr.num_vertices)
0
>>> # creating a material region, providing initial vertices
>>> # these are indices referencing vertices in the parent mesh
>>> new_verts = [[0, 0], [0, 1], [1, 1], [1, 0]]
>>> msh.add_vertices(new_verts)
>>> mr.insert_vertices(0, [k for k, _ in enumerate(new_verts)])
>>> print(mr.vertices)
[0, 1, 2, 3]
>>> print(msh.vertices[mr.vertices, :])
[[0. 0.]
[0. 1.]
[1. 1.]
[1. 0.]]
>>> # add a vertex and check vertices
>>> msh.add_vertices([1.5, 0.5])
>>> mr.insert_vertices(3, 4)
>>> print(mr.vertices)
[0, 1, 2, 4, 3]
>>> print(msh.vertices[mr.vertices, :])
[[0. 0. ]
[0. 1. ]
[1. 1. ]
[1.5 0.5]
[1. 0. ]]
"""
return self._vertices
@property
def material(self):
"""The :c:`vcfempy.materials.Material` assigned to the
:c:`MaterialRegion2D`.
Parameters
----------
material : None | :c:`vcfempy.materials.Material`
The material type to assign to the :c:`MaterialRegion2D`.
Returns
-------
``None`` | :c:`vcfempy.materials.Material`
The material type assigned to the :c:`MaterialRegion2D`.
Raises
------
TypeError
If **material** is not ``None`` or a
:c:`vcfempy.materials.Material`.
Examples
--------
>>> # create a material region, no material type assigned
>>> import vcfempy.materials
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> msh.add_vertices([[0, 0], [0, 1], [1, 1], [1, 0]])
>>> msh.insert_boundary_vertices(0, [0, 1, 2, 3])
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh, msh.boundary_vertices,
... name='rock region')
>>> print(mr in msh.material_regions)
True
>>> print(mr.material)
None
>>> # assign a material type to the material region
>>> rock = vcfempy.materials.Material('rock')
>>> mr.material = rock
>>> print(mr.material.name)
rock
>>> # changing material type of a material region resets the mesh
>>> msh.mesh_scale = 0.4
>>> msh.add_seed_points([0.5, 0.5])
>>> msh.generate_mesh()
>>> print(msh.mesh_valid)
True
>>> mr.material = None
>>> print(mr.material)
None
>>> print(msh.mesh_valid)
False
>>> # try to assign invalid materials to a material region
>>> mr.material = 1
Traceback (most recent call | |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
* Copyright (C) 2021 <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
"""
import requests, json, sys
import xml.dom.minidom
import datetime
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
def getconfigelement(path):
try:
tree = config
for node in path.split('/'):
tree = tree.getElementsByTagName(node)
if len(tree) == 0: return False
tree = tree[0]
return tree.firstChild.nodeValue
except: return False
def vpnapi():
key = getconfigelement('OPNsense/ipcheck/vpnapikey')
apikey= "?key="+key if key else ""
if ipv4:
try:
url = Request("https://vpnapi.io/api/"+ipv4+apikey)
url.add_header('User-Agent', 'Mozilla/5.0')
out['ipv4']['vpnapi'] = json.load(urlopen(url, timeout=4))
if debug:
out['ipv4']['vpnapi']['apikey'] = key
m = out['ipv4']['vpnapi'].get('message')
out['ipv4']['vpnapi']['status'] = m if m else "ok"
except:
pass
if ipv6:
try:
url = Request("https://vpnapi.io/api/"+ipv6+apikey)
url.add_header('User-Agent', 'Mozilla/5.0')
out['ipv6']['vpnapi'] = json.load(urlopen(url, timeout=4))
if debug:
out['ipv6']['vpnapi']['apikey'] = key
m = out['ipv6']['vpnapi'].get('message')
out['ipv6']['vpnapi']['status'] = m if m else "ok"
except:
pass
return
def proxycheck():
key = getconfigelement('OPNsense/ipcheck/proxycheckkey')
apikey= 'key='+key+'&' if key else ""
if ipv4:
try:
url = Request("http://proxycheck.io/v2/"+ipv4+"?"+apikey+"vpn=1&asn=1&risk=1&port=1&seen=1")
url.add_header('User-Agent', 'Mozilla/5.0')
ret = json.load(urlopen(url, timeout=4))
out['ipv4']['proxycheck']={}
out['ipv4']['proxycheck']['status'] = ret['status']
out['ipv4']['proxycheck'] = ret[ipv4]
if debug: out['ipv4']['proxycheck']['apikey'] = key
except:
pass
if ipv6:
try:
url = Request("http://proxycheck.io/v2/"+ipv6+"?"+apikey+"vpn=1&asn=1&risk=1&port=1&seen=1")
url.add_header('User-Agent', 'Mozilla/5.0')
ret = json.load(urlopen(url, timeout=4))
out['ipv6']['proxycheck']={}
out['ipv6']['proxycheck']['status'] = ret['status']
out['ipv6']['proxycheck'] = ret[ipv6]
if debug: out['ipv6']['proxycheck']['apikey'] = key
except:
pass
return
def ip2loc():
key = getconfigelement('OPNsense/ipcheck/ip2lockey')
apikey= key if key else "demo"
if ipv4:
try:
url = Request("https://api.ip2location.com/v2/?ip="+ipv4+"&key="+apikey+"&format=json&package=WS8")
out['ipv4']['ip2location'] = json.load(urlopen(url, timeout=4))
out['ipv4']['ip2location']['apikey'] = key
except:
pass
if ipv6:
try:
url = Request("https://api.ip2location.com/v2/?ip="+ipv6+"&key="+apikey+"&format=json&package=WS8")
out['ipv6']['ip2location'] = json.load(urlopen(url, timeout=4))
out['ipv6']['ip2location']['apikey'] = key
except:
pass
return
def ip2proxy():
key = getconfigelement('OPNsense/ipcheck/ip2proxykey')
apikey= key if key else "demo"
if ipv4:
try:
url = Request("https://api.ip2proxy.com/?ip="+ipv4+"&key="+apikey+"&package=PX10")
out['ipv4']['ip2proxy'] = json.load(urlopen(url, timeout=4))
out['ipv4']['ip2proxy']['apikey'] = key
except:
pass
if ipv6:
try:
url = Request("https://api.ip2proxy.com/?ip="+ipv6+"&key="+apikey+"&package=PX10")
out['ipv6']['ip2proxy'] = json.load(urlopen(url, timeout=4))
out['ipv6']['ip2proxy']['apikey'] = key
except:
pass
return
def onionoo():
if ipv4:
try:
out['ipv4']['onionoo']={}
url = Request("https://onionoo.torproject.org/details?limit=4&search="+ipv4)
ret = json.load(urlopen(url, timeout=4))
if len(ret['relays']): out['ipv4']['onionoo']['relay'] = (ret['relays'][0])
if len(ret['bridges']): out['ipv4']['onionoo']['bridge'] = (ret['bridges'][0])
except:
pass
if ipv6:
try:
out['ipv6']['onionoo']={}
url = Request("https://onionoo.torproject.org/details?limit=4&search="+ipv6)
ret = json.load(urlopen(url, timeout=4))
if len(ret['relays']): out['ipv6']['onionoo']['relay'] = (ret['relays'][0])
if len(ret['bridges']): out['ipv6']['onionoo']['bridge'] = (ret['bridges'][0])
except:
pass
return
def ipqs():
key = getconfigelement('OPNsense/ipcheck/ipqskey')
apikey= key if key else "false"
if ipv4:
try:
url = Request("https://ipqualityscore.com/api/json/ip/"+apikey+"/"+ipv4+"?strictness=0")
url.add_header('User-Agent', 'Mozilla/5.0')
out['ipv4']['ipqs'] = json.load(urlopen(url, timeout=4))
if debug: out['ipv4']['ipqs']['apikey'] = key
except:
pass
if ipv6:
try:
url = Request("https://ipqualityscore.com/api/json/ip/"+apikey+"/"+ipv6+"?strictness=0")
url.add_header('User-Agent', 'Mozilla/5.0')
out['ipv6']['ipqs'] = json.load(urlopen(url, timeout=4))
if debug: out['ipv6']['ipqs']['apikey'] = key
except:
pass
return
def transform(out):
o={}
if ipv4:
o['ipv4']={}
o['ipv4']['ip'] = ipv4
net = out['ipv4'].get('vpnapi', {}).get('network', {}).get('network')
o['ipv4']['network'] = net if net else False
ntype =[out['ipv4'].get('proxycheck', {}).get('type'),
out['ipv4'].get('ip2proxy', {}).get('usageType'),
False]
if debug: o['ipv4']['network_type_list']=ntype
o['ipv4']['network_type']=next(i for i in ntype if i not in [None, ""])
isp=[out['ipv4'].get('ip2location', {}).get('isp'),
out['ipv4'].get('ip2proxy', {}).get('isp'),
out['ipv4'].get('ipqs', {}).get('ISP'),
out['ipv4'].get('proxycheck', {}).get('provider'),
out['ipv4'].get('vpnapi', {}).get('network', {}).get('autonomous_system_organization'),
False]
if debug: o['ipv4']['isp_list']=isp
o['ipv4']['isp']=next(i for i in isp if i not in [None, ""])
asn=[out['ipv4'].get('vpnapi', {}).get('network', {}).get('autonomous_system_number'),
out['ipv4'].get('proxycheck', {}).get('asn'),
out['ipv4'].get('ipqs', {}).get('ASN'),
out['ipv4'].get('ip2proxy', {}).get('asn'),
False]
if debug: o['ipv4']['asn_list']=asn
o['ipv4']['asn']=str(next(i for i in asn if i not in [None, "","0"]))
city=[out['ipv4'].get('ipqs', {}).get('city'),
out['ipv4'].get('proxycheck', {}).get('city'),
out['ipv4'].get('vpnapi', {}).get('location', {}).get('city'),
out['ipv4'].get('ip2location', {}).get('city_name'),
out['ipv4'].get('ip2proxy', {}).get('cityName'),
False]
if debug: o['ipv4']['city_list']=city
o['ipv4']['city']=next(i for i in city if i not in [None, ""])
region=[out['ipv4'].get('ipqs', {}).get('region'),
out['ipv4'].get('proxycheck', {}).get('region'),
out['ipv4'].get('ip2proxy', {}).get('regionName'),
out['ipv4'].get('ip2location', {}).get('region_name'),
out['ipv4'].get('vpnapi', {}).get('location', {}).get('region_code'),
False]
if debug: o['ipv4']['region_list']=region
o['ipv4']['region']=next(i for i in region if i not in [None, ""])
country=[out['ipv4'].get('ip2location', {}).get('country_name'),
out['ipv4'].get('ip2proxy', {}).get('countryName'),
out['ipv4'].get('vpnapi', {}).get('location', {}).get('country'),
out['ipv4'].get('proxycheck', {}).get('country'),
out['ipv4'].get('vpnapi', {}).get('location', {}).get('country_code'),
out['ipv4'].get('ipqs', {}).get('country_code'),
False]
if debug: o['ipv4']['country_list']=country
o['ipv4']['country']=next(i for i in country if i not in [None, ""])
tor=['vpnapi' if out['ipv4'].get('vpnapi', {}).get('security', {}).get('tor') else False,
'ipqualityscore' if out['ipv4'].get('ipqs', {}).get('tor') else False,
'ip2proxy' if out['ipv4'].get('ip2proxy', {}).get('proxyType')=="TOR" else False,
'proxycheck' if out['ipv4'].get('proxycheck', {}).get('type')=="TOR" else False,
'onionoo' if not len(out['ipv4'].get('onionoo', {}))==0 else False,
False]
if debug: o['ipv4']['tor_list']=tor
o['ipv4']['tor']=any(tor)
tor = [i for i in tor if i != False]
o['ipv4']['tor_detected_by']=tor
vpn=['vpnapi' if out['ipv4'].get('vpnapi', {}).get('security', {}).get('vpn') else False,
'ipqualityscore' if out['ipv4'].get('ipqs', {}).get('vpn') else False,
'ip2proxy' if out['ipv4'].get('ip2proxy', {}).get('proxyType')=="VPN" else False,
'proxycheck' if out['ipv4'].get('proxycheck', {}).get('type')=="VPN" else False,
'proxycheck' if out['ipv4'].get('proxycheck', {}).get('type')=="OPENVPN" else False,
'proxycheck' if out['ipv4'].get('proxycheck', {}).get('type')=="SOCKS" else False,
False]
if debug: o['ipv4']['vpn_list']=vpn
o['ipv4']['vpn']=any(vpn)
vpn = [i for i in vpn if i != False]
o['ipv4']['vpn_detected_by']=vpn
proxy=['vpnapi' if out['ipv4'].get('vpnapi', {}).get('security', {}).get('proxy') else False,
'ipqualityscore' if out['ipv4'].get('ipqs', {}).get('proxy') else False,
'ip2proxy' if out['ipv4'].get('ip2proxy', {}).get('proxyType')=="PUB" else False,
'ip2proxy' if out['ipv4'].get('ip2proxy', {}).get('proxyType')=="WEB" else False,
'ip2proxy' if out['ipv4'].get('ip2proxy', {}).get('proxyType')=="RES" else False,
False]
if debug: o['ipv4']['proxy_list']=proxy
o['ipv4']['proxy']=any(proxy)
proxy = [i for i in proxy if i != False]
o['ipv4']['proxy_detected_by']=proxy
o['ipv4']['ipqs_fraud_score'] = out['ipv4'].get('ipqs', {}).get('fraud_score')
o['ipv4']['proxycheck_risk'] = out['ipv4'].get('proxycheck', {}).get('risk')
# IPv6 section
if ipv6:
o['ipv6']={}
o['ipv6']['ip'] = ipv6
net = out['ipv6'].get('vpnapi', {}).get('network', {}).get('network')
o['ipv6']['network'] = net if net else False
ntype =[out['ipv6'].get('proxycheck', {}).get('type'),
out['ipv6'].get('ip2proxy', {}).get('usageType'),
False]
if debug: o['ipv6']['network_type_list']=ntype
o['ipv6']['network_type']=next(i for i in ntype if i not in [None, ""])
isp=[out['ipv6'].get('ip2location', {}).get('isp'),
out['ipv6'].get('ip2proxy', {}).get('isp'),
out['ipv6'].get('ipqs', {}).get('ISP'),
out['ipv6'].get('proxycheck', {}).get('provider'),
out['ipv6'].get('vpnapi', {}).get('network', {}).get('autonomous_system_organization'),
False]
if debug: o['ipv6']['isp_list']=isp
o['ipv6']['isp']=next(i for i in isp if i not in [None, ""])
asn=[out['ipv6'].get('vpnapi', {}).get('network', {}).get('autonomous_system_number'),
out['ipv6'].get('ip2proxy', {}).get('asn'),
out['ipv6'].get('ipqs', {}).get('ASN'),
out['ipv6'].get('proxycheck', {}).get('asn'),
False]
if debug: o['ipv6']['asn_list']=asn
o['ipv6']['asn']= str(next(i for i in asn if i not in [None, ""]))
city=[out['ipv6'].get('ipqs', {}).get('city'),
out['ipv6'].get('proxycheck', {}).get('city'),
out['ipv6'].get('vpnapi', {}).get('location', {}).get('city'),
out['ipv6'].get('ip2location', {}).get('city_name'),
out['ipv6'].get('ip2proxy', {}).get('cityName'),
False]
if debug: o['ipv6']['city_list']=city
o['ipv6']['city']=next(i for i in city if i not in [None, ""])
region=[out['ipv6'].get('ipqs', {}).get('region'),
out['ipv6'].get('proxycheck', {}).get('region'),
out['ipv6'].get('ip2proxy', {}).get('regionName'),
out['ipv6'].get('ip2location', {}).get('region_name'),
out['ipv6'].get('vpnapi', {}).get('location', {}).get('region_code'),
False]
if debug: o['ipv6']['region_list']=region
o['ipv6']['region']=next(i for i in region if i not in [None, ""])
country=[out['ipv6'].get('ip2location', {}).get('country_name'),
out['ipv6'].get('ip2proxy', {}).get('countryName'),
out['ipv6'].get('vpnapi', {}).get('location', {}).get('country'),
out['ipv6'].get('proxycheck', {}).get('country'),
out['ipv6'].get('vpnapi', {}).get('location', {}).get('country_code'),
out['ipv6'].get('ipqs', {}).get('country_code'),
False]
if debug: o['ipv6']['country_list']=country
o['ipv6']['country']=next(i for i in country if i not in [None, ""])
tor=['vpnapi' if out['ipv6'].get('vpnapi', {}).get('security', {}).get('tor') else False,
'ipqualityscore' if out['ipv6'].get('ipqs', {}).get('tor') else False,
'ip2proxy' if out['ipv6'].get('ip2proxy', {}).get('proxyType')=="TOR" else False,
'proxycheck' if out['ipv6'].get('proxycheck', {}).get('type')=="TOR" else False,
'onionoo' if not len(out['ipv6'].get('onionoo', {}))==0 else False,
False]
if debug: o['ipv6']['tor_list']=tor
o['ipv6']['tor']=any(tor)
tor = [i for i in tor if i != False]
o['ipv6']['tor_detected_by']=tor
vpn=['vpnapi' if out['ipv6'].get('vpnapi', {}).get('security', {}).get('vpn') else False,
'ipqualityscore' if out['ipv6'].get('ipqs', {}).get('vpn') else False,
'ip2proxy' if out['ipv6'].get('ip2proxy', {}).get('proxyType')=="VPN" else False,
'proxycheck' if out['ipv6'].get('proxycheck', {}).get('type')=="VPN" else False,
'proxycheck' if out['ipv6'].get('proxycheck', {}).get('type')=="OPENVPN" else False,
False]
if debug: o['ipv6']['vpn_list']=vpn
o['ipv6']['vpn']=any(vpn)
vpn = [i for i in vpn if i != False]
o['ipv6']['vpn_detected_by']=vpn
proxy=['vpnapi' if out['ipv6'].get('vpnapi', {}).get('security', {}).get('proxy') else False,
'ipqualityscore' if out['ipv6'].get('ipqs', {}).get('proxy') else False,
'ip2proxy' if out['ipv6'].get('ip2proxy', {}).get('proxyType')=="PUB" else False,
'ip2proxy' if out['ipv6'].get('ip2proxy', {}).get('proxyType')=="WEB" else False,
'ip2proxy' if out['ipv6'].get('ip2proxy', {}).get('proxyType')=="RES" else False,
'proxycheck' if out['ipv6'].get('proxycheck', {}).get('type')=="SOCKS" else False,
False]
if debug: o['ipv6']['proxy_list']=proxy
o['ipv6']['proxy']=any(proxy)
proxy = [i for i in proxy if i != False]
o['ipv6']['proxy_detected_by']=proxy
o['ipv6']['ipqs_fraud_score'] = out['ipv6'].get('ipqs', {}).get('fraud_score')
o['ipv6']['proxycheck_risk'] = out['ipv6'].get('proxycheck', {}).get('risk')
return o
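# Illustrative helper (not used by the script above): every field in transform()
# follows the same precedence pattern — take the first usable value from a
# provider-ordered candidate list, with a trailing False so next() never raises.
def first_usable(candidates, empty=(None, "")):
    return next(c for c in candidates if c not in empty)
# e.g. first_usable(["", None, "AS1234", False]) -> "AS1234"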
#############
# Main loop #
#############
debug = False
config = xml.dom.minidom.parse('/conf/config.xml')
savestate = "/usr/local/opnsense/scripts/OPNsense/ipcheck/savestate.json"
out={}
arg=''
if len(sys.argv)>1:
arg=str(sys.argv[1])
if arg == '':
try:
with open(savestate, "r") as f: out = json.loads(f.read())
out['source']='Cached record'
print(json.dumps(out, indent=3))
except:
arg = 'all'
if arg == | |
# Source repository: dkamotsky/addons
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfa.seq2seq.seq2seq.beam_search_decoder."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.seq2seq import attention_wrapper
from tensorflow_addons.seq2seq import beam_search_decoder, gather_tree
def test_gather_tree():
# (max_time = 3, batch_size = 2, beam_width = 3)
# create (batch_size, max_time, beam_width) matrix and transpose it
predicted_ids = np.array(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[2, 3, 4], [5, 6, 7], [8, 9, 10]]],
dtype=np.int32,
).transpose([1, 0, 2])
parent_ids = np.array(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2]], [[0, 0, 0], [1, 2, 0], [2, 1, 1]]],
dtype=np.int32,
).transpose([1, 0, 2])
    # max_sequence_lengths is shaped (batch_size = 2)
max_sequence_lengths = [3, 3]
expected_result = np.array(
[[[2, 2, 2], [6, 5, 6], [7, 8, 9]], [[2, 4, 4], [7, 6, 6], [8, 9, 10]]]
).transpose([1, 0, 2])
res = gather_tree(
predicted_ids,
parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=11,
)
np.testing.assert_equal(expected_result, res)
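# Worked trace for batch 0, beam 0 of the test above (derived from the arrays as given):
# the final step emits token 7 with parent beam 2; at step 1 beam 2 holds token 6 whose
# parent is beam 1; at step 0 beam 1 holds token 2. Reading the path forward gives
# [2, 6, 7], which matches column 0 of expected_result for batch 0.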
def _test_gather_tree_from_array(depth_ndims=0, merged_batch_beam=False):
array = np.array(
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 4], [5, 6, 7], [8, 9, 10], [11, 12, 0]],
]
).transpose([1, 0, 2])
parent_ids = np.array(
[
[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]],
[[0, 0, 0], [1, 1, 0], [2, 0, 1], [0, 1, 0]],
]
).transpose([1, 0, 2])
expected_array = np.array(
[
[[2, 2, 2], [6, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 2], [7, 5, 7], [8, 9, 8], [11, 12, 0]],
]
).transpose([1, 0, 2])
sequence_length = [[3, 3, 3], [4, 4, 3]]
array = tf.convert_to_tensor(array, dtype=tf.float32)
parent_ids = tf.convert_to_tensor(parent_ids, dtype=tf.int32)
expected_array = tf.convert_to_tensor(expected_array, dtype=tf.float32)
max_time = tf.shape(array)[0]
batch_size = tf.shape(array)[1]
beam_width = tf.shape(array)[2]
def _tile_in_depth(tensor):
# Generate higher rank tensors by concatenating tensor and
# tensor + 1.
for _ in range(depth_ndims):
tensor = tf.stack([tensor, tensor + 1], -1)
return tensor
if merged_batch_beam:
array = tf.reshape(array, [max_time, batch_size * beam_width])
expected_array = tf.reshape(expected_array, [max_time, batch_size * beam_width])
if depth_ndims > 0:
array = _tile_in_depth(array)
expected_array = _tile_in_depth(expected_array)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length
)
np.testing.assert_equal(expected_array.numpy(), sorted_array.numpy())
def test_gather_tree_from_array_scalar():
_test_gather_tree_from_array()
def test_gather_tree_from_array_1d():
_test_gather_tree_from_array(depth_ndims=1)
def test_gather_tree_from_array_1d_with_merged_batch_beam():
_test_gather_tree_from_array(depth_ndims=1, merged_batch_beam=True)
def test_gather_tree_from_array_2d():
_test_gather_tree_from_array(depth_ndims=2)
def test_gather_tree_from_array_complex_trajectory():
# Max. time = 7, batch = 1, beam = 5.
array = np.expand_dims(
np.array(
[
[[25, 12, 114, 89, 97]],
[[9, 91, 64, 11, 162]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 2, 4]],
[[2, 3, 6, 2, 2]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]],
]
),
-1,
)
parent_ids = np.array(
[
[[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4]],
[[0, 0, 1, 2, 1]],
[[0, 1, 1, 2, 3]],
[[0, 1, 3, 1, 2]],
[[0, 1, 2, 3, 4]],
]
)
expected_array = np.expand_dims(
np.array(
[
[[25, 25, 25, 25, 25]],
[[9, 9, 91, 9, 9]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 4, 4]],
[[2, 3, 6, 3, 6]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]],
]
),
-1,
)
sequence_length = [[4, 6, 4, 7, 6]]
array = tf.convert_to_tensor(array, dtype=tf.float32)
parent_ids = tf.convert_to_tensor(parent_ids, dtype=tf.int32)
expected_array = tf.convert_to_tensor(expected_array, dtype=tf.float32)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length
)
np.testing.assert_equal(expected_array.numpy(), sorted_array.numpy())
def basic_test_array_shape_dynamic_checks(
static_shape, dynamic_shape, batch_size, beam_width, is_valid=True
):
t = tf.compat.v1.placeholder_with_default(
np.random.randn(*static_shape).astype(np.float32), shape=dynamic_shape
)
batch_size = tf.constant(batch_size)
def _test_body():
beam_search_decoder._check_batch_beam(t, batch_size, beam_width)
if is_valid:
_test_body()
else:
with pytest.raises(tf.errors.InvalidArgumentError):
_test_body()
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_array_shape_dynamic_checks():
basic_test_array_shape_dynamic_checks(
(8, 4, 5, 10), (None, None, 5, 10), 4, 5, is_valid=True
)
basic_test_array_shape_dynamic_checks(
(8, 20, 10), (None, None, 10), 4, 5, is_valid=True
)
basic_test_array_shape_dynamic_checks(
(8, 21, 10), (None, None, 10), 4, 5, is_valid=False
)
basic_test_array_shape_dynamic_checks(
(8, 4, 6, 10), (None, None, None, 10), 4, 5, is_valid=False
)
basic_test_array_shape_dynamic_checks((8, 4), (None, None), 4, 5, is_valid=False)
def test_array_shape_static_checks():
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([None, None, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([15, None, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([16, None, None]), 3, 5
)
is False
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([3, 5, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([3, 6, None]), 3, 5
)
is False
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([5, 3, None]), 3, 5
)
is False
)
def test_eos_masking():
probs = tf.constant(
[
[
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.3, -0.3, -0.3, 3, 0],
[5, 6, 0, 0, 0],
],
[[-0.2, -0.2, -0.2, -0.2, 0], [-0.3, -0.3, -0.1, 3, 0], [5, 6, 3, 0, 0],],
]
)
eos_token = 0
previously_finished = np.array([[0, 1, 0], [0, 1, 1]], dtype=bool)
masked = beam_search_decoder._mask_probs(probs, eos_token, previously_finished)
masked = masked.numpy()
np.testing.assert_equal(probs[0][0], masked[0][0])
np.testing.assert_equal(probs[0][2], masked[0][2])
np.testing.assert_equal(probs[1][0], masked[1][0])
np.testing.assert_equal(masked[0][1][0], 0)
np.testing.assert_equal(masked[1][1][0], 0)
np.testing.assert_equal(masked[1][2][0], 0)
for i in range(1, 5):
np.testing.assert_allclose(masked[0][1][i], np.finfo("float32").min)
np.testing.assert_allclose(masked[1][1][i], np.finfo("float32").min)
np.testing.assert_allclose(masked[1][2][i], np.finfo("float32").min)
def test_beam_step():
batch_size = 2
beam_width = 3
vocab_size = 5
end_token = 0
length_penalty_weight = 0.6
coverage_penalty_weight = 0.0
dummy_cell_state = tf.zeros([batch_size, beam_width])
beam_state = beam_search_decoder.BeamSearchDecoderState(
cell_state=dummy_cell_state,
log_probs=tf.nn.log_softmax(tf.ones([batch_size, beam_width])),
lengths=tf.constant(2, shape=[batch_size, beam_width], dtype=tf.int64),
finished=tf.zeros([batch_size, beam_width], dtype=tf.bool),
accumulated_attention_probs=(),
)
logits_ = np.full([batch_size, beam_width, vocab_size], 0.0001)
logits_[0, 0, 2] = 1.9
logits_[0, 0, 3] = 2.1
logits_[0, 1, 3] = 3.1
logits_[0, 1, 4] = 0.9
logits_[1, 0, 1] = 0.5
logits_[1, 1, 2] = 2.7
logits_[1, 2, 2] = 10.0
logits_[1, 2, 3] = 0.2
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search_decoder._beam_search_step(
time=2,
logits=logits,
next_cell_state=dummy_cell_state,
beam_state=beam_state,
batch_size=tf.convert_to_tensor(batch_size),
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
)
outputs_, next_state_, state_, log_probs_ = [
outputs,
next_beam_state,
beam_state,
log_probs,
]
np.testing.assert_equal(
outputs_.predicted_ids.numpy(), np.asanyarray([[3, 3, 2], [2, 2, 1]])
)
np.testing.assert_equal(
outputs_.parent_ids.numpy(), np.asanyarray([[1, 0, 0], [2, 1, 0]])
)
np.testing.assert_equal(
next_state_.lengths.numpy(), np.asanyarray([[3, 3, 3], [3, 3, 3]])
)
np.testing.assert_equal(
next_state_.finished.numpy(),
np.asanyarray([[False, False, False], [False, False, False]]),
)
expected_log_probs = []
expected_log_probs.append(state_.log_probs[0].numpy())
expected_log_probs.append(state_.log_probs[1].numpy())
expected_log_probs[0][0] += log_probs_[0, 1, 3]
expected_log_probs[0][1] += log_probs_[0, 0, 3]
expected_log_probs[0][2] += log_probs_[0, 0, 2]
expected_log_probs[1][0] += log_probs_[1, 2, 2]
expected_log_probs[1][1] += log_probs_[1, 1, 2]
expected_log_probs[1][2] += log_probs_[1, 0, 1]
np.testing.assert_equal(
next_state_.log_probs.numpy(), np.asanyarray(expected_log_probs)
)
def test_step_with_eos():
batch_size = 2
beam_width = 3
vocab_size = 5
end_token = 0
length_penalty_weight = 0.6
coverage_penalty_weight = 0.0
dummy_cell_state = tf.zeros([batch_size, beam_width])
beam_state = beam_search_decoder.BeamSearchDecoderState(
cell_state=dummy_cell_state,
log_probs=tf.nn.log_softmax(tf.ones([batch_size, beam_width])),
lengths=tf.convert_to_tensor([[2, 1, 2], [2, 2, 1]], dtype=tf.int64),
finished=tf.convert_to_tensor(
[[False, True, False], [False, False, True]], dtype=tf.bool
),
accumulated_attention_probs=(),
)
logits_ = np.full([batch_size, beam_width, vocab_size], 0.0001)
logits_[0, 0, 2] = 1.9
logits_[0, 0, 3] = 2.1
logits_[0, 1, 3] = 3.1
logits_[0, 1, 4] = 0.9
logits_[1, 0, 1] = 0.5
logits_[1, 1, 2] = 5.7 # why does this not work when it's 2.7?
logits_[1, 2, 2] = 1.0
logits_[1, 2, 3] = 0.2
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search_decoder._beam_search_step(
time=2,
logits=logits,
next_cell_state=dummy_cell_state,
beam_state=beam_state,
batch_size=tf.convert_to_tensor(batch_size),
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
)
outputs_, next_state_, state_, log_probs_ = [
outputs,
next_beam_state,
beam_state,
log_probs,
]
np.testing.assert_equal(
outputs_.parent_ids.numpy(), np.asanyarray([[1, 0, 0], [1, 2, 0]])
)
np.testing.assert_equal(
outputs_.predicted_ids.numpy(), np.asanyarray([[0, 3, 2], [2, 0, 1]])
)
np.testing.assert_equal(
next_state_.lengths.numpy(), np.asanyarray([[1, 3, 3], [3, 1, 3]])
)
np.testing.assert_equal(
next_state_.finished.numpy(),
np.asanyarray([[True, False, False], [False, True, False]]),
)
expected_log_probs = []
expected_log_probs.append(state_.log_probs[0].numpy())
expected_log_probs.append(state_.log_probs[1].numpy())
expected_log_probs[0][1] += log_probs_[0, 0, 3]
expected_log_probs[0][2] += log_probs_[0, 0, 2]
expected_log_probs[1][0] += log_probs_[1, 1, 2]
expected_log_probs[1][2] += log_probs_[1, 0, 1]
np.testing.assert_equal(
next_state_.log_probs.numpy(), np.asanyarray(expected_log_probs)
)
def test_large_beam_step():
batch_size = 2
beam_width = 8
vocab_size = 5
end_token = 0
length_penalty_weight = 0.6
coverage_penalty_weight = 0.0
def get_probs():
"""this simulates the initialize method in BeamSearchDecoder."""
log_prob_mask = tf.one_hot(
tf.zeros([batch_size], dtype=tf.int32),
depth=beam_width,
on_value=True,
off_value=False,
dtype=tf.bool,
)
log_prob_zeros = tf.zeros([batch_size, beam_width], dtype=tf.float32)
log_prob_neg_inf = tf.ones([batch_size, beam_width], dtype=tf.float32) * -np.Inf
log_probs = tf.where(log_prob_mask, log_prob_zeros, log_prob_neg_inf)
return log_probs
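    # Illustration (inferred from the one-hot construction above, not a separate fixture):
    # with batch_size=2 and beam_width=8 every row of log_probs starts as
    # [0.0, -inf, -inf, ..., -inf], i.e. all probability mass sits on beam 0.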
log_probs = get_probs()
dummy_cell_state = tf.zeros([batch_size, beam_width])
    _finished
go.layout.Annotation(
text = f'regressor: {self.label}',
x = 0,
y = 2.1,
font = go.layout.annotation.Font(
color = '#5E5E5E',
size = 15,
),
showarrow = False,
xref = 'paper',
yref = 'paper'
)
annotations.append(annotation)"""
# draw vertical line at output value
shape = go.layout.Shape(type='line',
x0=self.sample_prediction_prob,
x1=self.sample_prediction_prob,
y0= 0,
y1= 1.1,
yref = "paper",
line=dict(
color="#A6A6A6",
width=4,
)
)
shapes.append(shape)
        # draw boxplots if selected
if boxplots:
self.box_plot_shapes = []
self.box_plot_colors = [("#FF9300", "rgba(255,147,0, 0.3)"),("#70AD47", "rgba(112,173,71, 0.3)")]
for elem, c in zip(self.box_plots, self.box_plot_colors):
q1 = elem.quantile(0.25)
q2 = elem.quantile(0.75)
median = elem.quantile(0.5)
minimum = elem.min()
maximum = elem.max()
box_plot_values = [minimum, median, maximum]
for e in box_plot_values:
shape = go.layout.Shape(type='line',
x0= float(e),
x1= float(e),
y0= 1.6,
y1= 1.8,
yref = 'paper',
line=dict(
color=c[0],
width=3,
)
)
shapes.append(shape)
shape = go.layout.Shape(type='line',
x0 = float(minimum),
x1= float(q1),
y0= 1.7,
y1= 1.7,
yref = 'paper',
line=dict(
color=c[0],
width=3,
)
)
shapes.append(shape)
shape = go.layout.Shape(type='line',
x0 = float(q2),
x1= float(maximum),
y0= 1.7,
y1= 1.7,
yref = 'paper',
line=dict(
color=c[0],
width=3,
)
)
shapes.append(shape)
shape = go.layout.Shape(type='rect',
x0 = float(q1),
x1= float(q2),
y0= 1.6,
y1= 1.8,
yref = 'paper',
fillcolor = c[1],
line=dict(
color=c[0],
width=3,
)
)
shapes.append(shape)
        # record the visualization ordering of the features while drawing them (rank by absolute SHAP value -> (feature name, drawing order))
self.visualization_ordering = dict()
count = 0
#visualize polygons for features with positive SHAP values
first_pos_feature = True
color_count = 0
for elem in self.pos_x_coordinates:
coords = elem[2]
self.visualization_ordering[elem[3]] = (elem[0], count)
count +=1
if elem[0] in self.sample_impacting_features_names:
if first_pos_feature:
x =[coords[1]-0.001, coords[1]-0.001, coords[3]-0.005, coords[2], coords[3]-0.005, coords[1]-0.001]
y =[0.7, 0.4, 0.4, 0.55, 0.7, 0.7]
else:
x=[coords[1]-0.006, coords[0]-0.001, coords[1]-0.006, coords[3]-0.005, coords[2], coords[3]-0.005, coords[1]-0.006]
y=[0.7, 0.55, 0.4, 0.4, 0.55, 0.7, 0.7]
else:
if first_pos_feature:
x =[coords[1]-0.001, coords[1]-0.001, coords[3], coords[2], coords[3], coords[1]-0.001]
y =[0.65, 0.45, 0.45, 0.55, 0.65, 0.65]
else:
x=[coords[1]-0.001, coords[0]-0.001, coords[1]-0.001, coords[3], coords[2], coords[3], coords[1]-0.001]
y=[0.65, 0.55, 0.45, 0.45, 0.55, 0.65, 0.65]
if color_count < len(self.red_colors):
color = self.red_colors[color_count]
else:
color = self.red_colors[-1]
text_plus = ""
if elem[0] in self.feature_value_range.keys():
text_plus = "<br>Max: " + str(self.feature_value_range[elem[0]][2]) + "<br>Average: " + str(self.feature_value_range[elem[0]][1]) + "<br>Min:" + str(self.feature_value_range[elem[0]][0])
trace = go.Scatter(x = x,
y = y,
fill = 'toself',
fillcolor = 'hsv'+ str(color),
hoveron = 'fills',
line_color = '#FFFFFF' if highlighted_feature != elem[0] else "#000000",
name = elem[0] + " = " + str(self.sample_feature_values[elem[0]]),
text= "<b>" + str(elem[0]) + " = " + str(self.sample_feature_values[elem[0]]) + "<br>SHAP value: " + "{:.3f}".format(elem[1]) + "</b><br>" + text_plus,
hoverinfo = 'text' ,
mode = 'lines',
line = dict(width = 1)
)
traces.append(trace)
first_pos_feature = False
color_count+=1
# visualize polygons for features with negative SHAP values
first_neg_feature = True
color_count = 0
for elem in self.neg_x_coordinates:
coords = elem[2]
self.visualization_ordering[elem[3]] = (elem[0], count)
count +=1
if elem[0] in self.sample_impacting_features_names:
if first_neg_feature:
x =[coords[1]+0.001, coords[1]+0.001, coords[3]+0.005, coords[2], coords[3]+0.005, coords[1]+0.001]
y =[0.7, 0.4, 0.4, 0.55, 0.7, 0.7]
else:
x=[coords[1]+0.006, coords[0]+0.001, coords[1]+0.006, coords[3]+0.005, coords[2], coords[3]+0.005, coords[1]+0.006]
y=[0.7, 0.55, 0.4, 0.4, 0.55, 0.7, 0.7]
else:
if first_neg_feature:
x=[coords[1]+0.001, coords[1]+0.001, coords[3], coords[2], coords[3], coords[1]+0.001]
y =[0.65, 0.45, 0.45, 0.55, 0.65, 0.65]
else:
x=[coords[1]+0.001, coords[0]+0.001, coords[1]+0.001, coords[3], coords[2], coords[3], coords[1]+0.001]
y=[0.65, 0.55, 0.45, 0.45, 0.55, 0.65, 0.65]
if color_count < len(self.blue_colors):
color = self.blue_colors[color_count]
else:
color = self.blue_colors[-1]
text_plus = ""
if elem[0] in self.feature_value_range.keys():
text_plus = "<br>Max: " + str(self.feature_value_range[elem[0]][2]) + "<br>Average: " + str(self.feature_value_range[elem[0]][1]) + "<br>Min: " + str(self.feature_value_range[elem[0]][0])
trace = go.Scatter(x=x,
y=y,
fill='toself',
fillcolor= "hsv" + str(color),
hoveron = 'fills',
line_color= '#FFFFFF' if highlighted_feature != elem[0] else '#000000',
name = elem[0] + " = " + str(self.sample_feature_values[elem[0]]),
text = "<b>" + str(elem[0]) + " = " + str(self.sample_feature_values[elem[0]]) + "<br>SHAP value: " + "{:.3f}".format(elem[1]) + "</b><br>" + text_plus,
hoverinfo = 'text',
mode = 'lines',
line = dict(width = 1)
)
traces.append(trace)
first_neg_feature = False
color_count+=1
# draw max pos shap value over all samples
trace = go.Scatter(x=[self.sample_prediction_prob, self.sample_prediction_prob - self.max_pos_shap_value[1]],
y=[0.85, 0.85],
hoveron = 'points',
line_color='#FF0D57',
text= "SHAP value: " + str(self.max_pos_shap_value[1]) + "<br>feature: " + str(self.max_pos_shap_value[0])
+ "<br>sample id: " + str(self.max_pos_shap_value[2]),
hoverinfo = 'text',
mode = 'lines',
line = dict(
width = 4,
),
showlegend = False,
)
traces.append(trace)
# draw max neg shap value over all samples
trace = go.Scatter(x=[self.sample_prediction_prob, self.sample_prediction_prob - self.max_neg_shap_value[1]],
y=[0.85, 0.85],
hoveron = 'points',
line_color='#1E88E5',
text= "SHAP value: " + str(self.max_neg_shap_value[1]) + "<br>feature: " + str(self.max_neg_shap_value[0]) + "<br>sample id: "
+ str(self.max_neg_shap_value[2]),
hoverinfo = 'text',
mode = 'lines',
line = dict(
width = 4,
),
showlegend = False,
)
traces.append(trace)
#draw shap value ranges
if highlighted_feature != "":
avg_pos, avg_neg, pos_count, neg_count, intervall = self.calculate_feature_value_range(highlighted_feature)
            if intervall is None:
                hovertext_pos = "average positive SHAP value for "+highlighted_feature+" = " + str(self.sample_feature_values[highlighted_feature]) + "<br>number of samples: " + str(pos_count)
                hovertext_neg = "average negative SHAP value for "+highlighted_feature+" = " + str(self.sample_feature_values[highlighted_feature]) + "<br>number of samples: "+str(neg_count)
            else:
                hovertext_pos = "average positive SHAP value for "+highlighted_feature+" in " + str(intervall) + "<br>number of samples: " + str(pos_count)
                hovertext_neg = "average negative SHAP value for "+highlighted_feature+ " in " + str(intervall) + "<br>number of samples: "+str(neg_count)
annotation = go.layout.Annotation(
x = self.sample_prediction_prob - avg_pos - 0.01,
y = 0.1,
xanchor = "right",
text = "|avg|↑ for " + highlighted_feature + " = " + str(self.sample_feature_values[highlighted_feature]) if intervall is None else "|avg|↑ for " + highlighted_feature + " in " + str(intervall),
showarrow = False,
font=go.layout.annotation.Font(
color = '#FF0D57',
size = 11
),
)
annotations.append(annotation)
annotation = go.layout.Annotation(
x = self.sample_prediction_prob - avg_neg + 0.01,
y = 0.1,
xanchor = "left",
text = "|avg|↓ for " + highlighted_feature + " = " + str(self.sample_feature_values[highlighted_feature]) if intervall is None else "|avg|↓ for " + highlighted_feature + " in " + str(intervall),
showarrow = False,
font=go.layout.annotation.Font(
color = '#1E88E5',
size = 11
),
)
annotations.append(annotation)
trace = go.Scatter( x=[self.sample_prediction_prob, self.sample_prediction_prob - avg_pos],
y=[0.1, 0.1],
hoveron = 'points',
line_color= "#FF0D57" ,
text=hovertext_pos,
hoverinfo = 'text',
mode = 'lines',
line = dict(
width = 4,
),
showlegend = False,
)
traces.append(trace)
trace = go.Scatter( x=[self.sample_prediction_prob, self.sample_prediction_prob - avg_neg],
y=[0.1, 0.1],
hoveron = 'points',
line_color= "#1E88E5",
text=hovertext_neg,
hoverinfo = 'text',
mode = 'lines',
line = dict(
width = 4,
),
showlegend = False,
)
traces.append(trace)
#design layout of forceplot
layout = go.Layout(
hovermode='closest',
plot_bgcolor = '#FFFFFF',
autosize=False,
width=1500,
height = 400,
legend = dict(
x = 1.05,
font= dict(
color = '#5E5E5E',
size = 10,
),
),
margin= dict(t = 250,
b = 0,
l = 0,
r = 0
),
xaxis = go.layout.XAxis(
position = 1,
side = 'top',
fixedrange = True,
range = [self.min_x_coordinate - 0.05, self.max_x_coordinate + 0.05],
showgrid = False,
ticks = 'inside',
zeroline = False,
tickcolor = '#A6A6A6',
tickfont = go.layout.xaxis.Tickfont(
color = '#A6A6A6',
),
tick0 = self.base_value,
dtick = 0.1,
nticks = 15,
tickformat = '.3f',
showline = True,
linecolor = '#A6A6A6'
),
yaxis = go.layout.YAxis(
showgrid = False,
showticklabels = False,
fixedrange = True,
range = [0, 1],
zeroline = False,
showline = False,
linecolor = '#A6A6A6',
),
annotations = annotations,
shapes = shapes
)
#create figure
fig = go.Figure(data = traces, layout = layout)
#create and add slider
steps = []
for i in range(len(self.visualization_ordering)):
step = dict(
method="update",
args=[{"visible": [elem.visible for elem in fig.data]}],
label = str(i+1)
)
for e in range (len(self.visualization_ordering)):
step["args"][0]["visible"][self.visualization_ordering[e][1]] = False
for e in range (i+1):
step["args"][0]["visible"][self.visualization_ordering[e][1]] = True
steps.append(step)
fig.update_layout(
sliders = [dict(
active=len(self.sample_shap_values)-1,
currentvalue={"prefix": "number of features shown: ",
"font": {
"size":15,
"color": '#5E5E5E',
}},
tickcolor= '#5E5E5E',
font={"color": '#5E5E5E',
"size": 10 },
ticklen= 5,
len= 0.2,
x= 0.8,
y= 2.5,
pad={"t": 0},
bordercolor = '#5E5E5E',
steps=steps,
)]
)
fig.show()
def draw_table(self):
shapes = []
traces = []
        annotations =
<filename>Yolo/yolo2_model_base.py
# coding = utf-8
import tensorflow as tf
import tensorflow.contrib.layers as layers
from colorama import Fore
import numpy as np
import random
import Putil.np.util as npu
import Putil.tf.util as tfu
import base.logger as plog
root_logger = plog.PutilLogConfig('yolo2ModelBase').logger()
root_logger.setLevel(plog.DEBUG)
Yolo2BuildLogger = root_logger.getChild('Yolo2Build')
Yolo2BuildLogger.setLevel(plog.DEBUG)
Yolo2GenerateFeedLogger = root_logger.getChild('Yolo2GenerateFeed')
Yolo2GenerateFeedLogger.setLevel(plog.DEBUG)
StandardYolo2GenerateLogger = root_logger.getChild('StandardYolo2Generate')
StandardYolo2GenerateLogger.setLevel(plog.DEBUG)
Yolo2GenerateLogger = root_logger.getChild('Yolo2Generate')
Yolo2GenerateLogger.setLevel(plog.DEBUG)
Yolo2GenerateILogger = root_logger.getChild('Yolo2GenerateI')
Yolo2GenerateILogger.setLevel(plog.DEBUG)
assert tf.__version__ == '1.6.0', Fore.RED + 'version of tensorflow should be 1.6.0'
class Yolo2Build:
def __init__(self, net_output, class_num, prior_h, prior_w, scalar, _dtype):
self._net_output = net_output
self._class_amount = class_num
self.__check_prior(prior_h, prior_w)
self._prior_height = prior_h
self._prior_width = prior_w
self._cluster_object_count = len(prior_h)
self._scalar = scalar
self._dtype = _dtype
self._pro__ = self.GeneratePro()
self._output_tensor = self._pro__['pro']
self._anchor_pro__ = self._pro__['anchor']
self._precision_pro__ = self._pro__['precision']
self._class_pro__ = self._pro__['class']
self._y_pro__ = self._pro__['y']
self._x_pro__ = self._pro__['x']
self._h_pro__ = self._pro__['h']
self._w_pro__ = self._pro__['w']
self._output_loss__, self._place_gt_result__, self._iou_result__ = self.AppendLoss()
self._gt_one_hot_class__ = self._place_gt_result__['class']
self._gt_feed_class__ = self._place_gt_result__['feed_class']
self._gt_y_offset__ = self._place_gt_result__['y_offset']
self._gt_y__ = self._place_gt_result__['y']
self._gt_y_feed__ = self._place_gt_result__['y_feed']
self._gt_x_offset__ = self._place_gt_result__['x_offset']
self._gt_x__ = self._place_gt_result__['x']
self._gt_x_feed__ = self._place_gt_result__['x_feed']
self._gt_h__ = self._place_gt_result__['h']
self._gt_h_feed__ = self._place_gt_result__['h_feed']
self._gt_w__ = self._place_gt_result__['w']
self.__gt_w_feed__ = self._place_gt_result__['w_feed']
self._anchor_mask__ = self._place_gt_result__['anchor_mask']
self._negative_anchor_mask__ = self._place_gt_result__['negative_anchor_mask']
self._total_loss__ = self._output_loss__['total_loss']
self._anchor_loss__ = self._output_loss__['anchor_loss']
self._precision_loss__ = self._output_loss__['precision_loss']
self._class_loss__ = self._output_loss__['class_loss']
self._indicator_mean_iou = self._output_loss__['mean_iou']
self._indicator_classify_top_one_acc = self._output_loss__['classify_top_one_acc']
self._gt_iou__ = self._iou_result__
pass
@property
def IndicatorClassifyTopOneAcc(self):
return self._indicator_classify_top_one_acc
@property
def IndicatorIoU(self):
return self._indicator_mean_iou
@property
def NewOutput(self):
return self._net_output
@property
def ClassAmount(self):
return self._class_amount
@property
def PriorHeight(self):
return self._prior_height
@property
def PriorWidth(self):
return self._prior_width
@property
def ClusterObjectAmount(self):
return self._cluster_object_count
@property
def Scalar(self):
return self._scalar
@property
def Dtype(self):
return self._dtype
@property
def Pro(self):
return self._output_tensor
@property
def AnchorPro(self):
return self._anchor_pro__
@property
def PrecisionPro(self):
return self._precision_pro__
@property
def ClassPro(self):
return self._class_pro__
@property
def YPro(self):
return self._y_pro__
@property
def XPro(self):
return self._x_pro__
@property
def HPro(self):
return self._h_pro__
@property
def WPro(self):
return self._w_pro__
@property
def GtOneHotClass(self):
return self._gt_one_hot_class__
@property
def GtClassFeed(self):
return self._gt_feed_class__
@property
def GtYOffset(self):
return self._gt_y_offset__
@property
def GtY(self):
return self._gt_y__
@property
def GtYFeed(self):
return self._gt_y_feed__
@property
def GtXOffset(self):
return self._gt_x_offset__
@property
def GtX(self):
return self._gt_x__
@property
def GtxFeed(self):
return self._gt_x_feed__
@property
def GtH(self):
return self._gt_h__
@property
def GtHFeed(self):
return self._gt_h_feed__
@property
def GtW(self):
return self._gt_w__
@property
def GtWFeed(self):
return self.__gt_w_feed__
@property
def AnchorMask(self):
return self._anchor_mask__
@property
def NegativateAnchorMask(self):
return self._negative_anchor_mask__
@property
def TotalLoss(self):
return self._total_loss__
@property
def AnchorLoss(self):
return self._anchor_loss__
@property
def PrecisionLoss(self):
return self._precision_loss__
@property
def ClassLoss(self):
return self._class_loss__
@property
def GtIou(self):
return self._gt_iou__
def __check_prior(self, prior_h, prior_w):
        # validate the priors here; raise an exception if the check fails (currently a stub that always passes)
return True
pass
def GeneratePro(self):
return gen_pro(self._net_output, self._class_amount, self._cluster_object_count, self._dtype)
pass
def AppendLoss(self):
return append_yolo2_loss(self._pro__, self._class_amount, self._prior_height, self._prior_width, self._scalar,
self._dtype)
pass
pass
def append_yolo2_loss(
yolo2_net_feature,
class_num,
prior_h,
prior_w,
scalar,
_dtype=0.32
):
"""
:param yolo2_net_feature: feature from base net output
    :param class_num: the number of classes, including background
:param prior_h: prior height list or 1-D ndarray
:param prior_w: prior width list or 1-D ndarray
:param scalar: down sample scalar
:param _dtype: model parameter dtype, default 0.32
:return:
"""
assert len(prior_w) == len(prior_h), Fore.RED + 'prior height should be same length with prior width'
print(Fore.YELLOW + '-------generate yolo2 loss---------')
print(Fore.GREEN + 'class_num : ', class_num)
print(Fore.GREEN + 'prior_h : ', prior_h)
print(Fore.GREEN + 'prior_w : ', prior_w)
print(Fore.GREEN + 'scalar : ', scalar)
cluster_object_count = len(prior_w)
place_gt_result = __PlaceGT(cluster_object_count=cluster_object_count, _dtype=_dtype).Place
place_process_result = __place_process(
place_gt_result,
class_num,
prior_h,
prior_w,
scalar=scalar,
_dtype=_dtype
)
pro_result_read_result = __pro_result_reader(
split_pro_result=yolo2_net_feature)
calc_iou_result = __calc_iou(
pro_result_read_result=pro_result_read_result,
place_process_result=place_process_result,
scalar=scalar,
prior_h=prior_h,
prior_w=prior_w,
_dtype=_dtype
)
loss = __calc_loss(
split_pro_result=yolo2_net_feature,
gt_process_result=place_process_result,
calc_iou_result=calc_iou_result)
print(Fore.YELLOW + '-------generate yolo2 loss done---------')
return loss, place_process_result, calc_iou_result
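# Hypothetical usage sketch (the backbone feature tensor and the prior values below are
# assumptions for illustration, not taken from a specific training script):
#   pro = gen_pro(backbone_feature, class_num=20, cluster_object_count=5)
#   loss, gt_placeholders, iou = append_yolo2_loss(
#       pro, class_num=20,
#       prior_h=[1.2, 2.4, 3.6, 4.8, 6.0],
#       prior_w=[1.0, 2.0, 3.0, 4.0, 5.0],
#       scalar=32)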
# Generates placeholders for the full ground-truth feed; designed to be easy to build and use.
# gt holds the ground-truth (standard) data:
# 'class' : int, includes background and every object class
# 'p_mask' : 1.0 for cells that contain an object, 0.0 elsewhere
# 'n_mask' : 1.0 for cells that do not contain any object, 0.0 elsewhere
# 'y': y shift of the object center from the cell's top-left corner; 0.0 for cells without an object
# 'x': x shift of the object center from the cell's top-left corner; 0.0 for cells without an object
# relationship between the real (center_y, center_x, height, width) and (y_shift, x_shift, h_shift, w_shift):
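# (a sketch of the usual yolo-version2 parameterization, assuming the standard formulation;
#  the actual transform implemented in __place_process is the authority here)
#   center_y = (cell_row + sigmoid(y_shift)) * scalar
#   center_x = (cell_col + sigmoid(x_shift)) * scalar
#   height   = prior_h * exp(h_shift)
#   width    = prior_w * exp(w_shift)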
class __PlaceGT:
def __init__(self, cluster_object_count, _dtype):
gt_place = dict()
dtype = tfu.tf_type(_dtype).Type
with tf.name_scope('GT'):
gt_place['class'] = tf.placeholder(dtype=tf.int32, shape=[None, None, None, cluster_object_count],
name='class')
# set 0.0 in the cell which does not contain any object except background
gt_place['y'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='y')
gt_place['x'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='x')
            # !!!!important: because of the subsequent processing in __place_process, h and w must not contain negative or zero values
            # !!!!suggestion: fill in the prior value at cell locations that do not contain any object
gt_place['h'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='h')
gt_place['w'] = tf.placeholder(dtype=dtype, shape=[None, None, None, cluster_object_count],
name='w')
# the mask frequently used in calc loss
gt_place['p_mask'] = tf.placeholder(dtype=dtype, shape=[None, None, None, 1], name='p_mask')
gt_place['n_mask'] = tf.placeholder(dtype=dtype, shape=[None, None, None, 1], name='n_mask')
# avoid learning illegal anchor
gt_place['anchor_mask'] = tf.placeholder(
dtype=dtype, shape=[None, None, None, cluster_object_count], name='anchor_mask')
pass
self._gt_place = gt_place
pass
@property
def Place(self):
return self._gt_place
def __generate(self):
return self._gt_place
pass
@property
def Class(self):
return self._gt_place['class']
@property
def Y(self):
return self._gt_place['y']
@property
def X(self):
return self._gt_place['x']
@property
def H(self):
return self._gt_place['h']
@property
def W(self):
return self._gt_place['w']
@property
def PMask(self):
return self._gt_place['p_mask']
@property
def NMask(self):
return self._gt_place['n_mask']
@property
def LegalAnchor(self):
return self._gt_place['anchor_mask']
pass
# NOTE: the pro tensor is not easy to use directly in the loss calculation, so some preprocessing is done
# in this function; it must be done in a way that still lets the gradient propagate directly.
def __split_pro_ac(pro, class_num, cluster_object_count):
"""
    split the pro of yolo2 into several parts
:param pro: the pro of gen_pro
:param class_num: class amount
:param cluster_object_count: prior anchor amount
:return:
{'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
pro: the input pro [y, x, h, w, precision, class_part, .., ..., ]
    anchor: all anchors concatenated: [batch, cell_height, cell_width, cluster_object_count, 4 (i.e. [y, x, h, w])]
precision:[batch, cell_height, cell_width, cluster_object_count]
class:[batch * cell_height * cell_width * cluster_object_count, class_amount]
y: [batch, cell_height, cell_width, cluster_object_count]
x: [batch, cell_height, cell_width, cluster_object_count]
h: [batch, cell_height, cell_width, cluster_object_count]
w: [batch, cell_height, cell_width, cluster_object_count]
"""
with tf.name_scope('split_and_pro'):
# generate all part y x: sigmoid; h w: None; precision: sigmoid; class: part softmax
with tf.name_scope('total_split'):
with tf.name_scope('y_part'):
y_part = pro[:, :, :, 0: ((cluster_object_count - 1) * (4 + 1 + class_num) + 1): 4 + 1 + class_num]
y_pro = y_part
pass
with tf.name_scope('x_part'):
x_part = pro[:, :, :, 1: ((cluster_object_count - 1) * (4 + 1 + class_num) + 2): 4 + 1 + class_num]
x_pro = x_part
pass
with tf.name_scope('h_part'):
h_part = pro[:, :, :, 2: ((cluster_object_count - 1) * (4 + 1 + class_num) + 3): 4 + 1 + class_num]
h_pro = h_part
pass
with tf.name_scope('w_part'):
w_part = pro[:, :, :, 3: ((cluster_object_count - 1) * (4 + 1 + class_num) + 4): 4 + 1 + class_num]
w_pro = w_part
pass
with tf.name_scope('precision_part'):
precision_part = pro[:, :, :,
4: ((cluster_object_count - 1) * (4 + 1 + class_num) + 5): 4 + 1 + class_num]
precision_pro = precision_part
pass
with tf.name_scope('class_part'):
class_part = tf.reshape(pro, [-1, 4 + 1 + class_num])
class_part = class_part[:, 5::]
class_pro = class_part
pass
pass
with tf.name_scope('anchor_pro'):
anchor_pro = tf.concat(
[tf.expand_dims(y_pro, axis=-1), tf.expand_dims(x_pro, -1), tf.expand_dims(h_pro, -1),
tf.expand_dims(w_pro, -1)],
axis=-1)
return {'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
pass
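# Worked example of the strided slicing above (illustrative numbers, not from a real run):
# with class_num = 3 and cluster_object_count = 2, each anchor occupies 4 + 1 + 3 = 8
# channels, so the y slice pro[..., 0:(1*8 + 1):8] picks channels 0 and 8, the x slice
# picks 1 and 9, and so on; the class part reshapes the tensor to rows of 8 values and
# keeps columns 5..7.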
# This function generates the standard pro tensor of the yolo-version2 network, which is split into
# {'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
# 'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
def gen_pro(other_new_feature, class_num, cluster_object_count, _dtype=0.32):
"""
pro = {'pro': pro, 'anchor': anchor_pro, 'precision': precision_pro, 'class': class_pro,
'y': y_pro, 'x': x_pro, 'h': h_pro, 'w': w_pro}
:param other_new_feature: base net feature
:param class_num:
:param cluster_object_count:
:return:
"""
print(Fore.YELLOW + '-----------generate yolo2 base pro---------')
print(Fore.GREEN + 'class_num : ', class_num)
print(Fore.GREEN + 'cluster_object_count : ', cluster_object_count)
feature_chanel = other_new_feature.shape.as_list()[-1]
dtype = tfu.tf_type(_dtype).Type
with tf.name_scope('yolo_pro'):
| |
<reponame>maxbeegee/yt
"""
openPMD data structures
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
# Copyright (c) 2015, <NAME> (HZDR)
# Copyright (c) 2016, <NAME> (HZDR)
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
from distutils.version import StrictVersion
from functools import reduce
from operator import mul
from os import \
path, \
listdir
from re import match
import numpy as np
from yt.data_objects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
from yt.data_objects.time_series import DatasetSeries
from yt.frontends.open_pmd.fields import OpenPMDFieldInfo
from yt.frontends.open_pmd.misc import \
is_const_component, \
get_component
from yt.funcs import setdefaultattr
from yt.geometry.grid_geometry_handler import GridIndex
from yt.utilities.file_handler import HDF5FileHandler, \
warn_h5py
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5
ompd_known_versions = [StrictVersion("1.0.0"),
StrictVersion("1.0.1"),
StrictVersion("1.1.0")]
opmd_required_attributes = ["openPMD", "basePath"]
class OpenPMDGrid(AMRGridPatch):
"""Represents chunk of data on-disk.
This defines the index and offset for every mesh and particle type.
It also defines parents and children grids. Since openPMD does not have multiple levels of refinement,
there are no parents or children for any grid.
"""
_id_offset = 0
__slots__ = ["_level_id"]
# Every particle species and mesh might have different hdf5-indices and offsets
ftypes = []
ptypes = []
findex = 0
foffset = 0
pindex = 0
poffset = 0
def __init__(self, gid, index, level=-1, fi=0, fo=0, pi=0, po=0, ft=[], pt=[]):
AMRGridPatch.__init__(self, gid, filename=index.index_filename,
index=index)
self.findex = fi
self.foffset = fo
self.pindex = pi
self.poffset = po
self.ftypes = ft
self.ptypes = pt
self.Parent = None
self.Children = []
self.Level = level
def __repr__(self):
return "OpenPMDGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
class OpenPMDHierarchy(GridIndex):
"""Defines which fields and particles are created and read from disk.
Furthermore it defines the characteristics of the grids.
"""
grid = OpenPMDGrid
def __init__(self, ds, dataset_type="openPMD"):
self.dataset_type = dataset_type
self.dataset = ds
self.index_filename = ds.parameter_filename
self.directory = path.dirname(self.index_filename)
GridIndex.__init__(self, ds, dataset_type)
def _get_particle_type_counts(self):
"""Reads the active number of particles for every species.
Returns
-------
dict
keys are ptypes
values are integer counts of the ptype
"""
result = {}
f = self.dataset._handle
bp = self.dataset.base_path
pp = self.dataset.particles_path
try:
for ptype in self.ds.particle_types_raw:
if str(ptype) == "io":
spec = list(f[bp + pp].keys())[0]
else:
spec = ptype
axis = list(f[bp + pp + "/" + spec + "/position"].keys())[0]
pos = f[bp + pp + "/" + spec + "/position/" + axis]
if is_const_component(pos):
result[ptype] = pos.attrs["shape"]
else:
result[ptype] = pos.len()
except(KeyError):
result["io"] = 0
return result
def _detect_output_fields(self):
"""Populates ``self.field_list`` with native fields (mesh and particle) on disk.
Each entry is a tuple of two strings. The first element is the on-disk fluid type or particle type.
The second element is the name of the field in yt. This string is later used for accessing the data.
Convention suggests that the on-disk fluid type should be "openPMD",
the on-disk particle type (for a single species of particles) is "io"
or (for multiple species of particles) the particle name on-disk.
"""
f = self.dataset._handle
bp = self.dataset.base_path
mp = self.dataset.meshes_path
pp = self.dataset.particles_path
mesh_fields = []
try:
meshes = f[bp + mp]
for mname in meshes.keys():
try:
mesh = meshes[mname]
for axis in mesh.keys():
mesh_fields.append(mname.replace("_", "-")
+ "_" + axis)
except AttributeError:
# This is a h5.Dataset (i.e. no axes)
mesh_fields.append(mname.replace("_", "-"))
except(KeyError, TypeError, AttributeError):
pass
self.field_list = [("openPMD", str(field)) for field in mesh_fields]
particle_fields = []
try:
particles = f[bp + pp]
for pname in particles.keys():
species = particles[pname]
for recname in species.keys():
record = species[recname]
if is_const_component(record):
# Record itself (e.g. particle_mass) is constant
particle_fields.append(pname.replace("_", "-")
+ "_" + recname.replace("_", "-"))
elif "particlePatches" not in recname:
try:
# Create a field for every axis (x,y,z) of every property (position)
# of every species (electrons)
axes = list(record.keys())
if str(recname) == "position":
recname = "positionCoarse"
for axis in axes:
particle_fields.append(pname.replace("_", "-")
+ "_" + recname.replace("_", "-")
+ "_" + axis)
except AttributeError:
# Record is a dataset, does not have axes (e.g. weighting)
particle_fields.append(pname.replace("_", "-")
+ "_" + recname.replace("_", "-"))
pass
else:
pass
if len(list(particles.keys())) > 1:
# There is more than one particle species, use the specific names as field types
self.field_list.extend(
[(str(field).split("_")[0],
("particle_" + "_".join(str(field).split("_")[1:]))) for field in particle_fields])
else:
# Only one particle species, fall back to "io"
self.field_list.extend(
[("io",
("particle_" + "_".join(str(field).split("_")[1:]))) for field in particle_fields])
except(KeyError, TypeError, AttributeError):
pass
def _count_grids(self):
"""Sets ``self.num_grids`` to be the total number of grids in the simulation.
The number of grids is determined by their respective memory footprint.
"""
f = self.dataset._handle
bp = self.dataset.base_path
mp = self.dataset.meshes_path
pp = self.dataset.particles_path
self.meshshapes = {}
self.numparts = {}
self.num_grids = 0
try:
meshes = f[bp + mp]
for mname in meshes.keys():
mesh = meshes[mname]
if type(mesh) is h5.Group:
shape = mesh[list(mesh.keys())[0]].shape
else:
shape = mesh.shape
spacing = tuple(mesh.attrs["gridSpacing"])
offset = tuple(mesh.attrs["gridGlobalOffset"])
unit_si = mesh.attrs["gridUnitSI"]
self.meshshapes[mname] = (shape, spacing, offset, unit_si)
except(KeyError, TypeError, AttributeError):
pass
try:
particles = f[bp + pp]
for pname in particles.keys():
species = particles[pname]
if "particlePatches" in species.keys():
for (patch, size) in enumerate(species["/particlePatches/numParticles"]):
self.numparts[pname + "#" + str(patch)] = size
else:
axis = list(species["/position"].keys())[0]
if is_const_component(species["/position/" + axis]):
self.numparts[pname] = species["/position/" + axis].attrs["shape"]
else:
self.numparts[pname] = species["/position/" + axis].len()
except(KeyError, TypeError, AttributeError):
pass
# Limit values per grid by resulting memory footprint
        self.vpg = int(self.dataset.gridsize / 4)  # 4 bytes per value (float32)
# Meshes of the same size do not need separate chunks
for (shape, spacing, offset, unit_si) in set(self.meshshapes.values()):
self.num_grids += min(shape[0], int(np.ceil(reduce(mul, shape) * self.vpg**-1)))
# Same goes for particle chunks if they are not inside particlePatches
patches = {}
no_patches = {}
for (k, v) in self.numparts.items():
if "#" in k:
patches[k] = v
else:
no_patches[k] = v
for size in set(no_patches.values()):
self.num_grids += int(np.ceil(size * self.vpg**-1))
for size in patches.values():
self.num_grids += int(np.ceil(size * self.vpg**-1))
def _parse_index(self):
"""Fills each grid with appropriate properties (extent, dimensions, ...)
This calculates the properties of every OpenPMDGrid based on the total number of grids in the simulation.
The domain is divided into ``self.num_grids`` (roughly) equally sized chunks along the x-axis.
``grid_levels`` is always equal to 0 since we only have one level of refinement in openPMD.
Notes
-----
``self.grid_dimensions`` is rounded to the nearest integer. Grid edges are calculated from this dimension.
Grids with dimensions [0, 0, 0] are particle only. The others do not have any particles affiliated with them.
"""
f = self.dataset._handle
bp = self.dataset.base_path
pp = self.dataset.particles_path
self.grid_levels.flat[:] = 0
self.grids = np.empty(self.num_grids, dtype="object")
grid_index_total = 0
# Mesh grids
for mesh in set(self.meshshapes.values()):
(shape, spacing, offset, unit_si) = mesh
shape = np.asarray(shape)
spacing = np.asarray(spacing)
offset = np.asarray(offset)
# Total dimension of this grid
domain_dimension = np.asarray(shape, dtype=np.int32)
domain_dimension = np.append(domain_dimension, np.ones(3 - len(domain_dimension)))
# Number of grids of this shape
num_grids = min(shape[0], int(np.ceil(reduce(mul, shape) * self.vpg**-1)))
gle = offset * unit_si # self.dataset.domain_left_edge
gre = domain_dimension[:spacing.size] * unit_si * spacing + gle # self.dataset.domain_right_edge
gle = np.append(gle, np.zeros(3 - len(gle)))
gre = np.append(gre, np.ones(3 - len(gre)))
grid_dim_offset = np.linspace(0, domain_dimension[0], num_grids + 1, dtype=np.int32)
            grid_edge_offset = grid_dim_offset * float(domain_dimension[0])**-1 * (gre[0] - gle[0]) + gle[0]
mesh_names = []
for (mname, mdata) in self.meshshapes.items():
if mesh == mdata:
mesh_names.append(str(mname))
prev = 0
for grid in np.arange(num_grids):
self.grid_dimensions[grid_index_total] = domain_dimension
self.grid_dimensions[grid_index_total][0] = grid_dim_offset[grid + 1] - grid_dim_offset[grid]
self.grid_left_edge[grid_index_total] = gle
self.grid_left_edge[grid_index_total][0] = grid_edge_offset[grid]
self.grid_right_edge[grid_index_total] = gre
self.grid_right_edge[grid_index_total][0] = grid_edge_offset[grid + 1]
self.grid_particle_count[grid_index_total] = 0
self.grids[grid_index_total] = self.grid(grid_index_total, self, 0,
fi=prev,
fo=self.grid_dimensions[grid_index_total][0],
ft=mesh_names)
prev += self.grid_dimensions[grid_index_total][0]
grid_index_total += 1
handled_ptypes = []
# Particle grids
for (species, count) in self.numparts.items():
if "#" in species:
# This is a particlePatch
spec = species.split("#")
patch = f[bp + pp + "/" + spec[0] + "/particlePatches"]
domain_dimension = np.ones(3, dtype=np.int32)
for (ind, axis) in enumerate(list(patch["extent"].keys())):
domain_dimension[ind] = patch["extent/" + axis][()][int(spec[1])]
num_grids = int(np.ceil(count * self.vpg**-1))
gle = []
for axis in patch["offset"].keys():
gle.append(get_component(patch, "offset/" + axis, int(spec[1]), 1)[0])
gle = np.asarray(gle)
gle = np.append(gle, np.zeros(3 - len(gle)))
gre = []
for axis in patch["extent"].keys():
gre.append(get_component(patch, "extent/" | |
<reponame>YoheiN/donkeycar
'''
utils.py
Functions that don't fit anywhere else.
'''
from io import BytesIO
import os
import glob
import socket
import zipfile
import sys
import itertools
import subprocess
import math
import random
import time
from PIL import Image
import numpy as np
'''
IMAGES
'''
def scale(im, size=128):
'''
accepts: PIL image, size of square sides
    returns: PIL image scaled so side length = size
'''
size = (size,size)
im.thumbnail(size, Image.ANTIALIAS)
return im
def img_to_binary(img, format='jpeg'):
'''
accepts: PIL image
returns: binary stream (used to save to database)
'''
f = BytesIO()
try:
img.save(f, format=format)
except Exception as e:
raise e
return f.getvalue()
def arr_to_binary(arr):
'''
    accepts: numpy array with shape (Height, Width, Channels)
returns: binary stream (used to save to database)
'''
img = arr_to_img(arr)
return img_to_binary(img)
def arr_to_img(arr):
'''
    accepts: numpy array with shape (Height, Width, Channels)
    returns: PIL image
'''
arr = np.uint8(arr)
img = Image.fromarray(arr)
return img
def img_to_arr(img):
'''
    accepts: PIL image
    returns: numpy array with shape (Height, Width, Channels)
'''
return np.array(img)
def binary_to_img(binary):
'''
accepts: binary file object from BytesIO
returns: PIL image
'''
if binary is None or len(binary) == 0:
return None
img = BytesIO(binary)
try:
img = Image.open(img)
return img
except:
return None
def norm_img(img):
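    # note the operator precedence: this computes img - (img.mean() / np.std(img)),
    # then divides the result by 255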
return (img - img.mean() / np.std(img))/255.0
def create_video(img_dir_path, output_video_path):
import envoy
# Setup path to the images with telemetry.
full_path = os.path.join(img_dir_path, 'frame_*.png')
# Run ffmpeg.
command = ("""ffmpeg
-framerate 30/1
-pattern_type glob -i '%s'
-c:v libx264
-r 15
-pix_fmt yuv420p
-y
%s""" % (full_path, output_video_path))
response = envoy.run(command)
def rgb2gray(rgb):
'''
take a numpy rgb image return a new single channel image converted to greyscale
'''
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def img_crop(img_arr, top, bottom):
    if bottom == 0:
end = img_arr.shape[0]
else:
end = -bottom
return img_arr[top:end,: ,:]
def normalize_and_crop(img_arr, cfg):
img_arr = img_arr.astype(np.float32) / 255.0
if cfg.ROI_CROP_TOP or cfg.ROI_CROP_BOTTOM:
img_arr = img_crop(img_arr, cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)
return img_arr
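# Shape check for the crop above (hypothetical config values): a (120, 160, 3) image with
# ROI_CROP_TOP=20 and ROI_CROP_BOTTOM=10 becomes img_arr[20:-10, :, :], i.e. (90, 160, 3).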
def load_scaled_image_arr(filename, cfg):
'''
load an image from the filename, and use the cfg to resize if needed
also apply cropping and normalize
'''
import donkeycar as dk
try:
img = Image.open(filename)
if img.height != cfg.IMAGE_H or img.width != cfg.IMAGE_W:
img = img.resize((cfg.IMAGE_W, cfg.IMAGE_H))
img_arr = np.array(img)
img_arr = normalize_and_crop(img_arr, cfg)
if img_arr.shape[2] == 3 and cfg.IMAGE_DEPTH == 1:
img_arr = dk.utils.rgb2gray(img_arr).reshape(cfg.IMAGE_H, cfg.IMAGE_W, 1)
except Exception as e:
print(e)
print('failed to load image:', filename)
img_arr = None
return img_arr
'''
FILES
'''
def most_recent_file(dir_path, ext=''):
'''
return the most recent file given a directory path and extension
'''
query = dir_path + '/*' + ext
    newest = max(glob.iglob(query), key=os.path.getctime)
return newest
def make_dir(path):
real_path = os.path.expanduser(path)
if not os.path.exists(real_path):
os.makedirs(real_path)
return real_path
def zip_dir(dir_path, zip_path):
"""
Create and save a zipfile of a one level directory
"""
file_paths = glob.glob(dir_path + "/*") #create path to search for files.
zf = zipfile.ZipFile(zip_path, 'w')
dir_name = os.path.basename(dir_path)
for p in file_paths:
file_name = os.path.basename(p)
zf.write(p, arcname=os.path.join(dir_name, file_name))
zf.close()
return zip_path
'''
BINNING
functions to help convert between floating point numbers and categories.
'''
def clamp(n, min, max):
if n < min:
return min
if n > max:
return max
return n
def linear_bin(a, N=15, offset=1, R=2.0):
'''
    create a one-hot bin vector of length N
    map value a onto range R
    offset the one-hot index by offset, commonly R/2
'''
a = a + offset
b = round(a / (R/(N-offset)))
arr = np.zeros(N)
b = clamp(b, 0, N - 1)
arr[int(b)] = 1
return arr
def linear_unbin(arr, N=15, offset=-1, R=2.0):
'''
    perform the inverse of linear_bin: take a
    one-hot encoded arr and get the argmax value,
    then rescale it given the R range and offset
'''
b = np.argmax(arr)
a = b *(R/(N + offset)) + offset
return a
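# Round-trip example with the default arguments shown above: linear_bin(0.0) places the 1
# at index 7 of a 15-element vector, and linear_unbin of that vector returns
# 7 * (2.0 / 14) - 1 = 0.0.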
def map_range(x, X_min, X_max, Y_min, Y_max):
'''
Linear mapping between two ranges of values
'''
X_range = X_max - X_min
Y_range = Y_max - Y_min
XY_ratio = X_range/Y_range
y = ((x-X_min) / XY_ratio + Y_min) // 1
return int(y)
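# Example (a typical steering-to-PWM style mapping, the numbers are illustrative):
# map_range(0.0, -1, 1, 1000, 2000) -> ((0 - (-1)) / 0.002 + 1000) // 1 = 1500.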
'''
ANGLES
'''
def norm_deg(theta):
while theta > 360:
theta -= 360
while theta < 0:
theta += 360
return theta
DEG_TO_RAD = math.pi / 180.0
def deg2rad(theta):
return theta * DEG_TO_RAD
'''
VECTORS
'''
def dist(x1, y1, x2, y2):
return math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))
'''
NETWORKING
'''
def my_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('172.16.31.10', 1027))
return s.getsockname()[0]
'''
OTHER
'''
def merge_two_dicts(x, y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z
def param_gen(params):
'''
    Accepts a dictionary of parameter options and yields
    dictionaries with the permutations of the parameters.
'''
for p in itertools.product(*params.values()):
yield dict(zip(params.keys(), p ))
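# Example: param_gen({'lr': [0.1, 0.01], 'batch': [32, 64]}) yields four dicts, e.g.
# {'lr': 0.1, 'batch': 32}, {'lr': 0.1, 'batch': 64}, ... (key order follows the input dict).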
def run_shell_command(cmd, cwd=None, timeout=15):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
out = []
err = []
try:
proc.wait(timeout=timeout)
except subprocess.TimeoutExpired:
kill(proc.pid)
for line in proc.stdout.readlines():
out.append(line.decode())
for line in proc.stderr.readlines():
err.append(line)
return out, err, proc.pid
'''
def kill(proc_pid):
process = psutil.Process(proc_pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
'''
import signal
def kill(proc_id):
os.kill(proc_id, signal.SIGINT)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
"""
Tub management
"""
def expand_path_masks(paths):
'''
take a list of paths and expand any wildcards
returns a new list of paths fully expanded
'''
import glob
expanded_paths = []
for path in paths:
if '*' in path or '?' in path:
mask_paths = glob.glob(path)
expanded_paths += mask_paths
else:
expanded_paths.append(path)
return expanded_paths
def gather_tub_paths(cfg, tub_names=None):
'''
    takes as input the configuration, and the comma separated list of tub paths
returns a list of Tub paths
'''
if tub_names:
if type(tub_names) == list:
tub_paths = [os.path.expanduser(n) for n in tub_names]
else:
tub_paths = [os.path.expanduser(n) for n in tub_names.split(',')]
return expand_path_masks(tub_paths)
else:
paths = [os.path.join(cfg.DATA_PATH, n) for n in os.listdir(cfg.DATA_PATH)]
dir_paths = []
for p in paths:
if os.path.isdir(p):
dir_paths.append(p)
return dir_paths
def gather_tubs(cfg, tub_names):
'''
    takes as input the configuration, and the comma separated list of tub paths
returns a list of Tub objects initialized to each path
'''
from donkeycar.parts.datastore import Tub
tub_paths = gather_tub_paths(cfg, tub_names)
tubs = [Tub(p) for p in tub_paths]
return tubs
"""
Training helpers
"""
def get_image_index(fnm):
sl = os.path.basename(fnm).split('_')
return int(sl[0])
def get_record_index(fnm):
sl = os.path.basename(fnm).split('_')
return int(sl[1].split('.')[0])
def gather_records(cfg, tub_names, opts=None, verbose=False):
tubs = gather_tubs(cfg, tub_names)
records = []
for tub in tubs:
if verbose:
print(tub.path)
record_paths = tub.gather_records()
records += record_paths
return records
def get_model_by_type(model_type, cfg):
'''
given the string model_type and the configuration settings in cfg
create a Keras model and return it.
'''
from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN, KerasLocalizer, KerasLatent
from donkeycar.parts.tflite import TFLitePilot
if model_type is None:
model_type = cfg.DEFAULT_MODEL_TYPE
print("\"get_model_by_type\" model Type is: {}".format(model_type))
input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
roi_crop = (cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)
if model_type == "tflite_linear":
kl = TFLitePilot()
elif model_type == "localizer" or cfg.TRAIN_LOCALIZER:
kl = KerasLocalizer(num_outputs=2, num_behavior_inputs=len(cfg.BEHAVIOR_LIST), num_locations=cfg.NUM_LOCATIONS, input_shape=input_shape)
elif model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
kl = KerasBehavioral(num_outputs=2, num_behavior_inputs=len(cfg.BEHAVIOR_LIST), input_shape=input_shape)
elif model_type == "imu":
kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)
elif model_type == "linear":
kl = KerasLinear(input_shape=input_shape, roi_crop=roi_crop)
elif model_type == "tensorrt_linear":
# Aggressively lazy load this. This module imports pycuda.autoinit which causes a lot of unexpected things
# to happen when using TF-GPU for training.
from donkeycar.parts.tensorrt import TensorRTLinear
kl = TensorRTLinear(cfg=cfg)
elif model_type == "3d":
kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
elif model_type == "rnn":
kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
elif model_type == "categorical":
kl = KerasCategorical(input_shape=input_shape, throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE, roi_crop=roi_crop)
elif model_type == "latent":
kl = KerasLatent(input_shape=input_shape)
else:
raise Exception("unknown model type: %s" % model_type)
return kl
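# Hypothetical usage (cfg is assumed to be a loaded donkeycar config object and
# model_path a path supplied by the caller):
#   kl = get_model_by_type("linear", cfg)
#   kl.load(model_path)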
def get_test_img(model):
'''
    query the model input to see what shape it expects and
    make a random image compatible with that model
'''
assert(len(model.inputs) > 0)
try:
count, h, w, ch = model.inputs[0].get_shape()
seq_len = 0
except Exception as e:
count, seq_len, h, w, ch = model.inputs[0].get_shape()
#generate random array in the right shape
img = np.random.rand(int(h), int(w), int(ch))
return img
def train_test_split(data_list, shuffle=True, test_size=0.2):
'''
take a list, split it into two sets while selecting a
random element in order to shuffle the results.
use the test_size to choose the split percent.
shuffle is always True, left there to be backwards compatible
'''
assert(shuffle==True)
train_data = []
target_train_size = len(data_list) * (1. - test_size)
i_sample = 0
while i_sample < target_train_size and len(data_list) > 1:
        i_choice
m.b148*m.b435 + m.b148*m.b443 + m.b148*m.b445 + m.b148*m.b446 + m.b148*m.b464 + m.b148*m.b479 +
m.b148*m.b489 + m.b148*m.b494 + m.b148*m.b551 + m.b148*m.b564 + m.b148*m.b568 + m.b148*m.b573 +
m.b148*m.b593 + m.b148*m.b598 + m.b148*m.b601 + m.b148*m.b613 + m.b148*m.b620 + m.b148*m.b622 +
m.b148*m.b625 + m.b148*m.b643 + m.b148*m.b647 + 0.5*m.b149*m.b151 + 0.5*m.b149*m.b162 + m.b149*
m.b169 + m.b149*m.b174 + 0.5*m.b149*m.b176 + 0.5*m.b149*m.b178 + 0.5*m.b149*m.b183 + 0.5*m.b149*
m.b184 + 0.5*m.b149*m.b186 + 0.5*m.b150*m.b152 + 0.5*m.b150*m.b154 + 0.5*m.b150*m.b155 + 0.5*
m.b150*m.b158 + 0.5*m.b150*m.b161 + 0.5*m.b150*m.b163 + 0.5*m.b150*m.b164 + 0.5*m.b150*m.b165 +
m.b150*m.b166 + 0.5*m.b150*m.b180 + 0.5*m.b150*m.b181 + 0.5*m.b150*m.b252 + 0.5*m.b150*m.b253 +
0.5*m.b150*m.b265 + 0.5*m.b150*m.b298 + 0.5*m.b150*m.b300 + 0.5*m.b150*m.b318 + 0.5*m.b150*m.b334
+ 0.5*m.b150*m.b337 + 0.5*m.b150*m.b357 + 0.5*m.b150*m.b374 + 0.5*m.b150*m.b383 + 0.5*m.b150*
m.b397 + 0.5*m.b150*m.b402 + 0.5*m.b150*m.b410 + 0.5*m.b150*m.b411 + 0.5*m.b150*m.b505 + 0.5*
m.b150*m.b509 + 0.5*m.b150*m.b510 + 0.5*m.b150*m.b530 + 0.5*m.b150*m.b536 + 0.5*m.b150*m.b544 +
0.5*m.b150*m.b547 + 0.5*m.b150*m.b553 + 0.5*m.b150*m.b562 + 0.5*m.b150*m.b569 + 0.5*m.b150*m.b574
+ 0.5*m.b150*m.b576 + 0.5*m.b150*m.b583 + 0.5*m.b150*m.b586 + 0.5*m.b150*m.b591 + 0.5*m.b150*
m.b602 + 0.5*m.b150*m.b605 + 0.5*m.b150*m.b641 + 0.5*m.b150*m.b645 + 0.5*m.b150*m.b648 + 0.5*
m.b150*m.b650 + 0.5*m.b150*m.b656 + 0.5*m.b150*m.b658 + 0.5*m.b150*m.b662 + 0.5*m.b150*m.b666 +
m.b150*m.x842 + m.b151*m.b162 + 0.5*m.b151*m.b169 + 0.5*m.b151*m.b174 + m.b151*m.b184 + 0.5*
m.b151*m.b251 + 0.5*m.b151*m.b263 + 0.5*m.b151*m.b271 + 0.5*m.b151*m.b284 + m.b152*m.b154 + 0.5*
m.b152*m.b155 + 0.5*m.b152*m.b158 + 0.5*m.b152*m.b161 + 0.5*m.b152*m.b163 + 0.5*m.b152*m.b164 +
m.b152*m.b165 + 0.5*m.b152*m.b166 + 0.5*m.b152*m.b180 + 0.5*m.b152*m.b181 + 0.5*m.b152*m.b252 +
0.5*m.b152*m.b253 + 0.5*m.b152*m.b265 + 0.5*m.b152*m.b298 + 0.5*m.b152*m.b300 + 0.5*m.b152*m.b318
+ 0.5*m.b152*m.b334 + 0.5*m.b152*m.b337 + 0.5*m.b152*m.b357 + 0.5*m.b152*m.b374 + 0.5*m.b152*
m.b383 + 0.5*m.b152*m.b397 + 0.5*m.b152*m.b402 + 0.5*m.b152*m.b410 + 0.5*m.b152*m.b411 + 0.5*
m.b152*m.b431 + 0.5*m.b152*m.b505 + 0.5*m.b152*m.b507 + 0.5*m.b152*m.b509 + 0.5*m.b152*m.b510 +
0.5*m.b152*m.b519 + 0.5*m.b152*m.b530 + 0.5*m.b152*m.b533 + 0.5*m.b152*m.b536 + 0.5*m.b152*m.b544
+ 0.5*m.b152*m.b547 + 0.5*m.b152*m.b553 + 0.5*m.b152*m.b562 + 0.5*m.b152*m.b569 + 0.5*m.b152*
m.b574 + 0.5*m.b152*m.b576 + 0.5*m.b152*m.b583 + 0.5*m.b152*m.b586 + 0.5*m.b152*m.b591 + 0.5*
m.b152*m.b602 + 0.5*m.b152*m.b605 + 0.5*m.b152*m.b616 + 0.5*m.b152*m.b641 + 0.5*m.b152*m.b642 +
0.5*m.b152*m.b645 + 0.5*m.b152*m.b648 + 0.5*m.b152*m.b650 + 0.5*m.b152*m.b652 + 0.5*m.b152*m.b656
+ 0.5*m.b152*m.b658 + 0.5*m.b152*m.b662 + 0.5*m.b152*m.b666 + 0.5*m.b153*m.b156 + 0.5*m.b153*
m.b157 + 0.5*m.b153*m.b159 + 0.5*m.b153*m.b160 + m.b153*m.b167 + 0.5*m.b153*m.b168 + 0.5*m.b153*
m.b170 + m.b153*m.b171 + 0.5*m.b153*m.b172 + 0.5*m.b153*m.b173 + 0.5*m.b153*m.b175 + 0.5*m.b153*
m.b176 + 0.5*m.b153*m.b177 + 0.5*m.b153*m.b178 + 0.5*m.b153*m.b179 + 0.5*m.b153*m.b183 + 0.5*
m.b153*m.b186 + 0.5*m.b153*m.b251 + 0.5*m.b153*m.b263 + 0.5*m.b153*m.b271 + 0.5*m.b153*m.b284 +
0.5*m.b154*m.b155 + 0.5*m.b154*m.b158 + 0.5*m.b154*m.b161 + 0.5*m.b154*m.b163 + 0.5*m.b154*m.b164
+ m.b154*m.b165 + 0.5*m.b154*m.b166 + 0.5*m.b154*m.b180 + 0.5*m.b154*m.b181 + 0.5*m.b154*m.b252
+ 0.5*m.b154*m.b253 + 0.5*m.b154*m.b265 + 0.5*m.b154*m.b298 + 0.5*m.b154*m.b300 + 0.5*m.b154*
m.b318 + 0.5*m.b154*m.b334 + 0.5*m.b154*m.b337 + 0.5*m.b154*m.b357 + 0.5*m.b154*m.b374 + 0.5*
m.b154*m.b383 + 0.5*m.b154*m.b397 + 0.5*m.b154*m.b402 + 0.5*m.b154*m.b410 + 0.5*m.b154*m.b411 +
0.5*m.b154*m.b431 + 0.5*m.b154*m.b505 + 0.5*m.b154*m.b507 + 0.5*m.b154*m.b509 + 0.5*m.b154*m.b510
+ 0.5*m.b154*m.b519 + 0.5*m.b154*m.b530 + 0.5*m.b154*m.b533 + 0.5*m.b154*m.b536 + 0.5*m.b154*
m.b544 + 0.5*m.b154*m.b547 + 0.5*m.b154*m.b553 + 0.5*m.b154*m.b562 + 0.5*m.b154*m.b569 + 0.5*
m.b154*m.b574 + 0.5*m.b154*m.b576 + 0.5*m.b154*m.b583 + 0.5*m.b154*m.b586 + 0.5*m.b154*m.b591 +
0.5*m.b154*m.b602 + 0.5*m.b154*m.b605 + 0.5*m.b154*m.b616 + 0.5*m.b154*m.b641 + 0.5*m.b154*m.b642
+ 0.5*m.b154*m.b645 + 0.5*m.b154*m.b648 + 0.5*m.b154*m.b650 + 0.5*m.b154*m.b652 + 0.5*m.b154*
m.b656 + 0.5*m.b154*m.b658 + 0.5*m.b154*m.b662 + 0.5*m.b154*m.b666 + 0.5*m.b155*m.b158 + 0.5*
m.b155*m.b161 + 0.5*m.b155*m.b163 + m.b155*m.b164 + 0.5*m.b155*m.b165 + 0.5*m.b155*m.b166 +
m.b155*m.b180 + 0.5*m.b155*m.b181 + 0.5*m.b155*m.b252 + 0.5*m.b155*m.b253 + 0.5*m.b155*m.b265 +
0.5*m.b155*m.b298 + 0.5*m.b155*m.b300 + 0.5*m.b155*m.b318 + 0.5*m.b155*m.b334 + 0.5*m.b155*m.b337
+ 0.5*m.b155*m.b357 + 0.5*m.b155*m.b374 + 0.5*m.b155*m.b383 + 0.5*m.b155*m.b397 + 0.5*m.b155*
m.b402 + 0.5*m.b155*m.b410 + 0.5*m.b155*m.b411 + 0.5*m.b155*m.b505 + 0.5*m.b155*m.b509 + 0.5*
m.b155*m.b510 + 0.5*m.b155*m.b530 + 0.5*m.b155*m.b536 + 0.5*m.b155*m.b544 + 0.5*m.b155*m.b547 +
0.5*m.b155*m.b553 + 0.5*m.b155*m.b562 + 0.5*m.b155*m.b569 + 0.5*m.b155*m.b574 + 0.5*m.b155*m.b576
+ 0.5*m.b155*m.b583 + 0.5*m.b155*m.b586 + 0.5*m.b155*m.b591 + 0.5*m.b155*m.b602 + 0.5*m.b155*
m.b605 + 0.5*m.b155*m.b641 + 0.5*m.b155*m.b645 + 0.5*m.b155*m.b648 + 0.5*m.b155*m.b650 + 0.5*
m.b155*m.b656 + 0.5*m.b155*m.b658 + 0.5*m.b155*m.b662 + 0.5*m.b155*m.b666 + m.b155*m.x843 +
m.b156*m.b157 + 0.5*m.b156*m.b167 + m.b156*m.b168 + 0.5*m.b156*m.b171 + 0.5*m.b156*m.b172 +
m.b156*m.b175 + 0.5*m.b156*m.b176 + 0.5*m.b156*m.b178 + 0.5*m.b156*m.b183 + 0.5*m.b156*m.b186 +
0.5*m.b156*m.b251 + 0.5*m.b156*m.b263 + 0.5*m.b156*m.b271 + 0.5*m.b156*m.b284 + m.b156*m.x846 +
0.5*m.b157*m.b167 + m.b157*m.b168 + 0.5*m.b157*m.b171 + 0.5*m.b157*m.b172 + m.b157*m.b175 + 0.5*
m.b157*m.b176 + 0.5*m.b157*m.b178 + 0.5*m.b157*m.b183 + 0.5*m.b157*m.b186 + 0.5*m.b157*m.b251 +
0.5*m.b157*m.b263 + 0.5*m.b157*m.b271 + 0.5*m.b157*m.b284 + m.b157*m.x846 + 0.5*m.b158*m.b161 +
0.5*m.b158*m.b163 + 0.5*m.b158*m.b164 + 0.5*m.b158*m.b165 + 0.5*m.b158*m.b166 + 0.5*m.b158*m.b180
+ 0.5*m.b158*m.b181 + 0.5*m.b158*m.b252 + 0.5*m.b158*m.b253 + 0.5*m.b158*m.b265 + 0.5*m.b158*
m.b298 + 0.5*m.b158*m.b300 + 0.5*m.b158*m.b318 + 0.5*m.b158*m.b334 + 0.5*m.b158*m.b337 + 0.5*
m.b158*m.b357 + 0.5*m.b158*m.b374 + 0.5*m.b158*m.b383 + 0.5*m.b158*m.b397 + 0.5*m.b158*m.b402 +
0.5*m.b158*m.b410 + 0.5*m.b158*m.b411 + 0.5*m.b158*m.b505 + 0.5*m.b158*m.b509 + 0.5*m.b158*m.b510
+ 0.5*m.b158*m.b530 + 0.5*m.b158*m.b536 + 0.5*m.b158*m.b544 + 0.5*m.b158*m.b547 + 0.5*m.b158*
m.b553 + 0.5*m.b158*m.b562 + 0.5*m.b158*m.b569 + 0.5*m.b158*m.b574 + 0.5*m.b158*m.b576 + 0.5*
m.b158*m.b583 + 0.5*m.b158*m.b586 + 0.5*m.b158*m.b591 + 0.5*m.b158*m.b602 + 0.5*m.b158*m.b605 +
0.5*m.b158*m.b641 + 0.5*m.b158*m.b645 + 0.5*m.b158*m.b648 + 0.5*m.b158*m.b650 + 0.5*m.b158*m.b656
+ 0.5*m.b158*m.b658 + 0.5*m.b158*m.b662 + 0.5*m.b158*m.b666 + m.b158*m.x847 + 0.5*m.b159*m.b160
+ 0.5*m.b159*m.b167 + 0.5*m.b159*m.b170 + 0.5*m.b159*m.b171 + 0.5*m.b159*m.b173 + 0.5*m.b159*
m.b177 + 0.5*m.b159*m.b179 + 0.5*m.b159*m.b336 + 0.5*m.b159*m.b358 + 0.5*m.b159*m.b370 + 0.5*
m.b159*m.b382 + 0.5*m.b159*m.b387 + 0.5*m.b159*m.b389 + 0.5*m.b159*m.b403 + 0.5*m.b159*m.b416 +
0.5*m.b159*m.b422 + 0.5*m.b159*m.b425 + 0.5*m.b159*m.b426 + 0.5*m.b159*m.b437 + 0.5*m.b159*m.b468
+ 0.5*m.b159*m.b469 + 0.5*m.b159*m.b476 + 0.5*m.b159*m.b478 + 0.5*m.b159*m.b498 + 0.5*m.b159*
m.b501 + 0.5*m.b159*m.b502 + 0.5*m.b159*m.b512 + 0.5*m.b159*m.b522 + 0.5*m.b159*m.b525 + 0.5*
m.b159*m.b529 + 0.5*m.b159*m.b538 + 0.5*m.b159*m.b571 + 0.5*m.b159*m.b604 + 0.5*m.b159*m.b610 +
0.5*m.b159*m.b629 + 0.5*m.b159*m.b637 + 0.5*m.b159*m.b653 + 0.5*m.b160*m.b167 + m.b160*m.b170 +
0.5*m.b160*m.b171 + 0.5*m.b160*m.b173 + 0.5*m.b160*m.b177 + 0.5*m.b160*m.b179 + m.b161*m.b163 +
0.5*m.b161*m.b164 + 0.5*m.b161*m.b165 + 0.5*m.b161*m.b166 + 0.5*m.b161*m.b180 + m.b161*m.b181 +
0.5*m.b161*m.b252 + 0.5*m.b161*m.b253 + 0.5*m.b161*m.b265 + 0.5*m.b161*m.b298 + 0.5*m.b161*m.b300
+ 0.5*m.b161*m.b318 + 0.5*m.b161*m.b334 + 0.5*m.b161*m.b337 + 0.5*m.b161*m.b357 + 0.5*m.b161*
m.b374 + 0.5*m.b161*m.b383 + 0.5*m.b161*m.b397 + 0.5*m.b161*m.b402 + 0.5*m.b161*m.b410 + 0.5*
m.b161*m.b411 + 0.5*m.b161*m.b505 + 0.5*m.b161*m.b509 + 0.5*m.b161*m.b510 + 0.5*m.b161*m.b530 +
0.5*m.b161*m.b536 + 0.5*m.b161*m.b544 + 0.5*m.b161*m.b547 + 0.5*m.b161*m.b553 + 0.5*m.b161*m.b562
+ 0.5*m.b161*m.b569 + 0.5*m.b161*m.b574 + 0.5*m.b161*m.b576 + 0.5*m.b161*m.b583 + 0.5*m.b161*
m.b586 + 0.5*m.b161*m.b591 + 0.5*m.b161*m.b602 + 0.5*m.b161*m.b605 + 0.5*m.b161*m.b641 + 0.5*
m.b161*m.b645 + 0.5*m.b161*m.b648 + 0.5*m.b161*m.b650 + 0.5*m.b161*m.b656 + 0.5*m.b161*m.b658 +
0.5*m.b161*m.b662 + 0.5*m.b161*m.b666 + 0.5*m.b162*m.b169 + 0.5*m.b162*m.b174 + m.b162*m.b184 +
0.5*m.b162*m.b251 + 0.5*m.b162*m.b263 + 0.5*m.b162*m.b271 + 0.5*m.b162*m.b284 + 0.5*m.b163*m.b164
+ 0.5*m.b163*m.b165 + 0.5*m.b163*m.b166 + 0.5*m.b163*m.b180 + m.b163*m.b181 + 0.5*m.b163*m.b252
+ 0.5*m.b163*m.b253 + 0.5*m.b163*m.b265 + 0.5*m.b163*m.b298 + 0.5*m.b163*m.b300 + 0.5*m.b163*
m.b318 + 0.5*m.b163*m.b334 + 0.5*m.b163*m.b337 + 0.5*m.b163*m.b357 + 0.5*m.b163*m.b374 + 0.5*
m.b163*m.b383 + 0.5*m.b163*m.b397 + 0.5*m.b163*m.b402 + 0.5*m.b163*m.b410 + 0.5*m.b163*m.b411 +
0.5*m.b163*m.b505 + 0.5*m.b163*m.b509 + 0.5*m.b163*m.b510 + 0.5*m.b163*m.b530 + 0.5*m.b163*m.b536
+ 0.5*m.b163*m.b544 + 0.5*m.b163*m.b547 + 0.5*m.b163*m.b553 + 0.5*m.b163*m.b562 + 0.5*m.b163*
m.b569 + 0.5*m.b163*m.b574 + 0.5*m.b163*m.b576 + 0.5*m.b163*m.b583 + 0.5*m.b163*m.b586 + 0.5*
m.b163*m.b591 + 0.5*m.b163*m.b602 + 0.5*m.b163*m.b605 + 0.5*m.b163*m.b641 + 0.5*m.b163*m.b645 +
0.5*m.b163*m.b648 + 0.5*m.b163*m.b650 + 0.5*m.b163*m.b656 + 0.5*m.b163*m.b658 + 0.5*m.b163*m.b662
+ 0.5*m.b163*m.b666 + 0.5*m.b164*m.b165 + 0.5*m.b164*m.b166 + m.b164*m.b180 + 0.5*m.b164*m.b181
+ 0.5*m.b164*m.b252 + 0.5*m.b164*m.b253 + 0.5*m.b164*m.b265 + 0.5*m.b164*m.b298 + 0.5*m.b164*
m.b300 + 0.5*m.b164*m.b318 + 0.5*m.b164*m.b334 + 0.5*m.b164*m.b337 + 0.5*m.b164*m.b357 + 0.5*
m.b164*m.b374 + 0.5*m.b164*m.b383 + 0.5*m.b164*m.b397 + 0.5*m.b164*m.b402 + 0.5*m.b164*m.b410 +
0.5*m.b164*m.b411 + 0.5*m.b164*m.b505 + 0.5*m.b164*m.b509 + 0.5*m.b164*m.b510 + 0.5*m.b164*m.b530
+ 0.5*m.b164*m.b536 + 0.5*m.b164*m.b544 + 0.5*m.b164*m.b547 + 0.5*m.b164*m.b553 + 0.5*m.b164*
m.b562 + 0.5*m.b164*m.b569 + 0.5*m.b164*m.b574 + 0.5*m.b164*m.b576 + 0.5*m.b164*m.b583 + 0.5*
m.b164*m.b586 + 0.5*m.b164*m.b591 + 0.5*m.b164*m.b602 + 0.5*m.b164*m.b605 + 0.5*m.b164*m.b641 +
0.5*m.b164*m.b645 + 0.5*m.b164*m.b648 + 0.5*m.b164*m.b650 + 0.5*m.b164*m.b656 + 0.5*m.b164*m.b658
+ 0.5*m.b164*m.b662 + 0.5*m.b164*m.b666 + m.b164*m.x843 + 0.5*m.b165*m.b166 + 0.5*m.b165*m.b180
+ 0.5*m.b165*m.b181 + 0.5*m.b165*m.b252 + 0.5*m.b165*m.b253 + 0.5*m.b165*m.b265 + 0.5*m.b165*
m.b298 + 0.5*m.b165*m.b300 + 0.5*m.b165*m.b318 + 0.5*m.b165*m.b334 + 0.5*m.b165*m.b337 + 0.5*
m.b165*m.b357 + 0.5*m.b165*m.b374 + 0.5*m.b165*m.b383 + 0.5*m.b165*m.b397 + 0.5*m.b165*m.b402 +
0.5*m.b165*m.b410 + 0.5*m.b165*m.b411 + 0.5*m.b165*m.b431 + 0.5*m.b165*m.b505 + 0.5*m.b165*m.b507
+ 0.5*m.b165*m.b509 + 0.5*m.b165*m.b510 + 0.5*m.b165*m.b519 + 0.5*m.b165*m.b530 + 0.5*m.b165*
m.b533 + 0.5*m.b165*m.b536 + 0.5*m.b165*m.b544 + 0.5*m.b165*m.b547 + 0.5*m.b165*m.b553 + 0.5*
m.b165*m.b562 + 0.5*m.b165*m.b569 + 0.5*m.b165*m.b574 + 0.5*m.b165*m.b576 + 0.5*m.b165*m.b583 +
0.5*m.b165*m.b586 + 0.5*m.b165*m.b591 + 0.5*m.b165*m.b602 + 0.5*m.b165*m.b605 + 0.5*m.b165*m.b616
+ 0.5*m.b165*m.b641 + 0.5*m.b165*m.b642 + 0.5*m.b165*m.b645 + 0.5*m.b165*m.b648 + 0.5*m.b165*
m.b650 + 0.5*m.b165*m.b652 + 0.5*m.b165*m.b656 + 0.5*m.b165*m.b658 + 0.5*m.b165*m.b662 + 0.5*
m.b165*m.b666 + 0.5*m.b166*m.b180 + 0.5*m.b166*m.b181 + 0.5*m.b166*m.b252 + 0.5*m.b166*m.b253 +
0.5*m.b166*m.b265 + 0.5*m.b166*m.b298 + 0.5*m.b166*m.b300 +
#!/usr/bin/env python
########################################################################################
########################################################################################
# THIS FILE IS DEPRECATED, USE arm_moveit2.py INSTEAD
########################################################################################
########################################################################################
import sys
import os
import rospy
import moveit_commander
import moveit_msgs.msg
import moveit_msgs.srv
import geometry_msgs.msg
import std_msgs.msg
import wpi_jaco_msgs.msg
import wpi_jaco_msgs.srv
from math import pi, floor, ceil, fabs
print("*"*80)
print("THIS FILE IS DEPRECATED, USE arm_moveit2.py INSTEAD")
print("*"*80)
class ArmMoveIt:
def __init__(self, planning_frame='base_link', default_planner="RRTConnectkConfigDefault", orientation_tolerance=None):
# Make sure the moveit service is up and running
rospy.logwarn("Waiting for MoveIt! to load")
try:
rospy.wait_for_service('compute_ik')
except rospy.ROSException, e:
rospy.logerr("No moveit service detected. Exiting")
exit()
else:
rospy.loginfo("MoveIt detected: arm planner loading")
# Check if we're using the 7dof
if os.environ.get("ROBOT_NAME") == 'poli2':
is_7dof = True
else:
is_7dof = os.environ['VECTOR_HAS_KINOVA_7DOF_ARM']
# self.pose = geometry_msgs.msg.PoseStamped()
## Instantiate a RobotCommander object. This object is an interface to
## the robot as a whole.
self.robot = moveit_commander.RobotCommander()
## Instantiate a PlanningSceneInterface object. This object is an interface
## to the world surrounding the robot.
self.scene = moveit_commander.PlanningSceneInterface()
## Instantiate a MoveGroupCommander object. This object is an interface
## to one group of joints. In this case the group is the joints in the left
## arm. This interface can be used to plan and execute motions on the left
## arm.
self.group = [moveit_commander.MoveGroupCommander("arm")]
# Set orientation tolerance if provided
if orientation_tolerance is not None:
rospy.loginfo("Setting orientation tolerance to {}".format(orientation_tolerance))
self.group[0].set_goal_orientation_tolerance(orientation_tolerance)
# Set the planner
self.planner = default_planner
# Set the planning pose reference frame
self.group[0].set_pose_reference_frame(planning_frame)
# Set continuous joint names
if is_7dof:
self.continuous_joints = ['joint_1','joint_3','joint_5','joint_7']
# NOTE: order that moveit currently is configured
# ['joint_1, joint_2, joint_3, joint_4, joint_5, joint_6, joint_7']
self.continuous_joints_list = [0,2,4,6] # joints that are continous
else:
self.continuous_joints = ['shoulder_pan_joint','wrist_1_joint','wrist_2_joint','wrist_3_joint']
# NOTE: order that moveit currently is configured
# ['right_shoulder_pan_joint', 'right_shoulder_lift_joint', 'right_elbow_joint', 'right_wrist_1_joint', 'right_wrist_2_joint', 'right_wrist_3_joint']
self.continuous_joints_list = [0,3,4,5] # joints that are continous
def get_IK(self, newPose, root = None):
## Given a target pose (geometry_msgs.msg.Pose()), return the corresponding joint angles as a list (or None if no IK solution is found)
rospy.wait_for_service('compute_ik')
compute_ik = rospy.ServiceProxy('compute_ik', moveit_msgs.srv.GetPositionIK)
wkPose = geometry_msgs.msg.PoseStamped()
if root is None:
wkPose.header.frame_id = self.group[0].get_planning_frame() # name:odom
else:
wkPose.header.frame_id = root
wkPose.header.stamp=rospy.Time.now()
wkPose.pose=newPose
msgs_request = moveit_msgs.msg.PositionIKRequest()
msgs_request.group_name = self.group[0].get_name() # name: arm
# msgs_request.robot_state = robot.get_current_state()
msgs_request.pose_stamped = wkPose
msgs_request.timeout.secs = 2
msgs_request.avoid_collisions = False
try:
jointAngle=compute_ik(msgs_request)
ans=list(jointAngle.solution.joint_state.position[2:9])
if jointAngle.error_code.val == -31:
print 'No IK solution'
return None
return ans
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def get_FK(self, root = 'base_link'):
rospy.wait_for_service('compute_fk')
compute_fk = rospy.ServiceProxy('compute_fk', moveit_msgs.srv.GetPositionFK)
header = std_msgs.msg.Header()
header.frame_id = root
header.stamp = rospy.Time.now()
fk_link_names = ['j2s7s300_ee_link']
robot_state = self.robot.get_current_state()
try:
reply=compute_fk(header,fk_link_names,robot_state)
return reply.pose_stamped
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def get_FK_wpi(self, joints = None):
rospy.wait_for_service('/jaco_arm/kinematics/fk')
compute_fk = rospy.ServiceProxy('/jaco_arm/kinematics/fk', wpi_jaco_msgs.srv.JacoFK)
if joints is None:
joints = [pi,pi,pi,pi,pi,pi,pi]
try:
pose=compute_fk(joints)
return pose
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def plan_targetInput(self, target, joint_flag):
'''Generic target planner; joint_flag selects between a joint-space and a pose target.'''
try:
if (joint_flag):
self.group[0].set_joint_value_target(self._simplify_joints(target))
else:
self.group[0].set_pose_target(target)
self.group[0].set_planner_id(self.planner)
planAns=self.group[0].plan()
return planAns
except:
print 'No plan found, see the moveit terminal for the error'
print("Unexpected error:", sys.exc_info()[0])
return None
def plan_targetInputWaypoint(self, targets, joint_flag, merged=False, current_joints=None):
'''Plan through a list of waypoint targets; joint_flag selects joint-space or pose targets.'''
## input: list of targets (geometry_msgs.msg.Pose() or joint configurations)
## output: plan(s) from the current pose through all of the target poses
## If merged is True - a single stitched plan is returned
## If merged is False - a list of individual plans is returned
# Plan each pose individually and stitch together
try:
full_plan = []
points = []
current_state = self.robot.get_current_state()
if current_joints is not None:
current_state = self.set_robot_state_joint_dict(current_joints)
for target in targets:
self.group[0].set_start_state(current_state)
plan = self.plan_targetInput(target, joint_flag)
if plan is not None:
full_plan.append(plan)
if merged:
points = self.merge_points(points, plan.joint_trajectory.points)
traj = plan.joint_trajectory
current_state = self.set_robot_state_pose(traj)
else:
print 'No full plan found, see the moveit terminal for the error'
return None
if merged:
plan = full_plan[0]
plan.joint_trajectory.points = points
return plan
else:
return full_plan
except:
print 'No plan found, see the moveit terminal for the error'
print("Unexpected error:", sys.exc_info()[0])
return None
def set_robot_state_pose(self, traj):
'''Gets the current robot state pose and sets it to the joint pose'''
cur_robot_state = self.robot.get_current_state()
last_point = traj.points[-1].positions
# convert the joints to array
joints = [x for x in cur_robot_state.joint_state.position]
for i in xrange(len(traj.joint_names)):
# Find index of joint
joint_name = traj.joint_names[i]
idx = cur_robot_state.joint_state.name.index(joint_name)
joints[idx] = last_point[i]
# Set full joint tuple now
cur_robot_state.joint_state.position = joints
return cur_robot_state
def set_robot_state_joint_dict(self, joint_dict):
cur_robot_state = self.robot.get_current_state()
joints = [x for x in cur_robot_state.joint_state.position]
for joint_name in joint_dict:
idx = cur_robot_state.joint_state.name.index(joint_name)
joints[idx] = joint_dict[joint_name]
cur_robot_state.joint_state.position = joints
return cur_robot_state
def merge_points(self, points, new_points):
'''Merge two sets of trajectory points, offsetting the new points' time_from_start so they follow the existing ones'''
# Check if this is the first set
if len(points) < 1:
return new_points
all_points = points
# Pull out the last time from current points
last_point_time = points[-1].time_from_start+rospy.Duration(0.1)
for point in new_points:
point.time_from_start = point.time_from_start+last_point_time
all_points = all_points + [point]
return all_points
def _simplify_angle(self, angle):
# Very simple function that makes sure the angles are between -pi and pi
if angle > pi:
while angle > pi:
angle -= 2*pi
elif angle < -pi:
while angle < -pi:
angle += 2*pi
return angle
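# Illustrative values (not in the original file):
#   _simplify_angle(3*pi/2)  -> -pi/2
#   _simplify_angle(-3*pi/2) ->  pi/2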
def _simplify_joints(self, joint_dict):
# Helper function that simplifies a dict or list of joint values, wrapping continuous joints into [-pi, pi]
if isinstance(joint_dict, dict):
simplified_joints = dict()
for joint in joint_dict:
# Pull out the name of the joint
joint_name = '_'.join(joint.split('_')[1::])
if joint_name in self.continuous_joints:
simplified_joints[joint] = self._simplify_angle(joint_dict[joint])
else:
simplified_joints[joint] = joint_dict[joint]
elif isinstance(joint_dict, list):
simplified_joints = []
for i in xrange(len(joint_dict)):
a = joint_dict[i]
if i in self.continuous_joints_list:
simplified_joints.append(self._simplify_angle(a))
else:
simplified_joints.append(a)
return simplified_joints
'''Older functions - left for backwards compatibility'''
def plan_jointTargetInput(self,target_joint):
## input: target joint angles (list) of the robot
## output: plan from current joint angles to the target one
try:
self.group[0].set_joint_value_target(self._simplify_joints(target_joint))
self.group[0].set_planner_id(self.planner)
planAns=self.group[0].plan()
return planAns
except:
print 'No plan found, see the moveit terminal for the error'
print("Unexpected error:", sys.exc_info()[0])
return None
def plan_poseTargetInput(self,target_pose):
## input: target pose (geometry_msgs.msg.Pose())
## output: plan from current pose to the target one
try:
self.group[0].set_pose_target(target_pose)
self.group[0].set_planner_id(self.planner)
planAns=self.group[0].plan()
return planAns
except:
print 'No plan found, see the moveit terminal for the error'
print("Unexpected error:", sys.exc_info()[0])
return None
def box_table_scene(self) :
#Scene : add box
# after loading this object/scene, need to do "add" ==> "planning scene"
# in the rviz interface if one want to see the box
rospy.sleep(2)
self.scene.remove_world_object("table_box")
p = geometry_msgs.msg.PoseStamped()
p.header.frame_id = self.robot.get_planning_frame()
p.pose.position.x = 1.64
p.pose.position.y = 0.0
p.pose.position.z = 0.25
p.pose.orientation.w = 0.0
self.scene.add_box("table_box",p,(0.75, 1, 0.5))
rospy.sleep(5)
def wayPointIK(self, wps, numSteps = None, ik_root = None):
if numSteps is None:
numSteps = 3
jointWps = []
for i in range(0, len(wps)):
jointP = self.get_IK(wps[i], ik_root)
if jointP is None:
jointWps = None
break
jointWps.append(jointP)
return jointWps
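# Illustrative sketch (not part of the original file): typical use of ArmMoveIt.
# The node name and the 7-DOF joint target below are placeholders for the example.
def _example_plan_and_execute():
    rospy.init_node('arm_moveit_example')
    arm = ArmMoveIt()
    target_joints = [pi, pi, pi, pi, pi, pi, pi]  # placeholder joint angles
    plan = arm.plan_jointTargetInput(target_joints)
    if plan is not None and len(plan.joint_trajectory.points) > 0:
        arm.group[0].execute(plan)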
def ask_scene_integration(arm):
# Ask the user if want to integrate a box scene
answer= input("""\n Integrate a box as a table from code ? (1 or 0)
(if 1: box can't be displaced nor resized by user, if 0: no scene (can always do add from rviz interface) ) \n""")
if answer == 1:
arm.box_table_scene()
print "\n Box inserted; to see it ==> rviz interface ==> add button==> planning scene "
return
else:
print "\n No scene added"
return
def ask_position(arm,tarPose):
#Ask the user the values of the target position
while True:
try:
inputPosition=input(""" \n Target position coord. (format: x,y,z or write -1 to take the robot current position ): """)
if inputPosition == -1:
inputPosition=tarPose.position
return inputPosition
except (ValueError,IOError,NameError):
print("\n Please, enter the coordinate in the following format: x,y,z ")
continue
else:
if len(list(inputPosition)) == 3:
poseTmp= geometry_msgs.msg.Pose()
poseTmp.position.x=inputPosition[0]
poseTmp.position.y=inputPosition[1]
poseTmp.position.z=inputPosition[2]
return poseTmp.position
else:
print("\n Please, enter the coordinate in the following format: x,y,z ")
continue
def ask_orientation(arm,tarPose):
# Ask the user the values of the target quaternion
while True:
try:
inputQuat=input(""" \n Target quaternion coordi. (format: qx,qy,qz,qw or write -1 to take the robot current quaternion ):""")
if inputQuat == -1:
inputQuat=arm.group[0].get_current_pose().pose.orientation
return inputQuat
:math:`\mathit{h}^2` constrained to ``i / 100``. The values for
``1 <= i <= 99`` are normalized to sum to 1, and ``a[0]`` and ``a[100]``
are set to ``nan``.
Returns
-------
:class:`ndarray` of :obj:`float64`
Normalized likelihood values for :math:`\mathit{h}^2`.
"""
log_lkhd = np.zeros(101, dtype=np.float64)
log_lkhd[0], log_lkhd[100] = np.nan, np.nan
for h2 in range(1, 100):
gamma = h2 / (100.0 - h2)
log_lkhd[h2] = -self.compute_neg_log_reml(np.log(gamma))
log_lkhd -= np.max(log_lkhd[1:-1])
lkhd = np.exp(log_lkhd)
lkhd /= np.sum(lkhd[1:-1])
return lkhd
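# Illustrative use (not in the original file): pair the normalized likelihood
# with its h^2 grid after fitting the null model.
#   lkhd = model.h_sq_normalized_lkhd()
#   h2_grid = np.arange(101) / 100.0   # lkhd[0] and lkhd[100] are nan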
@typecheck_method(pa_t_path=str,
a_t_path=nullable(str),
partition_size=nullable(int))
def fit_alternatives(self, pa_t_path, a_t_path=None, partition_size=None):
r"""Fit and test alternative model for each augmented design matrix in parallel.
Notes
-----
The alternative model is fit using REML constrained to the value of
:math:`\gamma` set by :meth:`fit`.
The likelihood ratio test of fixed effect parameter :math:`\beta_\star`
uses (non-restricted) maximum likelihood:
.. math::
\chi^2 = 2 \log\left(\frac{
\max_{\beta_\star, \beta, \sigma^2}\mathrm{N}
(y \, | \, x_\star \beta_\star + X \beta; \sigma^2(K + \gamma^{-1}I))}
{\max_{\beta, \sigma^2} \mathrm{N}
(y \, | \, x_\star \cdot 0 + X \beta; \sigma^2(K + \gamma^{-1}I))}
\right)
The p-value is given by the tail probability under a chi-squared
distribution with one degree of freedom.
The resulting table has the following fields:
.. list-table::
:header-rows: 1
* - Field
- Type
- Value
* - `idx`
- int64
- Index of augmented design matrix.
* - `beta`
- float64
- :math:`\beta_\star`
* - `sigma_sq`
- float64
- :math:`\sigma^2`
* - `chi_sq`
- float64
- :math:`\chi^2`
* - `p_value`
- float64
- p-value
:math:`(P_r A)^T` and :math:`A^T` (if given) must have the same number
of rows (augmentations). These rows are grouped into partitions for
parallel processing. The number of partitions equals the ceiling of
``n_rows / partition_size``, and should be at least the number of cores
to make use of all cores. By default, there is one partition per row of
blocks in :math:`(P_r A)^T`. Setting the partition size to an exact
(rather than approximate) divisor or multiple of the block size reduces
superfluous shuffling of data.
The number of columns in each block matrix must be less than :math:`2^{31}`.
Warning
-------
The block matrices must be stored in row-major format, as results
from :meth:`.BlockMatrix.write` with ``force_row_major=True`` and from
:meth:`.BlockMatrix.write_from_entry_expr`. Otherwise, this method
will produce an error message.
Parameters
----------
pa_t_path: :obj:`str`
Path to block matrix :math:`(P_r A)^T` with shape :math:`(m, r)`.
Each row is a projected augmentation :math:`P_r x_\star` of :math:`P_r X`.
a_t_path: :obj:`str`, optional
Path to block matrix :math:`A^T` with shape :math:`(m, n)`.
Each row is an augmentation :math:`x_\star` of :math:`X`.
Include for low-rank inference.
partition_size: :obj:`int`, optional
Number of rows to process per partition.
Default given by block size of :math:`(P_r A)^T`.
Returns
-------
:class:`.Table`
Table of results for each augmented design matrix.
"""
from hail.table import Table
self._check_dof(self.f + 1)
if self.low_rank and a_t_path is None:
raise ValueError('model is low-rank so a_t is required.')
elif not (self.low_rank or a_t_path is None):
raise ValueError('model is full-rank so a_t must not be set.')
if self._scala_model is None:
self._set_scala_model()
backend = Env.spark_backend('LinearMixedModel.fit_alternatives')
jfs = backend.fs._jfs
if partition_size is None:
block_size = Env.hail().linalg.BlockMatrix.readMetadata(jfs, pa_t_path).blockSize()
partition_size = block_size
elif partition_size <= 0:
raise ValueError(f'partition_size must be positive, found {partition_size}')
jpa_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, pa_t_path, partition_size)
if a_t_path is None:
maybe_ja_t = None
else:
maybe_ja_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, a_t_path, partition_size)
return Table._from_java(backend._jbackend.pyFitLinearMixedModel(
self._scala_model, jpa_t, maybe_ja_t))
@typecheck_method(pa=np.ndarray, a=nullable(np.ndarray), return_pandas=bool)
def fit_alternatives_numpy(self, pa, a=None, return_pandas=False):
r"""Fit and test alternative model for each augmented design matrix.
Notes
-----
This Python-only implementation runs serially on leader (master). See
the scalable implementation :meth:`fit_alternatives` for documentation
of the returned table.
Parameters
----------
pa: :class:`ndarray`
Projected matrix :math:`P_r A` of alternatives with shape :math:`(r, m)`.
Each column is a projected augmentation :math:`P_r x_\star` of :math:`P_r X`.
a: :class:`ndarray`, optional
Matrix :math:`A` of alternatives with shape :math:`(n, m)`.
Each column is an augmentation :math:`x_\star` of :math:`X`.
Required for low-rank inference.
return_pandas: :obj:`bool`
If true, return pandas dataframe. If false, return Hail table.
Returns
-------
:class:`.Table` or :class:`.pandas.DataFrame`
Table of results for each augmented design matrix.
"""
self._check_dof(self.f + 1)
if not self._fitted:
raise Exception("null model is not fit. Run 'fit' first.")
n_cols = pa.shape[1]
assert pa.shape[0] == self.r
if self.low_rank:
assert a.shape[0] == self.n and a.shape[1] == n_cols
data = [(i,) + self._fit_alternative_numpy(pa[:, i], a[:, i]) for i in range(n_cols)]
else:
data = [(i,) + self._fit_alternative_numpy(pa[:, i], None) for i in range(n_cols)]
df = pd.DataFrame.from_records(data, columns=['idx', 'beta', 'sigma_sq', 'chi_sq', 'p_value'])
if return_pandas:
return df
else:
return Table.from_pandas(df, key='idx')
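# Illustrative use (not in the original file): test a few random augmentations
# against an already-fit full-rank null model. The shapes follow the docstring
# above; the random data is purely a placeholder.
#   pa = np.random.rand(model.r, 5)
#   results = model.fit_alternatives_numpy(pa, return_pandas=True)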
def _fit_alternative_numpy(self, pa, a):
from scipy.linalg import solve, LinAlgError
from scipy.stats.distributions import chi2
gamma = self.gamma
dpa = self._d_alt * pa
# single thread => no need to copy
ydy = self._ydy_alt
xdy = self._xdy_alt
xdx = self._xdx_alt
if self.low_rank:
xdy[0] = self.py @ dpa + gamma * (self.y @ a)
xdx[0, 0] = pa @ dpa + gamma * (a @ a)
xdx[0, 1:] = self.px.T @ dpa + gamma * (self.x.T @ a)
else:
xdy[0] = self.py @ dpa
xdx[0, 0] = pa @ dpa
xdx[0, 1:] = self.px.T @ dpa
try:
beta = solve(xdx, xdy, assume_a='pos') # only uses upper triangle
residual_sq = ydy - xdy.T @ beta
sigma_sq = residual_sq / self._dof_alt
chi_sq = self.n * np.log(self._residual_sq / residual_sq) # division => precision
p_value = chi2.sf(chi_sq, 1)
return beta[0], sigma_sq, chi_sq, p_value
except LinAlgError:
return tuple(4 * [float('nan')])
def _set_scala_model(self):
from hail.utils.java import Env
from hail.linalg import _jarray_from_ndarray, _breeze_from_ndarray
if not self._fitted:
raise Exception("null model is not fit. Run 'fit' first.")
self._scala_model = Env.hail().stats.LinearMixedModel.pyApply(
self.gamma,
self._residual_sq,
_jarray_from_ndarray(self.py),
_breeze_from_ndarray(self.px),
_jarray_from_ndarray(self._d_alt),
self._ydy_alt,
_jarray_from_ndarray(self._xdy_alt),
_breeze_from_ndarray(self._xdx_alt),
_jarray_from_ndarray(self.y) if self.low_rank else None,
_breeze_from_ndarray(self.x) if self.low_rank else None
)
def _check_dof(self, f=None):
if f is None:
f = self.f
dof = self.n - f
if dof <= 0:
raise ValueError(f"{self.n} {plural('observation', self.n)} with {f} fixed {plural('effect', f)} "
f"implies {dof} {plural('degree', dof)} of freedom. Must be positive.")
@classmethod
@typecheck_method(y=np.ndarray,
x=np.ndarray,
k=np.ndarray,
p_path=nullable(str),
overwrite=bool)
def from_kinship(cls, y, x, k, p_path=None, overwrite=False):
r"""Initializes a model from :math:`y`, :math:`X`, and :math:`K`.
Examples
--------
>>> from hail.stats import LinearMixedModel
>>> y = np.array([0.0, 1.0, 8.0, 9.0])
>>> x = np.array([[1.0, 0.0],
... [1.0, 2.0],
... [1.0, 1.0],
... [1.0, 4.0]])
>>> k = np.array([[ 1. , -0.8727875 , 0.96397335, 0.94512946],
... [-0.8727875 , 1. , -0.93036112, -0.97320323],
... [ 0.96397335, -0.93036112, 1. , 0.98294169],
... [ 0.94512946, -0.97320323, 0.98294169, 1. ]])
>>> model, p = LinearMixedModel.from_kinship(y, x, k)
>>> model.fit()
>>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK
0.2525148830695317
>>> model.s # doctest: +SKIP_OUTPUT_CHECK
array([3.83501295, 0.13540343, 0.02454114, 0.00504248])
Truncate to a rank :math:`r=2` model:
>>> r = 2
>>> s_r = model.s[:r]
>>> p_r = p[:r, :]
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x)
>>> model_r.fit()
>>> model_r.h_sq  # doctest: +SKIP_OUTPUT_CHECK
0.25193197591429695
Notes
-----
This method eigendecomposes :math:`K = P^T S P` on the leader (master)
and returns ``LinearMixedModel(p @ y, p @ x, s)`` and ``p``.
The performance of eigendecomposition depends critically on the number
of leader (master) cores and the NumPy / SciPy configuration, viewable
with ``np.show_config()``. For Intel machines, we recommend installing
the `MKL <https://anaconda.org/anaconda/mkl>`__ package for Anaconda.
`k` must be positive semi-definite; symmetry is not checked as only the
lower triangle is used.
Parameters
----------
y: :class:`ndarray`
:math:`n` vector of observations.
x: :class:`ndarray`
:math:`n \times p` matrix of fixed effects.
k: :class:`ndarray`
:math:`n \times n` positive semi-definite kernel :math:`K`.
p_path: :obj:`str`, optional
Path at which to write :math:`P` as a block matrix.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
Returns
-------
model: :class:`LinearMixedModel`
Model constructed from :math:`y`, :math:`X`, and :math:`K`.
p: :class:`ndarray`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
"""
_check_dims(y, "y", 1)
_check_dims(x, "x", 2)
_check_dims(k, "k", 2)
n = k.shape[0]
if k.shape[1] != n:
raise ValueError("from_kinship: 'k' must be a square matrix")
if y.shape[0] != n:
raise ValueError("from_kinship: 'y' and 'k' must have the same "
"number of rows")
if x.shape[0] != n:
raise ValueError("from_kinship: 'x' and 'k' must have the same "
                 "number of rows")
})
for item in items:
setResponseTimeLimit()
itemModel.remove(item, progress=progress, **kwargs)
if progress:
progress.update(increment=1, message='Deleted item %s' % item['name'])
# subsequent operations take a long time, so free the cursor's resources
items.close()
# Delete all child folders
folders = self.find({
'parentId': folder['_id'],
'parentCollection': 'folder'
})
for subfolder in folders:
self.remove(subfolder, progress=progress, **kwargs)
folders.close()
def remove(self, folder, progress=None, **kwargs):
"""
Delete a folder recursively.
:param folder: The folder document to delete.
:type folder: dict
:param progress: A progress context to record progress on.
:type progress: girder.utility.progress.ProgressContext or None.
"""
# Remove the contents underneath this folder recursively.
from .upload import Upload
self.clean(folder, progress, **kwargs)
# Delete pending uploads into this folder
uploadModel = Upload()
uploads = uploadModel.find({
'parentId': folder['_id'],
'parentType': 'folder'
})
for upload in uploads:
uploadModel.remove(upload, progress=progress, **kwargs)
uploads.close()
# Delete this folder
AccessControlledModel.remove(self, folder, progress=progress, **kwargs)
if progress:
progress.update(increment=1, message='Deleted folder %s' %
folder['name'])
def childItems(self, folder, limit=0, offset=0, sort=None, filters=None,
**kwargs):
"""
Generator function that yields child items in a folder. Passes any
kwargs to the find function.
:param folder: The parent folder.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
:param filters: Additional query operators.
"""
from .item import Item
q = {
'folderId': folder['_id']
}
q.update(filters or {})
return Item().find(q, limit=limit, offset=offset, sort=sort, **kwargs)
def childFolders(self, parent, parentType, user=None, limit=0, offset=0,
sort=None, filters=None, **kwargs):
"""
This generator will yield child folders of a user, collection, or
folder, with access policy filtering. Passes any kwargs to the find
function.
:param parent: The parent object.
:type parent: dict
:param parentType: The parent type.
:type parentType: 'user', 'folder', or 'collection'
:param user: The user running the query. Only returns folders that this
user can see.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
:param filters: Additional query operators.
"""
if not filters:
filters = {}
parentType = parentType.lower()
if parentType not in ('folder', 'user', 'collection'):
raise ValidationException('The parentType must be folder, collection, or user.')
q = {
'parentId': parent['_id'],
'parentCollection': parentType
}
q.update(filters)
cursor = self.findWithPermissions(
q, sort=sort, user=user, level=AccessType.READ, limit=limit, offset=offset, **kwargs)
return iter(cursor)
def createFolder(self, parent, name, description='', parentType='folder',
public=None, creator=None, allowRename=False, reuseExisting=False):
"""
Create a new folder under the given parent.
:param parent: The parent document. Should be a folder, user, or
collection.
:type parent: dict
:param name: The name of the folder.
:type name: str
:param description: Description for the folder.
:type description: str
:param parentType: What type the parent is:
('folder' | 'user' | 'collection')
:type parentType: str
:param public: Public read access flag.
:type public: bool or None to inherit from parent
:param creator: User document representing the creator of this folder.
:type creator: dict
:param allowRename: if True and a folder or item of this name exists,
automatically rename the folder.
:type allowRename: bool
:param reuseExisting: If a folder with the given name already exists
under the given parent, return that folder rather than creating a
new one.
:type reuseExisting: bool
:returns: The folder document that was created.
"""
if reuseExisting:
existing = self.findOne({
'parentId': parent['_id'],
'name': name,
'parentCollection': parentType
})
if existing:
return existing
parentType = parentType.lower()
if parentType not in ('folder', 'user', 'collection'):
raise ValidationException('The parentType must be folder, collection, or user.')
if parentType == 'folder':
if 'baseParentId' not in parent:
pathFromRoot = self.parentsToRoot(
parent, user=creator, force=True)
parent['baseParentId'] = pathFromRoot[0]['object']['_id']
parent['baseParentType'] = pathFromRoot[0]['type']
else:
parent['baseParentId'] = parent['_id']
parent['baseParentType'] = parentType
now = datetime.datetime.utcnow()
if creator is None:
creatorId = None
else:
creatorId = creator.get('_id', None)
folder = {
'name': name,
'description': description,
'parentCollection': parentType,
'baseParentId': parent['baseParentId'],
'baseParentType': parent['baseParentType'],
'parentId': ObjectId(parent['_id']),
'creatorId': creatorId,
'created': now,
'updated': now,
'size': 0,
'meta': {}
}
if parentType in ('folder', 'collection'):
self.copyAccessPolicies(src=parent, dest=folder, save=False)
if creator is not None:
self.setUserAccess(folder, user=creator, level=AccessType.ADMIN,
save=False)
# Allow explicit public flag override if it's set.
if public is not None and isinstance(public, bool):
self.setPublic(folder, public, save=False)
if allowRename:
self.validate(folder, allowRename=True)
# Now validate and save the folder.
return self.save(folder)
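# Illustrative use (not in the original file): create (or fetch) a subfolder
# under an existing folder document. `parentFolder` and `user` are placeholders.
#   subfolder = Folder().createFolder(
#       parentFolder, 'results', parentType='folder', creator=user,
#       public=False, reuseExisting=True)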
def updateFolder(self, folder):
"""
Updates a folder.
:param folder: The folder document to update
:type folder: dict
:returns: The folder document that was edited.
"""
folder['updated'] = datetime.datetime.utcnow()
# Validate and save the folder
return self.save(folder)
def filter(self, doc, user=None, additionalKeys=None):
"""
Overrides the parent ``filter`` method to add an empty meta field
(if it doesn't exist) to the returned folder.
"""
filteredDoc = super(Folder, self).filter(doc, user, additionalKeys=additionalKeys)
if 'meta' not in filteredDoc:
filteredDoc['meta'] = {}
return filteredDoc
def parentsToRoot(self, folder, curPath=None, user=None, force=False, level=AccessType.READ):
"""
Get the path to traverse to a root of the hierarchy.
:param folder: The folder whose root to find
:type folder: dict
:returns: an ordered list of dictionaries from root to the current folder
"""
curPath = curPath or []
curParentId = folder['parentId']
curParentType = folder['parentCollection']
if curParentType in ('user', 'collection'):
curParentObject = ModelImporter.model(curParentType).load(
curParentId, user=user, level=level, force=force)
if force:
parentFiltered = curParentObject
else:
parentFiltered = ModelImporter.model(curParentType).filter(curParentObject, user)
return [{
'type': curParentType,
'object': parentFiltered
}] + curPath
else:
curParentObject = self.load(curParentId, user=user, level=level, force=force)
curPath = [{
'type': curParentType,
'object': curParentObject if force else self.filter(curParentObject, user)
}] + curPath
return self.parentsToRoot(curParentObject, curPath, user=user, force=force)
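# Illustrative result (not in the original file): for a folder B nested as
# user -> A -> B, parentsToRoot(B) returns
#   [{'type': 'user', 'object': <user doc>}, {'type': 'folder', 'object': <A>}]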
def countItems(self, folder):
"""
Returns the number of items within the given folder.
"""
return self.childItems(folder, fields=()).count()
def countFolders(self, folder, user=None, level=None):
"""
Returns the number of subfolders within the given folder. Access
checking is optional; to circumvent access checks, pass ``level=None``.
:param folder: The parent folder.
:type folder: dict
:param user: If performing access checks, the user to check against.
:type user: dict or None
:param level: The required access level, or None to return the raw
subfolder count.
"""
fields = () if level is None else ('access', 'public')
folders = self.findWithPermissions({
'parentId': folder['_id'],
'parentCollection': 'folder'
}, fields=fields, user=user, level=level)
return folders.count()
def subtreeCount(self, folder, includeItems=True, user=None, level=None):
"""
Return the size of the subtree rooted at the given folder. Includes
the root folder in the count.
:param folder: The root of the subtree.
:type folder: dict
:param includeItems: Whether to include items in the subtree count, or
just folders.
:type includeItems: bool
:param user: If filtering by permission, the user to filter against.
:param level: If filtering by permission, the required permission level.
:type level: AccessLevel
"""
count = 1
if includeItems:
count += self.countItems(folder)
folders = self.findWithPermissions({
'parentId': folder['_id'],
'parentCollection': 'folder'
}, fields='access', user=user, level=level)
count += sum(self.subtreeCount(subfolder, includeItems=includeItems,
user=user, level=level)
for subfolder in folders)
return count
def fileList(self, doc, user=None, path='', includeMetadata=False,
subpath=True, mimeFilter=None, data=True):
"""
This function generates a list of 2-tuples whose first element is the
relative path to the file from the folder's root and whose second
element depends on the value of the `data` flag. If `data=True`, the
second element will be a generator that will generate the bytes of the
file data as stored in the assetstore. If `data=False`, the second
element is the file document itself.
:param doc: The folder to list.
:param user: The user used for access.
:param path: A path prefix to add to the results.
:type path: str
:param includeMetadata: if True and there is any metadata, include a
result which is the JSON string of the
metadata. This is given a name of
metadata[-(number).json that is distinct from
any file within the folder.
:type includeMetadata: bool
:param subpath: if True, add the folder's name to the path.
:type subpath: bool
:param mimeFilter: Optional list of MIME types to filter by. Set to
None to include all files.
:type mimeFilter: `list or tuple`
:param data: If True return raw content of each file as stored in the
assetstore, otherwise return file document.
:type data: bool
:returns: Iterable over files in this folder, where each element is a
tuple of (path name of the file, file-content generator or file document, depending on the ``data`` flag).
import mahjongpy
class MahjongPlayer:
"""
Class representing a player; holds each player's state.
Attributes
----------
hands : list
    The player's hand tiles. A list of MahjongTile.
discards : list
    The player's discard pile (river). A list of MahjongTile.
melds : list
    List of tile lists the player has called.
    A list of 3 or 4 MahjongTile is appended on pon, kan, or chi.
oya : bool
    Whether the player is the dealer (oya).
points : int
    The player's current points.
wind : str
    The player's seat wind. East: 'ton', South: 'nan', West: 'sha', North: 'pei'.
latest_tile : MahjongTile
    The most recently drawn tile.
table : MahjongTable
    The table the player is playing at.
turn : int
    Number of turns elapsed since the start of the hand.
riichi_turn : int
    The turn number on which the player declared riichi.
is_tumo : bool
    Whether the player won by self-draw (tsumo).
is_ron : bool
    Whether the player won by ron.
ankans : list
    List of closed kans. Each entry is a list of 4 MahjongTile.
minkans : list
    List of open kans. Each entry is a list of 4 MahjongTile.
minkos : list
    List of open triplets (minko). Each entry is a list of 3 MahjongTile.
is_riichi : bool
    Whether the player has declared riichi.
is_doubleriichi : bool
    Whether the double riichi yaku applies if the player wins.
is_rinsyankaihou : bool
    Whether the rinshan kaihou yaku applies if the player wins.
score_cache : int
    Cached score of the player.
score_without_tsumibo_cache : int
    Cached score of the player excluding counter-stick (tsumibo) bonuses.
yakus_cache : list
    Cached list of yaku the hand can form.
tiles_cache : list
    Cached copy of the hand tiles.
"""
TILE_TYPES = ['pinzu', 'manzu', 'souzu', 'ton', 'nan', 'sha', 'pei', 'haku', 'hatu', 'tyun']
KYOMU_TILE = mahjongpy.MahjongTile(None)
def __init__(self, hands=[], discards=[], melds=[], oya=False, points=25000, wind='ton', latest_tile=KYOMU_TILE,
table=None, turn=0, is_tumo=False, ankans=[], minkans=[], minkos=[]):
if len(hands)+len(sum(melds,[])) not in [13, 14]: raise ValueError('amount of hands is not 13 or 14.')
self.hands = hands[:]
self.discards = discards[:]
self.melds = melds[:]
self.oya = oya
self.points = points
self.wind = wind
self.latest_tile = latest_tile
self.is_riichi = False
self.turn = turn
self.riichi_turn = 100
self.is_tumo = is_tumo
self.is_ron = False
self.ankans = ankans[:]
self.minkans = minkans[:]
self.minkos = minkos[:]
self.is_doubleriichi = False
self.is_rinsyankaihou = False
self.table = table
self.score_cache = 0
self.score_without_tsumibo_cache = 0
self.yakus_cache = []
self.tiles_cache = []
self.sort()
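# Illustrative use (not in the original file): build a player from 13 tiles
# and check its shanten count. The concrete hand below is a placeholder.
#   hands = [mahjongpy.MahjongTile('manzu', i) for i in range(1, 10)] + \
#           [mahjongpy.MahjongTile('pinzu', i) for i in range(1, 5)]
#   player = MahjongPlayer(hands=hands)
#   player.shanten()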
def sort(self):
"""
Sort the player's hand tiles by suit and number.
Notes
-----
Sorts self.hands in place (destructive).
"""
self.hands = sorted(self.hands)
def hands_display(self):
"""
Display every tile in the player's hand using its tile graphic.
"""
for i in self.hands:
print(i.display)
def hands_name_jp(self):
"""
Print the name of every tile in the player's hand.
"""
for i in self.hands:
print(i.name_jp)
def discards_display(self):
"""
Display every tile in the player's discard pile using its tile graphic.
"""
for i in self.discards:
print(i.display)
def discards_name_jp(self):
"""
Print the name of every tile in the player's discard pile.
"""
for i in self.discards:
print(i.name_jp)
def shanten(self):
"""
Compute the player's shanten number: returns 1 when one away from tenpai (iishanten), 2 when two away, and so on.
Returns
-------
count : int
    The player's shanten number.
"""
counts = [100]
tiles = self.hands[:]
mentus = []
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
self.make_zyantou(tiles, mentus)
tmp = tiles[:]
count = 0
for i in self.TILE_TYPES[:3]:
for j in range(1,8):
if mahjongpy.MahjongTile(i,j) in tmp:
if mahjongpy.MahjongTile(i,j+1) in tmp:
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j+1)))
count += 1
if mahjongpy.MahjongTile(i,j+2) in tmp:
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j+2)))
count += 1
for i in self.TILE_TYPES:
for j in range(1,8):
if tmp.count(mahjongpy.MahjongTile(i,j)) == 2:
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
count += 1
if len(tmp)-count == 2:
tmp.pop(0)
count += 1
if len(tmp) == count: counts.append(count)
tiles = self.hands[:]
mentus = []
self.make_zyantou(tiles, mentus)
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
tmp = tiles[:]
count = 0
for i in self.TILE_TYPES[:3]:
for j in range(1,8):
if mahjongpy.MahjongTile(i,j) in tmp:
if mahjongpy.MahjongTile(i,j+1) in tmp:
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j+1)))
count += 1
if mahjongpy.MahjongTile(i,j+2) in tmp:
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j+2)))
count += 1
for i in self.TILE_TYPES:
for j in range(1,8):
if tmp.count(mahjongpy.MahjongTile(i,j)) == 2:
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
tmp.pop(tmp.index(mahjongpy.MahjongTile(i,j)))
count += 1
if len(tmp)-count == 2:
tmp.pop(0)
count += 1
if len(tmp) == count: counts.append(count)
tmp = tiles[:]
count = 0
for i in self.TILE_TYPES:
for j in range(1,10):
if tmp.count(mahjongpy.MahjongTile(i,j)) == 2: count += 1
counts.append(7-count) # for chiitoitsu (seven pairs)
tiles = self.hands[:]
tmp = []
tmp.append(tiles.count(mahjongpy.MahjongTile('pinzu',1)))
tmp.append(tiles.count(mahjongpy.MahjongTile('pinzu',9)))
tmp.append(tiles.count(mahjongpy.MahjongTile('manzu',1)))
tmp.append(tiles.count(mahjongpy.MahjongTile('manzu',9)))
tmp.append(tiles.count(mahjongpy.MahjongTile('souzu',1)))
tmp.append(tiles.count(mahjongpy.MahjongTile('souzu',9)))
tmp.append(tiles.count(mahjongpy.MahjongTile('ton')))
tmp.append(tiles.count(mahjongpy.MahjongTile('nan')))
tmp.append(tiles.count(mahjongpy.MahjongTile('sha')))
tmp.append(tiles.count(mahjongpy.MahjongTile('pei')))
tmp.append(tiles.count(mahjongpy.MahjongTile('haku')))
tmp.append(tiles.count(mahjongpy.MahjongTile('hatu')))
tmp.append(tiles.count(mahjongpy.MahjongTile('tyun')))
if tmp.count(1) == 13: counts.append(1)
elif tmp.count(2) > 1: counts.append(13-tmp.count(1))
else: counts.append(13-tmp.count(1)+1)
return(min(counts)-1)
def is_tenpai(self):
"""
Returns
-------
is_tenpai : bool
Whether the player is tenpai (one tile away from a winning hand).
"""
return(self.shanten() == 0)
def is_furiten(self):
"""
Returns
-------
is_furiten : bool
Whether the player is in furiten.
"""
is_furiten = False
return(is_furiten)
def is_kyusyukyuhai(self):
"""
Returns
-------
is_kyusyukyuhai : bool
Whether the hand qualifies for kyuushu kyuuhai (nine terminals and honors).
"""
count = 0
for i in self.hands:
if i.number in [1, 9, None]:
count += 1
return(count > 8)
def is_hora(self):
"""
Returns
-------
is_hora : bool
Whether the player's hand is a winning (complete) shape.
"""
is_hora = False
tiles = self.hands[:]
mentus = []
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = self.is_zyantou(tiles, mentus)
tiles = self.hands[:]
mentus = []
self.make_zyantou(tiles, mentus)
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = is_hora or (len(tiles) == 0)
return(is_hora or self.is_chitoitu() or self.is_kokushimusou())
def zyantou(self):
"""
Return the pair (jantou) from the player's hand.
Returns
-------
tiles : list
    The pair (a list of 2 MahjongTile). If no pair exists, a list containing only the dummy tile (self.KYOMU_TILE) is returned.
"""
is_hora = False
tiles = self.hands[:]
mentus = []
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = self.is_zyantou(tiles, mentus)
if is_hora: return(mentus[-1])
tiles = self.hands[:]
mentus = []
self.make_zyantou(tiles, mentus)
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = is_hora or (len(tiles) == 0)
if is_hora: return(mentus[0])
return([self.KYOMU_TILE])
def make_shuntus(self, tiles, mentus):
"""
Extract runs (shuntsu) from the hand.
Parameters
----------
tiles : list
    Unprocessed MahjongTile list (the hand).
mentus: list
    Processed MahjongTile lists. Each extracted run is appended as a list of 3 MahjongTile.
Notes
-----
Modifies the tiles and mentus arguments in place (destructive).
"""
for _ in range(2):
for i in range(1,8):
for j in self.TILE_TYPES[:3]:
if mahjongpy.MahjongTile(j,i) in tiles and mahjongpy.MahjongTile(j,i+1) in tiles and mahjongpy.MahjongTile(j,i+2) in tiles:
tmp = []
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i))))
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i+1))))
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i+2))))
mentus.append(tmp)
def make_kotus(self, tiles, mentus):
"""
Extract triplets (kotsu) from the hand.
Parameters
----------
tiles : list
    Unprocessed MahjongTile list (the hand).
mentus: list
    Processed MahjongTile lists. Each extracted triplet is appended as a list of 3 MahjongTile.
Notes
-----
Modifies the tiles and mentus arguments in place (destructive).
"""
for i in range(1,10):
for j in self.TILE_TYPES:
if tiles.count(mahjongpy.MahjongTile(j,i)) == 3:
tmp = []
for _ in range(3):
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i))))
mentus.append(tmp)
def make_zyantou(self, tiles, mentus):
"""
Extract the pair (jantou) from the hand.
Parameters
----------
tiles : list
    Unprocessed MahjongTile list (the hand).
mentus: list
    Processed MahjongTile lists. The extracted pair is appended as a list of 2 MahjongTile.
Notes
-----
Modifies the tiles and mentus arguments in place (destructive).
"""
for i in range(1,10):
for j in self.TILE_TYPES:
if tiles.count(mahjongpy.MahjongTile(j,i)) == 2:
tmp = []
for _ in range(2):
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i))))
mentus.append(tmp)
return(None)
def is_zyantou(self, tiles, mentus):
"""
Check whether the remaining tiles form the pair (jantou).
Parameters
----------
tiles : list
    Unprocessed MahjongTile list.
mentus: list
    Processed MahjongTile lists. When judged to be the pair, the 2 MahjongTile are appended as a list.
Notes
-----
Modifies the tiles and mentus arguments in place (destructive).
"""
if len(tiles) == 2 and tiles[0] == tiles[1]:
mentus.append([tiles[0], tiles[1]])
return(True)
else:
return(False)
def is_chitoitu(self):
"""
Returns
-------
is_chitoitu : bool
Whether the player's hand is chiitoitsu (seven pairs).
"""
if not self.is_menzen(): return(False)
mentus = []
tiles = self.hands[:]
for i in range(1,10):
for j in self.TILE_TYPES:
if tiles.count(mahjongpy.MahjongTile(j,i)) == 2:
tmp = []
for _ in range(2):
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i))))
mentus.append(tmp)
return(len(tiles) == 0)
def is_kokushimusou(self):
"""
Returns
-------
is_kokushimusou : bool
Whether the player's hand is kokushi musou (thirteen orphans).
"""
if not self.is_menzen(): return(False)
tmp = []
tiles = self.hands[:]
tmp.append(tiles.count(mahjongpy.MahjongTile('pinzu',1)))
tmp.append(tiles.count(mahjongpy.MahjongTile('pinzu',9)))
tmp.append(tiles.count(mahjongpy.MahjongTile('manzu',1)))
tmp.append(tiles.count(mahjongpy.MahjongTile('manzu',9)))
tmp.append(tiles.count(mahjongpy.MahjongTile('souzu',1)))
tmp.append(tiles.count(mahjongpy.MahjongTile('souzu',9)))
tmp.append(tiles.count(mahjongpy.MahjongTile('ton')))
tmp.append(tiles.count(mahjongpy.MahjongTile('nan')))
tmp.append(tiles.count(mahjongpy.MahjongTile('sha')))
tmp.append(tiles.count(mahjongpy.MahjongTile('pei')))
tmp.append(tiles.count(mahjongpy.MahjongTile('haku')))
tmp.append(tiles.count(mahjongpy.MahjongTile('hatu')))
tmp.append(tiles.count(mahjongpy.MahjongTile('tyun')))
return(tmp.count(1) == 12 and tmp.count(2) == 1)
def is_chanta(self):
"""
Returns
-------
is_chanta : bool
    Whether the player's hand is chanta (a terminal or honor in every set).
"""
is_hora = False
tiles = self.hands[:] + sum(self.melds, [])
mentus = []
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = self.is_zyantou(tiles, mentus)
count = 0
for i in mentus:
for j in i:
if j.number in [1, 9, None]:
count += 1
break
if is_hora and count == 5: return(True)
count = 0
tiles = self.hands[:]
mentus = []
self.make_zyantou(tiles, mentus)
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = is_hora or (len(tiles) == 0)
for i in mentus:
for j in i:
if j.number in [1, 9, None]:
count += 1
break
if is_hora and count == 5: return(True)
return(False)
def is_zyuntyan(self):
"""
Returns
-------
is_zyuntyan : bool
    Whether the player's hand is junchan (a terminal in every set).
"""
is_hora = False
tiles = self.hands[:] + sum(self.melds, [])
mentus = []
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = self.is_zyantou(tiles, mentus)
count = 0
for i in mentus:
for j in i:
if j.number in [1, 9]:
count += 1
break
if is_hora and count == 5: return(True)
count = 0
tiles = self.hands[:]
mentus = []
self.make_zyantou(tiles, mentus)
self.make_shuntus(tiles, mentus)
self.make_kotus(tiles, mentus)
is_hora = is_hora or (len(tiles) == 0)
for i in mentus:
for j in i:
if j.number in [1, 9]:
count += 1
break
if is_hora and count == 5: return(True)
return(False)
def is_ipeikou(self, tiles, mentus):
"""
Returns
-------
is_ipeikou : bool
    Whether the player's hand contains iipeikou (one pair of identical sequences).
"""
if not self.is_menzen(): return(False)
count = []
for _ in range(2):
for i in range(1,8):
for j in self.TILE_TYPES[:3]:
if mahjongpy.MahjongTile(j,i) in tiles and mahjongpy.MahjongTile(j,i+1) in tiles and mahjongpy.MahjongTile(j,i+2) in tiles:
tmp = []
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i))))
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i+1))))
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i+2))))
mentus.append(tmp)
count.append(len(tiles))
return(count == [5,2])
def is_ryanpeikou(self, tiles, mentus):
"""
Returns
-------
is_ryanpeikou : bool
    Whether the player's hand contains ryanpeikou (two pairs of identical sequences).
"""
if not self.is_menzen(): return(False)
count = []
for _ in range(2):
for i in range(1,8):
for j in self.TILE_TYPES[:3]:
if mahjongpy.MahjongTile(j,i) in tiles and mahjongpy.MahjongTile(j,i+1) in tiles and mahjongpy.MahjongTile(j,i+2) in tiles:
tmp = []
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i))))
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i+1))))
tmp.append(tiles.pop(tiles.index(mahjongpy.MahjongTile(j,i+2))))
mentus.append(tmp)
count.append(len(tiles))
return(count == [8,2])
def displayed_doras(self):
"""
Number of dora tiles in the hand (tiles that follow a dora indicator).
Returns
-------
count : int
    Number of dora tiles in the hand.
"""
count = 0
tiles = self.hands[:] + sum(self.melds, [])
doras = [] if self.table is None else self.table.dora_tiles
for i in tiles:
for j in doras:
if i == j: count += 1
return(count)
def akadoras(self):
"""
Number of red dora (akadora) tiles in the hand.
Returns
-------
count : int
    Number of red dora tiles in the hand.
"""
count = 0
tiles = self.hands[:] + sum(self.melds, [])
for i in tiles:
"""
Functions for reading and writing spherical harmonic coefficients from text
files that are formatted as [degree, order, value].
"""
import os as _os
import io as _io
import gzip as _gzip
import zipfile as _zipfile
import numpy as _np
import requests as _requests
import shutil as _shutil
def read_dov(filename, lmax=None, error=False, header=False, header2=False,
skip=0, encoding=None):
"""
Read spherical harmonic coefficients from a text file formatted as
[degree, order, value].
Usage
-----
coeffs, [errors], lmaxout, [header], [header2] = read_dov(
filename, [error=True, header=True, header2=True, lmax, skip,
encoding])
Returns
-------
coeffs : ndarray, size(2, lmaxout+1, lmaxout+1)
The spherical harmonic coefficients.
errors : ndarray, size(2, lmaxout+1, lmaxout+1)
The errors associated with the spherical harmonic coefficients.
lmaxout : int
The maximum spherical harmonic degree read from the file.
header : list of type str
A list of values in the header line found before the start of the
spherical harmonic coefficients.
header2 : list of type str
A list of values in the second header line found before the start of
the spherical harmonic coefficients.
Parameters
----------
filename : str
File name or URL that contains the text-formatted spherical harmonic
coefficients. filename will be treated as a URL if it starts with
'http://', 'https://', or 'ftp://'. If filename ends with '.gz' or
'.zip', the file will be uncompressed before parsing.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read from the file. The
default is to read the entire file.
error : bool, optional, default = False
If True, return the errors associated with the spherical harmonic
coefficients as a separate array.
header : bool, optional, default = False
If True, return a list of values in the header line found before the
start of the spherical harmonic coefficients.
header2 : bool, optional, default = False
If True, return a list of values in the second header line found before
the start of the spherical harmonic coefficients.
skip : int, optional, default = 0
The number of lines to skip before parsing the file.
encoding : str, optional, default = None
Encoding of the input file. The default is to use the system default.
Notes
-----
This function will read spherical harmonic coefficients from a 'dov'-
formatted text file. The errors associated with the spherical
harmonic coefficients, as well as the values in one or two header lines,
can be read optionally by setting the parameters error, header, and header2
to True. The optional parameter skip specifies how many lines should be
skipped before attempting to parse the file, and the optional parameter
lmax specifies the maximum degree to read from the file. Both real and
complex spherical harmonic coefficients are supported.
The spherical harmonic coefficients in the file should be formatted as
l, m, coeffs[0, l, m]
l, -m, coeffs[1, l, m]
where l and m are the spherical harmonic degree and order, respectively.
If the errors are to be read, the line should be formatted as
l, m, coeffs[0, l, m], errors[0, l, m]
l, -m, coeffs[1, l, m], errors[1, l, m]
For each value of increasing l, all the angular orders are listed in
pairs with increasing abs(order), from 0 to l.
If one or two header lines are to be read, they should be located directly
after the first lines to be skipped, before the start of the spherical
harmonic coefficients. The header values are returned as a list, where each
value is formatted as a string. Comment lines will be ignored, where
comments start with '#' or the line is all whitespace.
If filename starts with 'http://', 'https://', or 'ftp://', the file will
be treated as a URL. In this case, the file will be downloaded in its
entirety before it is parsed.
If the filename ends with '.gz' or '.zip', the file will be automatically
uncompressed before parsing. For zip files, archives with only a single
file are supported. Note that reading '.gz' and '.zip' files will be
extremely slow if lmax is not specified.
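Examples
--------
A minimal usage sketch; the path 'coeffs.dov' is a hypothetical example and
not a file shipped with this package.
>>> coeffs, lmax = read_dov('coeffs.dov')
>>> coeffs, errors, lmax = read_dov('coeffs.dov', lmax=10, error=True)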
"""
if _isurl(filename):
_response = _requests.get(filename)
if filename[-4:] == '.zip':
zf = _zipfile.ZipFile(_io.BytesIO(_response.content))
if len(zf.namelist()) > 1:
raise Exception('read_dov can only process zip archives '
'that contain a single file. Archive '
'contents:\n{}'.format(zf.namelist()))
elif filename[-4:] == '.zip':
zf = _zipfile.ZipFile(filename, 'r')
if len(zf.namelist()) > 1:
raise Exception('read_dov can only process zip archives that '
'contain a single file. Archive contents: \n'
'{}'.format(zf.namelist()))
# If lmax is None, determine lmax by reading last line of the file that
# is not a comment. (Note that this is very slow for zipped and gzipped
# files. Consider using indexed_gzip when SEEK_END is supported.)
if lmax is None:
if _isurl(filename):
f = _io.BytesIO(_response.content)
if filename[-4:] == '.zip':
f = zf.open(zf.namelist()[0])
elif filename[-3:] == '.gz':
f = _gzip.open(filename, mode='rb')
elif filename[-4:] == '.zip':
f = zf.open(zf.namelist()[0])
else:
f = open(filename, 'rb')
# determine lmax by reading the last non-comment line of the file
with f:
line = ''
if f.seek(0, _os.SEEK_END) == 0:
raise RuntimeError('File is empty.')
else:
f.seek(-1, _os.SEEK_CUR)
# read backwards to end of preceding line and then read the line
while _iscomment(line):
while f.read(1) != b'\n':
try:
f.seek(-2, _os.SEEK_CUR)
except:
f.seek(-1, _os.SEEK_CUR) # beginning of file
break
if f.tell() <= 1:
line = f.readline().decode()
line = line.replace(',', ' ')
if _iscomment(line):
raise RuntimeError('Encountered beginning of file '
'while attempting to determine '
'lmax.')
break
else:
line = f.readline().decode()
line = line.replace(',', ' ')
try:
f.seek(-len(line)-2, _os.SEEK_CUR)
except:
raise RuntimeError('Encountered beginning of file '
'while attempting to determine '
'lmax.')
lmaxout = int(line.split()[0])
else:
lmaxout = lmax
# open file, skip lines, read header, determine lstart, and then read
# coefficients one line at a time
if _isurl(filename):
if encoding is not None:
_response.encoding = encoding
f = _io.StringIO(_response.text)
if filename[-4:] == '.zip':
f = _io.TextIOWrapper(zf.open(zf.namelist()[0]), encoding=encoding)
elif filename[-3:] == '.gz':
f = _gzip.open(filename, mode='rt', encoding=encoding)
elif filename[-4:] == '.zip':
f = _io.TextIOWrapper(zf.open(zf.namelist()[0]), encoding=encoding)
else:
f = open(filename, 'r', encoding=encoding)
with f:
if skip != 0:
for i in range(skip):
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when '
'skipping lines.')
# read headers
if header is True:
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when '
'reading header line.')
while _iscomment(line):
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when '
'reading header line.')
line = line.replace(',', ' ')
header_list = line.split()
if header2 is True:
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when '
'reading second header line.')
while _iscomment(line):
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when '
'reading second header line.')
line = line.replace(',', ' ')
header2_list = line.split()
# determine the starting degree
start_position = f.tell()
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when determining '
'value of lstart.')
while _iscomment(line):
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered when determining '
'value of lstart.')
line = line.replace(',', ' ')
lstart = int(line.split()[0])
# determine if the coefficients are real or complex
try:
num = float(line.split()[2]) # noqa F841
coeffs = _np.zeros((2, lmaxout+1, lmaxout+1))
kind = 'real'
if error is True:
errors = _np.zeros((2, lmaxout+1, lmaxout+1))
except ValueError:
try:
num = _np.complex128(line.split()[2]) # noqa F841
coeffs = _np.zeros((2, lmaxout+1, lmaxout+1),
dtype=_np.complex128)
kind = 'complex'
if error is True:
errors = _np.zeros((2, lmaxout+1, lmaxout+1),
dtype=_np.complex128)
except ValueError:
raise ValueError('Coefficients can not be converted to '
'either float or complex. Coefficient '
'is {:s}\n'.format(line.split()[2]) +
'Unformatted string is {:s}'.format(line))
# rewind one line and read coefficients one line at a time
f.seek(start_position)
for degree in range(lstart, lmaxout+1):
for order in range(degree+1):
line = f.readline()
if line == '':
raise RuntimeError('End of file encountered at '
'degree and order {:d}, {:d}.'
.format(degree, order))
while _iscomment(line):
line
inventory_currentmachinesnapshot as cms on (msaa.machinesnapshot_id = cms.machine_snapshot_id)"
" join inventory_source as s on (cms.source_id = s.id)"
f" {wheres_str}"
" group by aa.id, aa.display_name, aa.version_name, aa.version_code, s.name, s.id"
") select display_name, name_ms_count ms_count,"
"jsonb_agg("
" jsonb_build_object("
" 'pk', id,"
" 'version_name', version_name,"
" 'version_code', version_code,"
" 'source_name', source_name,"
" 'source_pk', source_id,"
" 'ms_count', version_ms_count"
")) versions,"
"count(*) over () as full_count "
"from aaa "
"group by display_name, ms_count "
f"order by {order_str}"
)
return query, args
class DebPackageSearchForm(BaseAppSearchForm):
name = forms.CharField(label="Package name", max_length=64,
widget=forms.TextInput(attrs={"autofocus": "true", "placeholder": "Package name"}),
required=False)
order_mapping = {"n": "name",
"mc": "ms_count"}
default_order = ("name", "ASC")
title = "Debian packages"
app_headers = (
("name", "name", True, "Package"),
)
version_headers = (
("version", False, "Version"),
("source_name", True, "Source"),
)
def get_ms_query_filters(self, result, version=None):
filters = super().get_ms_query_filters(result, version)
filter_kwargs = {"name": result["name"]}
if version:
filter_kwargs["value"] = version["pk"]
filters.append((DebPackageFilter, filter_kwargs))
return filters
def get_query_and_args(self):
args = []
# filtering
wheres = []
name = self.cleaned_data.get("name")
if name:
args.append(name)
wheres.append("dp.name ~* %s")
source = self.get_source()
if source:
args.append(source.id)
wheres.append("s.id = %s")
last_seen = self.get_last_seen()
if last_seen:
args.append(last_seen)
wheres.append("cms.last_seen >= %s")
if wheres:
wheres_str = "where {}".format(" and ".join(wheres))
else:
wheres_str = ""
# ordering
order_attr, order_dir = self._get_current_order()
order_str = f"{order_attr} {order_dir}"
if order_attr == "ms_count":
order_str = f"{order_str}, name ASC"
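# The CTE aggregates machine counts per (package name, version, source); the
# window function sums those counts per package name so the outer select can
# fold each package's versions into a single JSON array row.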
query = (
"with adp as ("
" select dp.id, dp.name, dp.version, s.name source_name, s.id source_id,"
" sum(count(*)) over (partition by dp.name) name_ms_count,"
" count(*) version_ms_count"
" from inventory_debpackage as dp"
" join inventory_machinesnapshot_deb_packages as msdp on (dp.id = msdp.debpackage_id)"
" join inventory_currentmachinesnapshot as cms on (msdp.machinesnapshot_id = cms.machine_snapshot_id)"
" join inventory_source as s on (cms.source_id = s.id)"
f" {wheres_str}"
" group by dp.id, dp.name, dp.version, s.name, s.id"
") select name, name_ms_count ms_count,"
"jsonb_agg("
" jsonb_build_object("
" 'pk', id,"
" 'version', version,"
" 'source_name', source_name,"
" 'source_pk', source_id,"
" 'ms_count', version_ms_count"
")) versions,"
"count(*) over () as full_count "
"from adp "
"group by name, ms_count "
f"order by {order_str}"
)
return query, args
class IOSAppSearchForm(BaseAppSearchForm):
name = forms.CharField(label="Name", max_length=64,
widget=forms.TextInput(attrs={"autofocus": "true", "placeholder": "Name"}),
required=False)
order_mapping = {"n": "name",
"mc": "ms_count"}
default_order = ("name", "ASC")
title = "iOS apps"
app_headers = (
("name", "name", True, "Name"),
("identifier", None, False, "Identifier"),
)
version_headers = (
("version", False, "Version"),
("short_version", False, "Short version"),
("source_name", True, "Source"),
)
version_sort_keys = ("version", "short_version")
def get_ms_query_filters(self, result, version=None):
filters = super().get_ms_query_filters(result, version)
filter_kwargs = {"name": result["name"]}
if version:
filter_kwargs["value"] = version["pk"]
filters.append((IOSAppFilter, filter_kwargs))
return filters
def get_query_and_args(self):
args = []
# filtering
wheres = []
name = self.cleaned_data.get("name")
if name:
args.append(name)
wheres.append("ia.name ~* %s")
source = self.get_source()
if source:
args.append(source.id)
wheres.append("s.id = %s")
last_seen = self.get_last_seen()
if last_seen:
args.append(last_seen)
wheres.append("cms.last_seen >= %s")
if wheres:
wheres_str = "where {}".format(" and ".join(wheres))
else:
wheres_str = ""
# ordering
order_attr, order_dir = self._get_current_order()
order_str = f"{order_attr} {order_dir}"
if order_attr == "ms_count":
order_str = f"{order_str}, name ASC"
query = (
"with aia as ("
" select ia.id, ia.name, ia.identifier, ia.version, ia.short_version, s.name source_name, s.id source_id,"
" sum(count(*)) over (partition by ia.name, ia.identifier) name_ms_count,"
" count(*) version_ms_count"
" from inventory_iosapp as ia"
" join inventory_machinesnapshot_ios_apps as msia on (ia.id = msia.iosapp_id)"
" join inventory_currentmachinesnapshot as cms on (msia.machinesnapshot_id = cms.machine_snapshot_id)"
" join inventory_source as s on (cms.source_id = s.id)"
f" {wheres_str}"
" group by ia.id, ia.name, ia.identifier, ia.version, ia.short_version, s.name, s.id"
") select name, identifier, name_ms_count ms_count,"
"jsonb_agg("
" jsonb_build_object("
" 'pk', id,"
" 'version', version,"
" 'short_version', short_version,"
" 'source_name', source_name,"
" 'source_pk', source_id,"
" 'ms_count', version_ms_count"
")) versions,"
"count(*) over () as full_count "
"from aia "
"group by name, identifier, ms_count "
f"order by {order_str}"
)
return query, args
class MacOSAppSearchForm(BaseAppSearchForm):
bundle_name = forms.CharField(label='Bundle name', max_length=64,
widget=forms.TextInput(attrs={"autofocus": "true", "placeholder": "Bundle name"}),
required=False)
order_mapping = {"bn": "bundle_name",
"mc": "ms_count"}
default_order = ("bundle_name", "ASC")
title = "macOS apps"
app_headers = (
("bundle_name", "bundle_name", True, "Bundle"),
("bundle_id", None, False, "Bundle ID"),
)
version_headers = (
("bundle_version", False, "Bundle version"),
("bundle_version_str", False, "Bundle version str"),
("source_name", True, "Source"),
)
version_sort_keys = ("bundle_version", "bundle_version_str")
def get_ms_query_filters(self, result, version=None):
filters = super().get_ms_query_filters(result, version)
bundle_name = result["bundle_name"]
bundle_id = result["bundle_id"]
if bundle_name or bundle_id:
if bundle_name:
filter_kwargs = {"bundle_name": bundle_name}
else:
filter_kwargs = {"bundle_id": bundle_id}
if version:
filter_kwargs["value"] = version["pk"]
filters.append((BundleFilter, filter_kwargs))
return filters
def get_query_and_args(self):
args = []
# filtering
wheres = []
bundle_name = self.cleaned_data.get("bundle_name")
if bundle_name:
args.append(bundle_name)
wheres.append("a.bundle_name ~* %s")
source = self.get_source()
if source:
args.append(source.id)
wheres.append("s.id = %s")
last_seen = self.get_last_seen()
if last_seen:
args.append(last_seen)
wheres.append("cms.last_seen >= %s")
if wheres:
wheres_str = "where {}".format(" and ".join(wheres))
else:
wheres_str = ""
# ordering
order_attr, order_dir = self._get_current_order()
order_str = f"{order_attr} {order_dir}"
if order_attr == "ms_count":
order_str = f"{order_str}, bundle_name ASC"
query = (
"with ama as ("
" select a.id, a.bundle_id, a.bundle_name, a.bundle_version, a.bundle_version_str,"
" s.name source_name, s.id source_id,"
" sum(count(*)) over (partition by a.bundle_id, a.bundle_name) bundle_ms_count,"
" count(*) version_ms_count"
" from inventory_osxapp as a"
" join inventory_osxappinstance as ai on (ai.app_id = a.id)"
" join inventory_machinesnapshot_osx_app_instances as msoai on(msoai.osxappinstance_id = ai.id)"
" join inventory_currentmachinesnapshot as cms on (msoai.machinesnapshot_id = cms.machine_snapshot_id)"
" join inventory_source as s on (cms.source_id = s.id)"
f" {wheres_str}"
" group by a.id, a.bundle_id, a.bundle_name, a.bundle_version, a.bundle_version_str, s.name, s.id"
") select bundle_id, bundle_name, bundle_ms_count ms_count,"
"jsonb_agg("
"jsonb_build_object("
" 'pk', id,"
" 'bundle_version', bundle_version,"
" 'bundle_version_str', bundle_version_str,"
" 'source_name', source_name,"
" 'source_pk', source_id,"
" 'ms_count', version_ms_count"
")) versions,"
"count(*) over () as full_count "
"from ama "
"group by bundle_id, bundle_name, ms_count "
f"order by {order_str}"
)
return query, args
class ProgramsSearchForm(BaseAppSearchForm):
name = forms.CharField(label='Name', max_length=64,
widget=forms.TextInput(attrs={"autofocus": "true", "placeholder": "Name"}),
required=False)
order_mapping = {"n": "name",
"mc": "ms_count"}
default_order = ("name", "ASC")
title = "Programs"
app_headers = (
("name", "name", True, "Name"),
("identifying_number", None, False, "Identifying number"),
)
version_headers = (
("version", False, "Version"),
("source_name", True, "Source"),
)
def get_ms_query_filters(self, result, version=None):
filters = super().get_ms_query_filters(result, version)
filter_kwargs = {"name": result["name"]}
if version:
filter_kwargs["value"] = version["pk"]
filters.append((ProgramFilter, filter_kwargs))
return filters
def get_query_and_args(self):
args = []
# filtering
wheres = []
name = self.cleaned_data.get("name")
if name:
args.append(name)
wheres.append("p.name ~* %s")
source = self.get_source()
if source:
args.append(source.id)
wheres.append("s.id = %s")
last_seen = self.get_last_seen()
if last_seen:
args.append(last_seen)
wheres.append("cms.last_seen >= %s")
if wheres:
wheres_str = "where {}".format(" and ".join(wheres))
else:
wheres_str = ""
# ordering
order_attr, order_dir = self._get_current_order()
order_str = f"{order_attr} {order_dir}"
if order_attr == "ms_count":
order_str = f"{order_str}, name ASC"
query = (
"with ap as ("
" select p.id, p.name, p.identifying_number, p.version,"
" s.name source_name, s.id source_id,"
" sum(count(*)) over (partition by p.name, p.identifying_number) program_ms_count,"
" count(*) version_ms_count"
" from inventory_program as p"
" join inventory_programinstance as pi on (pi.program_id = p.id)"
" join inventory_machinesnapshot_program_instances as mspi on(mspi.programinstance_id = pi.id)"
" join inventory_currentmachinesnapshot as cms on (mspi.machinesnapshot_id = cms.machine_snapshot_id)"
" join inventory_source as s on (cms.source_id = s.id)"
f" {wheres_str}"
" group by p.id, p.name, p.identifying_number, p.version, s.name, s.id"
") select name, identifying_number, program_ms_count ms_count,"
"jsonb_agg("
"jsonb_build_object("
" 'pk', id,"
" 'version', version,"
" 'source_name', source_name,"
" 'source_pk', source_id,"
" 'ms_count', version_ms_count"
")) versions,"
"count(*) over () as full_count "
"from ap "
"group by name, identifying_number, ms_count "
f"order by {order_str}"
)
return query, args
class EnrollmentSecretForm(forms.ModelForm):
class Meta:
model = EnrollmentSecret
fields = ("meta_business_unit", "tags", "serial_numbers", "udids", "quota")
def __init__(self, *args, **kwargs):
self.no_restrictions = kwargs.pop("no_restrictions", False)
self.no_serial_numbers = kwargs.pop("no_serial_numbers", False)
self.no_udids = kwargs.pop("no_udids", False)
self.meta_business_unit = kwargs.pop("meta_business_unit", None)
super().__init__(*args, **kwargs)
mbu_field = self.fields["meta_business_unit"]
mbu_field.queryset = MetaBusinessUnit.objects.available_for_api_enrollment()
if self.meta_business_unit:
mbu_field.queryset = mbu_field.queryset.filter(pk=self.meta_business_unit.pk)
mbu_field.initial = self.meta_business_unit.pk
mbu_field.widget = forms.HiddenInput()
self.fields['tags'].queryset = Tag.objects.available_for_meta_business_unit(self.meta_business_unit)
if self.no_restrictions:
for field_name in ("serial_numbers", "udids", "quota"):
self.fields[field_name].widget = forms.HiddenInput()
else:
if self.no_serial_numbers:
self.fields["serial_numbers"].widget = forms.HiddenInput()
if self.no_udids:
self.fields["udids"].widget = forms.HiddenInput()
def clean(self):
super().clean()
meta_business_unit = self.cleaned_data["meta_business_unit"] or self.meta_business_unit
if meta_business_unit:
tag_set = set(self.cleaned_data['tags'])
wrong_tag_set = tag_set - set(Tag.objects.available_for_meta_business_unit(meta_business_unit))
if wrong_tag_set:
raise forms.ValidationError(
"Tag{} {} not available for this business unit".format(
"" if len(wrong_tag_set) == 1 else "s",
", ".join(str(t) for t in wrong_tag_set)
)
)
return self.cleaned_data
def save(self, *args, **kwargs):
commit = kwargs.pop("commit",
:param aging: enable aging or not
:type re_c: boolean
:param re_c: recount centers of the circles or not
"""
maps = []
for i in clust:
mapy, _, _, _ = cluster(data,i,level,levels=levels, border = border, aging=False, re_c=False)
maps.append(mapy)
return maps
def update_mg(pay, time, last_t, m, v, p, value,norm=l1_norm, tok=False, aging=True, re_c=True):
"""
Update of maps from new transaction
:type pay: numeric array
:param pay: [latitude, longitude] of the new transaction
:type time: string
:param time: date and time of the transaction
:type last_t: string
:param last_t: date and time of the latest transaction
:type m: string list
:param m: list with centers of circles
:type v: numeric list
:param v: list with values, corresponding to already existing circles
:type p: numeric list
:param p: list with probabilities corresponding to the existing circles
:type value: numeric
:param value: value, corresponding to the transaction
:type norm: function
:param norm: distance function
:type tok: boolean
:param tok: first iteration flag
:type aging: boolean
:param aging: enable aging or not
:type re_c: boolean
:param re_c: recount centers of the circles or not
"""
massive, values_massive, probs_massive = list(m), list(v), list(p)
i=pay[0]
j=pay[1]
f=False
if(tok):
for o in range(len(massive)):
massive[o][0]=' '.join([str(i),str(j)])
values_massive[o][0]=10.0
probs_massive[o][0]=1.0
last_t=time
else:
if(aging):
alpha = sigmoid_dt_(last_t,time)
else:
alpha=1
last_t=time
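# Walk the map levels from coarse to fine: at level o the circle radius is
# value * 0.1**o. If the payment falls inside an existing circle, that circle
# is reinforced (and optionally re-centred); otherwise a new circle centred on
# the payment is appended to this level and to all finer levels.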
for o in range(len(massive)):
idx = []
la = i
lo = j
val = value*(0.1**(o))
if(val==0):
val=0.1
gigy=0
is_in=False
for samp in massive[o]:
if(check_incl(samp, la, lo, val, norm=norm)):
is_in=True
if(re_c):
massive[o][gigy]=re_count(s = samp,x = la, y = lo,mas = massive,val= val,l1=o,l2=o+1, norm=norm)
idx.append(gigy)
gigy+=1
if(not is_in):
f=True
for k in range(o,len(massive)):
massive[k] = np.append(massive[k],' '.join([str(i),str(j)]))
values_massive[k]*=alpha
values_massive[k] = np.append(values_massive[k], 10.0)
probs_massive[k] = np.append(probs_massive[k], 0.0)
else:
values_massive[o] *= alpha
for z in idx:
values_massive[o][z] += 10.0
if(f):
probs_massive = update_pg(values_massive, probs_massive)
break
return last_t, np.array(massive), np.array(values_massive), np.array(probs_massive)
def update_pg(mas_v,probs_massive):
"""
Probabilities update after new transaction
:type mas_v: numeric list
:param mas_v: list with values of circles
:type probs_massive: numeric list
:param probs_massive: list with probabilities to update afer new transaction
"""
for i in range(len(mas_v)):
for j in range(len(mas_v[i])):
probs_massive[i][j]=(mas_v[i][j]*1.0/sum(mas_v[i]))
return probs_massive
def global_map(usr1, levels=2, border=100, re_c=True, aging=True, sq=True):
"""
Builds local maps; if no activity is encountered during a period, that period's map is replaced by the global map
:type usr1: pandas DataFrame
:param usr1: dataframe with data of the specified client
:type levels: int
:param levels: number of levels on the local map
:type aging: boolean
:param aging: enable aging or not
:type sq: boolean
:param sq: use sqrt during calculation
:type re_c: boolean
:param re_c: recount centers of the circles or not
"""
gm, gv, gp = None, None, None
glob = False
gmassive = list(np.ndarray(shape=(35,levels,1), dtype=object))
gvalues = list(np.ndarray(shape=(35,levels,1), dtype=float))
gprobs = list(np.ndarray(shape=(35,levels,1), dtype=float))
VALUE = 10.0
NORM = l2_norm
opp = 0
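# 35 local maps: one per (day of week, part of day) slot (7 days * 5 parts).
# Slots with no transactions fall back to the client's global map, built once
# on demand by cluster3.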
for day in range(1,8):
ss = usr1.loc[usr1['Day_week']==day]
l = ss.shape[0]
for part_day in range(5):
last_t=0
tmp = (usr1.loc[(usr1['Part_day']==part_day) & (usr1['Day_week']==day)]).iloc[:,:-2]
fl =True
if(len(tmp)!=0):
for i in tmp.index:
dt = ' '.join(tmp.loc[i,['Time','Date']].values)
p = tmp.loc[i,['lat','lon']].values
if(fl):
tok=True
fl=False
else:
tok=False
last_t, gmassive[opp], gvalues[opp], gprobs[opp] = update_mg(p, dt, last_t, gmassive[opp],
gvalues[opp],
gprobs[opp],
border,
norm=l2_norm,
tok=tok,
re_c=re_c, aging=aging)
else:
if(not glob):
glob=True
gm,gv,gp = cluster3(usr1,levels,border,re_c=re_c, aging=aging, sq=sq)
gmassive[opp], gvalues[opp], gprobs[opp] = gm, gv, gp
opp+=1
return gmassive, gvalues, gprobs
def cluster(data, uid, level, levels = 4, border = 100, aging=True, local=True, re_c=True):
"""
Builds the global map for the client with the specified uid
:type data: pandas dataFrame
:param data: dataframe with all data
:type uid: string
:param uid: index of the client
:type level: int
:param level: level on which to visualize the map
:type levels: int
:param levels: number of levels on the global map
:type border: numeric
:param border: radius of the largest circle
:type aging: boolean
:param aging: enable aging or not
:type local: boolean
:param local: to localize time or not
:type re_c: boolean
:param re_c: recount centers of the circles or not
"""
usr1 = data.loc[data['user_id']==uid]
usr1 = usr1.groupby(['Date', 'Time']).apply(pd.DataFrame.reset_index)
usr1 = usr1.iloc[:,[1,2,3,4,5]].reset_index(drop=True)
if(local):
dataFrame_localization(usr1)
last_t=0
massive = np.ndarray(shape=(levels,1), dtype=object)
values = np.ndarray(shape=(levels,1), dtype=float)
probs = np.ndarray(shape=(levels,1), dtype=float)
VALUE = 10.0
BORDER_VALUE = border
NORM = l2_norm
for i in range(usr1.shape[0]):
dt = ' '.join(usr1.loc[i,['Time','Date']].values)
p = usr1.loc[i,['lat','lon']].values
if(i==0):
tok=True
else:
tok=False
last_t, massive, values, probs = update_mg(p, dt, last_t, massive, values, probs,
BORDER_VALUE,
norm=l2_norm,
tok=tok, aging=aging, re_c = re_c)
lat = usr1['lat']
lon = usr1['lon']
VALUE = 10.0
BORDER_VALUE*= 1000
map3 = folium.Map(location=[lat.values[0],lon.values[0]], zoom_start = 12)
level=level
if level>(levels-1) or level<0:
level=(levels-1)
t=level-levels
val = BORDER_VALUE*(0.1**(level))
s=1
for i in zip(lat,lon):
folium.Marker(location=i,
radius = 10,
popup=usr1.loc[s-1,'Time']+' '+usr1.loc[s-1,'Date'],
icon=folium.Icon(color = 'green')).add_to(map3)
s+=1
idx = 0
for i in massive[level]:
ss = i.split(' ')
la = float(ss[0])
lo = float(ss[1])
folium.vector_layers.Circle([la,lo],
val,
popup=None,
color=color_change(probs[t][idx]),
opacity=probs[t][idx],
fill_color=color_change(probs[t][idx]),
fill_opacity=probs[t][idx]).add_to(map3)
idx+=1
map3.save("map3.html")
return map3, massive, values, probs
def cluster2(usr1, border = 10, level=-1):
"""
Builds the global map for the specified client on the specified layer
:type usr1: pandas dataFrame
:param usr1: dataframe slice with client's data
:type level: int
:param level: level on which to build the map
:type border: numeric
:param border: radius of the circle
"""
massive = np.ndarray(shape=(1,1), dtype=object)
values = np.ndarray(shape=(1,1), dtype=float)
probs = np.ndarray(shape=(1,1), dtype=float)
last_t=0.0
VALUE = 10.0
BORDER_VALUE = border*10
NORM = l2_norm
for i in range(usr1.shape[0]):
dt = ' '.join(usr1.loc[i,['Time','Date']].values)
p = usr1.loc[i,['lat','lon']].values
if(i==0):
tok=True
else:
tok=False
last_t, massive, values, probs = update_mg(p, dt, last_t, massive, values, probs,
BORDER_VALUE,
norm=l2_norm,
tok=tok)
return massive[level], values[level], probs[level]
def cluster3(usr1, levels, border, re_c=True, aging=True, sq=True):
"""
Builds the global map for the specified client
:type usr1: pandas dataFrame
:param usr1: dataframe slice with client's data
:type levels: int
:param levels: number of levels on the map
:type border: numeric
:param border: radius of the largest circle
:type aging: boolean
:param aging: enable aging or not
:type re_c: boolean
:param re_c: recount centers of the circles or not
"""
last_t=0
massive = np.ndarray(shape=(levels,1), dtype=object)
values = np.ndarray(shape=(levels,1), dtype=float)
probs = np.ndarray(shape=(levels,1), dtype=float)
VALUE = 10.0
BORDER_VALUE = border
NORM = l2_norm
for i in range(usr1.shape[0]):
dt = ' '.join(usr1.loc[i,['Time','Date']].values)
p = usr1.loc[i,['lat','lon']].values
if(i==0):
tok=True
else:
tok=False
last_t, massive, values, probs = update_mg(p, dt, last_t, massive, values, probs,
BORDER_VALUE,
norm=l2_norm,
tok=tok,
re_c=re_c, aging=aging)
return massive, values, probs
def low_level_transaction(lat,lon, mp, pbs, pogr=100):
"""
Low-level check of the validity of the transaction based on history
:type lat: numeric
:param lat: latitude of the transaction
:type lon: numeric
:param lon: longitude of the transaction
:type mp: string list
:param mp: list with centers of circles of the selected level
:type pbs: numeric list
:param pbs: list with probabilities corresponding to the circles
:type pogr: numeric
:param pogr: value used to compute the probability
"""
prob = 0.0
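# Each circle contributes its probability weight divided by its distance to the
# transaction point; circles closer than one unit contribute
# probability / log2(pogr) instead, which avoids a division blow-up at
# near-zero distances.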
for i,j in zip(mp,pbs):
s = i.split(' ')
lt,ln = float(s[0]),float(s[1])
rng = l2_norm(lat, lon, lt, ln)
if(rng<1):
prob += j/np.log2(pogr)
else:
prob += j/rng
print(prob)
return prob
def transaction(data, lat,lon,time, date, uid, local=False):
"""
Checks the full legitimacy of the transaction
:type data: pandas dataFrame
:param data: dataFrame with all the data
:type lat: numeric
:param lat: latitude of the transaction
:type lon: numeric
:param lon: longitude of the transaction
:type time: string
:param time: time of the transaction
:type date: string
:param date: date of the transaction
:type uid: string
:param uid: index of the client
| |
only, to, training after the moment.
:param model:
:param optimizer:
:param train_queue:
:param valid_queue:
:param args:
:param search_space:
:return: res_dict, best_valids, best_tests from evolutionary search
"""
# merge the evaluation phase inside.
# optimizer = self.optimizer
# scheduler = self.scheduler
# model = self.parallel_model
# reset the random seed for reproducibility
# evaluate before the post-search phase.
if not self.args.evaluate_after_search:
# return 0, None
fitness_dict = self.evaluate(self.epoch, self._datasets[2], train_queue=self._datasets[0])
self.save_results(self.epoch, rank_details=True)
ep_k = [k for k in self.ranking_per_epoch.keys()][-1]
best_id = self.ranking_per_epoch[ep_k][-1][1].geno_id
return best_id, self.search_space.topologies[best_id]
logging.info(">=== Post Search Phase ====")
epoch = self.epoch
self.save_duplicate_arch_pool('valid', epoch)
tr, va, te = self.load_dataset(shuffle_test=False)
utils.torch_random_seed(self.args.seed)
tr, va, te = self.load_dataset(shuffle_test=True)
nb_batch_per_eval = self.args.evaluate_nb_batch
if self.args.neweval_num_train_batches > 0:
query_fn = partial(procedure_ops._query_model_with_train_further, nb_batch=nb_batch_per_eval,
model=self.parallel_model,
train_queue=tr, valid_queue=va, test_queue=te, policy=self)
logging.info("> Finetune before evaluation query_fn.")
else:
query_fn = partial(procedure_ops._query_model_by_eval,
nb_batch=nb_batch_per_eval,
model=self.parallel_model,
valid_queue=va, test_queue=te, search_space=self.search_space)
logging.info("> normal evaluation query_fn.")
logging.info("> Using sampler {}".format(self.args.evaluate_sampler))
if self.args.evaluate_sampler == 'random':
res = procedure_ops.run_random_search_over_cnn_search(self.search_space, self.args, query_fn)
elif self.args.evaluate_sampler == 'evolutionary':
res = procedure_ops.run_evolutionary_search_on_search_space(self.search_space, self.args, query_fn)
else:
raise NotImplementedError(f"self.args.evaluate_sampler {self.args.evaluate_sampler} not yet.")
search_res_path = self.args.main_path + '/' + self.args.evaluate_sampler + '_results.json'
search_arch_path = self.args.main_path + '/' + self.args.evaluate_sampler + '_archs.json'
# implement this outside the loop ...
# test if all is id
res_dict = res[0]
res_specs = res[-1]
mids = list(res_dict.keys())
est_perfs = [res_dict[i]['test_accuracy'] for i in mids]
if all(map(lambda x: isinstance(x, int), list(mids))):
utils.save_json(res, search_res_path)
# compute the kendall tau and do the best
gt_ranks = [self.search_space.rank_by_mid[i] for i in mids]
logging.info("Kendall tau of given search: {}".format(kendalltau(gt_ranks, est_perfs).correlation))
if self.args.neweval_num_train_batches > 0:
est_perfs_old = [res_dict[i]['test_accuracy_before'] for i in mids]
logging.info("Kendall tau of given search before finetune: {}".format(kendalltau(gt_ranks, est_perfs_old).correlation))
# post search top 5 results
top_K_indices = np.argsort(est_perfs)[::-1][:5]
best_ids = [mids[i] for i in top_K_indices]
best_specs = [self.search_space.topologies[i] for i in best_ids]
utils.save_json([best_ids, [str(s) for s in best_specs]], search_arch_path)
# let pick 5 best models using res.
else:
# with new architecture, we just simply return the top 5 by the validation accuracy...
top_K_indices = np.argsort(est_perfs)[::-1][:5]
best_ids = [mids[i] if isinstance(mids[i], int) else -1 for i in top_K_indices]
best_specs = [res_specs[mids[i]] for i in top_K_indices]
utils.save_json([best_ids, [str(s) for s in best_specs]], search_arch_path)
return best_ids, best_specs
###########################
### Utility functions
###########################
def cpu(self):
self.model = self.model.cpu()
def initialize_search_space(self):
""" This function should align with dataset """
if 'nasbench201' in self.args.search_space:
# NB201 search space
self.search_space = search_space.NASBench201SearchSpace(self.args)
self.model_fn = self.search_space.model_fn
elif 'darts_nds' in self.args.search_space:
self.search_space = search_space.DARTSSearchSpaceNDS(self.args)
self.model_fn = self.search_space.model_fn
elif 'nasbench101' in self.args.search_space:
from utils import DictAttr, wrap_config
# from nasws.cnn.search_space.nasbench101 import nasbench_build_config
from nasws.cnn.search_space.nasbench101.lib.config import build_config as nasbench_build_config
nasbench_search_config = DictAttr(nasbench_build_config())
wrap_config(nasbench_search_config, 'nasbench', args=self.args,
keys=[
'module_vertices',
# add more if necessary.
])
self.args.nasbench_config = nasbench_search_config
if 'fixchannel' in self.args.search_space:
self.search_space = search_space.NasBenchSearchSpaceFixChannels(self.args)
else:
self.search_space = search_space.NASbenchSearchSpace(self.args)
self.model_fn = self.search_space.model_fn
else:
raise NotImplementedError(f'initialize search space not supported for {self.args.search_space}')
self._change_model_fn = self.search_space.change_model_spec
def initialize_run(self, sub_dir_path=None):
tr, va, te = self.load_dataset()
logging.info('Creating the loss function with CrossEntropy')
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
if self.args.label_smooth > 0:
self._eval_loss = criterion
logging.info(f'Label smoothing enabled with {self.args.label_smooth}')
criterion = CrossEntropyLabelSmooth(self.num_classes, self.args.label_smooth)
criterion = criterion.cuda()
self._loss = criterion
return tr, va, te, criterion
def initialize_model(self, resume=True):
"""
Initialize model, may change across different model.
:return:
"""
args = self.args
model = self.model_fn(args)
num_gpus = torch.cuda.device_count()
if args.apex_enable:
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = num_gpus
args.world_size = 1
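# Linear learning-rate scaling: the configured LR appears to assume a reference
# effective batch size of 1024, so it is rescaled by the global batch size.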
args.learning_rate = args.learning_rate*float(args.batch_size*args.world_size)/1024
model = model.cuda().to(memory_format=torch.contiguous_format)
else:
if num_gpus > 0:
model = model.cuda()
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
if args.optimizer == 'sgd':
optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
logging.info("Creating SGD : init_lr {} weight_decay {}, momentum {}".format(
args.learning_rate, args.momentum, args.weight_decay))
elif args.optimizer == 'rmsprop':
from nasws.cnn.search_space.nasbench101.optimizer import RMSprop as RMSpropTF
optimizer = RMSpropTF(
model.parameters(),
args.learning_rate,
eps=1.0,
weight_decay=args.weight_decay,
momentum=args.momentum,
)
logging.info("Creating RMSProp : init_lr {} weight_decay {}, momentum {}".format(
args.learning_rate, args.momentum, args.weight_decay))
elif args.optimizer == 'adam':
raise ValueError("todo later")
else:
raise NotImplementedError('optimizer not supported...')
if args.apex_enable:
import apex.amp as amp
model, optimizer = amp.initialize(
model, optimizer,
opt_level='O1', # official mixed precision
# keep_batchnorm_fp32=True, # bn 32 to accelarate further.
loss_scale=None) # do not scale
args.apex_opt_level='O1'
self.amp = amp
self.model = model # this is a normal cpu model...
self.parallel_model = nn.DataParallel(model) if num_gpus > 1 else model
lr_epochs = args.epochs if args.epochs_lr <= 0 else args.epochs_lr
logging.info(f'Creating learning rate scheduler: {args.learning_rate_scheduler} with max epoch {lr_epochs}')
if args.learning_rate_scheduler == 'cosine':
# scheduler as Cosine.
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(lr_epochs), eta_min=args.learning_rate_min)
elif args.learning_rate_scheduler == 'cosinewarm':
# scheduler as Cosine With Warmup
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer, int(lr_epochs / 2), eta_min=args.learning_rate_min
)
elif args.learning_rate_scheduler == 'cosineimagenet':
from nasws.cnn.operations.lr_scheduler import CosineWarmupImagenetScheduler
scheduler = CosineWarmupImagenetScheduler(
optimizer, args.supernet_warmup_epoch, float(lr_epochs), eta_min=args.learning_rate_min
)
elif args.learning_rate_scheduler == 'step':
# step wise lr setting
# scheduler = torch.optim.lr_scheduler.StepLR(
# optimizer, step_size=int(args.epochs * 0.6 / 3), gamma=0.1, last_epoch=int(args.epochs * 0.7)
# )
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(lr_epochs * i) for i in [0.3, 0.6, 1.0]], gamma=0.1)
elif args.learning_rate_scheduler == 'constant':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.epochs, gamma=1.0)
else:
raise ValueError("LR Scheduler {} not supported.".format(args.learning_rate_scheduler))
self.optimizer = optimizer
self.scheduler = scheduler
# here it is, but lets try to reload this.
if args.resume and resume:
self.resume_from_checkpoint()
return self.parallel_model, optimizer, scheduler
def load_dataset(self, shuffle_test=False):
if self._datasets:
return self._datasets
args = self.args
if args.dataset == 'cifar10':
train_queue, valid_queue, test_queue = load_supernet_cifar10(args, shuffle_test)
self.num_classes = 10
elif args.dataset == 'imagenet':
# train_queue, valid_queue, test_queue = load_supernet_cifar10(args, shuffle_test, debug_imgnet=True)
train_queue, valid_queue, test_queue = load_supernet_imagenet(args)
self.num_classes = 1000
else:
raise NotImplementedError("Temporary not used.")
self._datasets = train_queue, valid_queue, test_queue
return self._datasets
@staticmethod
def next_batches(dataloader, num_batches):
queue = []
_batch_count = 0
for data in dataloader:
_batch_count += 1
queue.append(data)
if _batch_count > num_batches:
# process iteration
break
return queue
def op_sampler(self, model, architect, args):
return self.search_space.op_sampler(model, architect, args)
def random_sampler(self, model, architect, args):
return self.search_space.random_sampler(model, architect, args)
@staticmethod
def _compute_kendall_tau(ranking_per_epoch, compute_across_time=False):
"""
Compute Kendall tau given the ranking per epochs.
:param ranking_per_epoch:
:param compute_across_time: True for ranking-per-epoch always fixed, False for dynamic list of models.
:return: kd_tau dict{epoch_key: KendallTau}
"""
if compute_across_time:
# Compute Kendall tau for every epochs and save them into result.
epoch_keys = [k for k in reversed(ranking_per_epoch.keys())]
epoch_keys.insert(0, 10000000)
kd_tau = {}
for ind, k in enumerate(epoch_keys[:-1]):
elem = []
if ind == 0:
# Sort the ground-truth ranking
p = sorted([elem[1] for elem in ranking_per_epoch[epoch_keys[ind + 1]]], key=itemgetter(3))
rank_1 = np.array([elem.geno_id for elem in p], dtype=np.uint)
else:
rank_1 = np.array([elem[1].geno_id for elem in ranking_per_epoch[k]], dtype=np.uint)
for j in epoch_keys[ind + 1:]:
rank_2 = np.array([elem[1].geno_id for elem in ranking_per_epoch[j]], dtype=np.uint)
elem.append(kendalltau(rank_1, rank_2))
kd_tau[k] = elem
logging.info("Latest Kendall Tau (ground-truth vs {}): {}".format(epoch_keys[1], kd_tau[10000000][0]))
return kd_tau, kd_tau[10000000][0].correlation
else:
# Dynamic ranking per epoch size, thus only compute the KDT against the final ranking.
epoch_keys = [k for k in reversed(ranking_per_epoch.keys())]
kd_tau = {}
# only sort across the ground-truth
for ind, k in enumerate(epoch_keys):
p = sorted([elem[1] for elem in ranking_per_epoch[k]], key=itemgetter(3))
rank_gt = np.array([elem.geno_id for elem in p], dtype=np.uint)
rank_2 = np.array([elem[1].geno_id for elem in ranking_per_epoch[k]], dtype=np.uint)
kd_tau[k] = kendalltau(rank_gt, rank_2)
# IPython.embed(header='check kendall tau computation again for DARTS search space')
kd_tau[10000000] = kd_tau[epoch_keys[0]]
logging.info("Latest Kendall Tau (ground-truth vs {}): {}".format(epoch_keys[0], kd_tau[epoch_keys[0]][0]))
return kd_tau, kd_tau[epoch_keys[0]][0]
def _save_ranking_results(self, save_data, epoch,
prefix=None,
compute_kdt_before=False,
sparse_kdt=True, sparse_kdt_threshold=1e-3,
percentile=True, percentile_top_K=(3, 5, 10, 20),
random_kdt=True, random_kdt_numrepeat=5, random_kdt_num_archs=(10, 20, 50, 100)):
"""
Save the ranking results if necessary.
13.09.2019: Adding the sparse kendall tau, percentile, random kendall tau.
:param save_data:
:param epoch:
:param prefix: Prefix to the tensorboard scalar.
:param compute_kdt_before: True to compute the kendall tau for additional evaluation approaches.
:param sparse_kdt: True to compute the sparse kendall tau based on the GT accuracy.
:param percentile: True to compute the top K model percentile.
:param percentile_top_K: Number of top K architectures for percentile.
:param random_kdt: True to compute the random K architectures's kendall tau.
:param random_kdt_numrepeat: Number of repeated times for this random
# Repository: mkelcb/knet, file: knet/com/application/logic/knet/knet_manager.py
# -*- coding: utf-8 -*-
#MIT License
#Copyright (c) 2017 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from ....application.utils import plotgen
from ....application.utils import geno_qc
from ....application.logic.knet import knet_main
from ....io import knet_IO
import gc
import numpy as np
from numpy.linalg import norm
from scipy import stats
from pathlib import Path
import random
import os
lastLayerSize_MAX = int(1000) # int(4096 /2)
# delta = (Ve/Vg)
# delta = (1-h2) / h2
#args, args.epochs, args.learnRate, args.momentum, args.evalFreq, args.savFreq, args.predictPheno, args.loadWeights, args.saveWeights, args.randomSeed, args.hidCount, args.hidl2, args.hidAct
# V(G) 0.168545 0.004763
#V(e) 0.006826 0.002168
def addActivation(myNet, hidAct):
if hidAct == 1 : H_Act = knet_main.knSigmoid( myNet)
elif hidAct == 2 : H_Act = knet_main.knRELU( myNet)
elif hidAct == 3 : print("no activation")
elif hidAct == 5 : H_Act = knet_main.knLeakyRELU( myNet)
else : H_Act = knet_main.knSoftplus( myNet)
def getNetworkStructure(myNet) :
layernum = 0
for layerIndex in range(0,len(myNet.layers)) :
layer = myNet.layers[layerIndex]
if type(layer) == knet_main.knnLayer: # for non input types, we have
if layer.Weights_W is not None :
layernum += 1
print("layer " + str(layernum) + " has weight matrix shaped: " + str(layer.Weights_W.shape))
def runKnet(args) :
hLayerCount = args.hidCount
hiddenShrinkage = args.hidl2
# default QC settings used for all non AMBLUP versions
_minObserved = 0.95
_minMAF = 0.01
_minVariance = 0.02
# load plink binary / phenotypes want to load them here, so that we can release the memory once the raw data is no longer used
cc = True
if args.cc == 0 : cc = False
recodecc = True
if args.recodecc == 0 : recodecc = False
genotypeData = knet_IO.loadPLINK(args.knet, loadPhenos = False)
M = genotypeData["M"]
irsIds = genotypeData["rsid"]
IDs = genotypeData["IDs"]
indicesKept = np.asarray( range(M.shape[1]) )
del genotypeData ; gc.collect() # dont need this
y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc)
y = stats.zscore(y) # zscore it so that Beta -> h2 computations work
# if we have a validation set
M_validation = None
y_validation = None
if args.validSet :
genotypeData = knet_IO.loadPLINK(args.validSet, loadPhenos = False, replaceMissing = True) # want to replace -1 with 0s, as we otherwise would have -1s, as later we just delete indices that failed QC for the training set, but won't care for individual missing datas
M_validation = genotypeData["M"]
IDs_validation = genotypeData["IDs"]
print("Loaded number of people for validatin: ", len(M_validation), flush=True )
del genotypeData ; gc.collect() # dont need this
if args.validPhen :
y_validation = knet_IO.loadPLINKPheno(args.validPhen, caseControl = cc, recodeCaseControl = recodecc)
y_validation = stats.zscore(y_validation) # zscore it so that Beta -> h2 computations work
if args.inference == 0 :
# 1. standardise data
if args.qc == 1 :
qc_data = geno_qc.genoQC_all(M, rsIds = irsIds, minObserved = _minObserved, minMAF = _minMAF, minVariance = _minVariance) # we MUST perform QC with the EXACT SAME settings as the 'region scanner' otherwise the region coordinates will be mismatched
#M = qc_data["X"]
rsIds_qc = qc_data["rsIds"] # save away the surviving SNP list that we have used
indicesToRemove = qc_data["indicesToRemove"]
indicesKept = qc_data["indicesKept"]
irsIds = rsIds_qc.tolist()
del qc_data; gc.collect() # overwrite
qc_data = geno_qc.removeList(M, indicesToRemove)
M = qc_data["X"]
del qc_data; gc.collect() # overwrite
else : print("Skipping internal QC", flush=True)
M, mns, sstd = geno_qc.standardise_Genotypes(M) ; gc.collect()
print("After standardising, training data in MBs is: ",geno_qc.getSizeInMBs(M) )
else :
print("Inference data QC", flush=True)
if args.snpIndices is not None :
indicesToKeep = knet_IO.loadIndices(args.snpIndices)
M = M[:,indicesToKeep]
mns = knet_IO.loadVectorFromDisk( args.mns , 'float32') # these are always float32 even in 64 runs
sstd = knet_IO.loadVectorFromDisk( args.sstd , 'float32')
snpIDs = knet_IO.loadsnpIDs(args.snpIDs)
M = M.astype('float32')
M -= mns
M /= sstd
# load final list of RSids
# load mean /SDs
#M = geno_qc.standardise_Genotypes(M) ; gc.collect()
#print("After standardising, training data in MBs is: ",geno_qc.getSizeInMBs(M) )
# get Zscores: have to standardise ONLY over the training, and not the training+ validation together: https://blog.slavv.com/37-reasons-why-your-neural-network-is-not-working-4020854bd607
# will have to implement this for genetic data
if M_validation is not None :
if args.qc == 1 :
# depending on if we are in inference mode, make sure we have the same set of SNPs
if args.inference == 0 : M_validation = np.delete(M_validation, indicesToRemove, axis=1)
else : M_validation = M_validation[:,indicesToKeep]
#qc_data = geno_qc.removeList(M_validation, indicesToRemove)
M_validation = M_validation.astype('float32')
M_validation -= mns
M_validation /= sstd
indices_validation = np.asarray( range(len(M_validation)) ) # is used for sorting
print("After standardising, validation data in MBs is: ",geno_qc.getSizeInMBs(M_validation) )
# Pre-process data:
evalTrainResults = True
BNEnabled = int(args.bnorm) == 1
decay_Enabled = int(args.lr_decay) == 1
# Shuffle data before producing the minibatches to avoid having all-case or all-control minibatches
np.random.seed(args.randomSeed)
random.seed(args.randomSeed)
indices = np.asarray( range(len(M)) ) # is used for sorting
random.shuffle(indices)
M = M[indices]
y = y[indices]
IDs[0] = np.array(IDs[0])
IDs[1] = np.array(IDs[1])
IDs[0] = IDs[0][indices]
IDs[1] = IDs[1][indices]
# reshape data to be the right dimensions for Convolutions
if args.convLayers > 0 :
M = M.reshape(M.shape[0], 1 , 1, M.shape[1])
if M_validation is not None :
M_validation = M_validation.reshape(M_validation.shape[0], 1 , 1, M_validation.shape[1])
# 2. create minibatch list
numIndividuals = M.shape[0]
numSNPs = M.shape[1] # numSNPs = bed.get_nb_markers(), as we may have removed SNPs, we want to know how many are left
len_M = len(M)
len_M_validation = 0
train_GWAS = list()
train_y = list()
minibatch_size = args.batch_size #M.shape[0] # 64
if args.batch_size == 0 : minibatch_size = len(M)
num_batches = len(M) // minibatch_size
# scale the delta by minibatch_size, if we dont have minibatches
ratio = float(minibatch_size) / numIndividuals # this is 1 if there are no minibatches
print("orig L2 Regularizer : " + str(hiddenShrinkage) + " minibatches scaled to " + str(hiddenShrinkage * ratio) )
hiddenShrinkage *= ratio
start = 0
end = minibatch_size
# for i in range(num_batches) :
# train_GWAS.append(M[start:end] )
# train_y.append(y[start:end])
# print("adding batch " + str(i) + " , start/end: " + str(start) + "/" + str(end) )
# start = end
# end += minibatch_size
y_batched = y.copy()
# do this in a more RAM efficient way: keep deleting the bits from the original matrix to free up space as we go along otherwise this step would double the RAM requirements temporarily
for i in range(num_batches) :
train_GWAS.append(M[0:minibatch_size] )
M = M[minibatch_size:len(M)]
train_y.append(y_batched[0:minibatch_size])
y_batched = y_batched[minibatch_size:len(y_batched)]
print("adding batch " + str(i) + ", minibatch size: " + str(minibatch_size) + " / num left in pool: " + str(len(M)) )
gc.collect()
print("train_GWAS[0].shape: " + str( train_GWAS[0].shape) + " // train_y.shape: " + str( train_y[0].shape) )
del M; gc.collect() # free up memory
if M_validation is not None :
len_M_validation = len(M_validation)
if args.batch_size == 0 : minibatch_size = len(M_validation)
test_GWAS = list()
test_y = list()
evalResults = True
num_batches = len(M_validation) // minibatch_size
print("len_M_validation is: " + str(len_M_validation) + ", minibatch size: " + str(minibatch_size) + " args.batch_size: " + str(args.batch_size) + " num_batches is: " + str(num_batches))
start = 0
end = minibatch_size
for i in | |
# Copyright (c) 2010-2011 Lazy 8 Studios, LLC.
# All rights reserved.
import hashlib, pkg_resources
from mako import lookup
from front import models
from front.lib import db, xjson, urls, gametime
from front.backend import deferred
from front.data import load_json, schemas, assets
from front.models import chips, region
from front.callbacks import run_callback, MISSION_CB
import logging
logger = logging.getLogger(__name__)
# The path relative to this package where the mission data is stored.
MISSION_DEFINITIONS = pkg_resources.resource_filename('front', 'data/mission_definitions.json')
# Template cache.
_template_lookup = lookup.TemplateLookup(input_encoding='utf-8', output_encoding='utf-8')
# Fields in speciesList which support Mako templating.
TEMPLATE_FIELDS = ['title', 'summary', 'description']
def add_mission(ctx, user, mission_definition, mission_parent=None, **kwargs):
"""
Create a new Mission Model object for the given user.
Note: specifics is for configuring generic missions for the user's specific needs, and is serialized
JSON stored as a string in the database. The specifics_hash is populated with the hash of the specifics
as the name implies; it's there so that the user can't do the exact same mission twice.
The mission_id is the combination of the mission_definition and the specifics, with the parent_id
having a similar formulation.
NOTE: If the given mission_definition has already been added to this user, then this function will log
a warning and return None indicating the mission already existed. This behavior exists so that if the ordering
of when missions are added is changed on the live system to reflect for instance a change in the story, then
if a user had already received a mission in the previous version of the story it will not raise an exception here,
hopefully allowing a smoother migration experience for existing users to the new story version.
:param ctx: The database context.
:param user: The User who will own this Mission.
:param mission_definition: str The key which identifies this mission type.
Defined in mission_definitions.json.
:param mission_parent: Mission The optional Mission instance which is the parent of this Mission.
:param kwargs: dict All other keyword arguments will be passed through to the creation_hook for this
Mission's mission_callbacks callback. These might be useful when creating the mission specifics.
"""
# As we change the story script, sometimes we change the order when a mission is being added
# to the game. This guard is intended to make that migration more smooth. NOTE: It is critical
# that a given MIS_ key always refers to the same 'mission concept'.
if user.missions.get_only_by_definition(mission_definition) is not None:
logger.warning("Refusing to add exising mission_definition to user. [%s][%s]", mission_definition, user.user_id)
return None
# Determine the md5 hashes of the mission specifics for this Mission instance by running
# the create_specifics callback for the mission code associated with this mission_definition.
specifics = run_callback(MISSION_CB, "create_specifics", mission_definition, ctx=ctx, user=user, **kwargs)
dumped_specifics = xjson.dumps(specifics)
md5 = hashlib.md5()
md5.update(dumped_specifics)
specifics_hash = md5.hexdigest()
if mission_parent:
parent_hash = mission_parent.specifics_hash
else:
parent_hash = ''
params = {}
params['mission_definition'] = mission_definition
params['specifics'] = dumped_specifics
params['specifics_hash'] = specifics_hash
params['parent_hash'] = parent_hash
params['done'] = 0
params['done_at'] = None
params['started_at'] = user.epoch_now
params['viewed_at'] = None
# We need to snapshot the existing list of regions for this user as once the
# new mission is added to the gamestate it will potentially influence the list
# of regions, which will potentially make the check if it is a new region later
# in this factory function fail.
current_user_regions = user.regions.keys()
with db.conn(ctx) as ctx:
db.run(ctx, "insert_mission", user_id=user.user_id, created=gametime.now(), **params)
new_mission = user.missions.create_child(mission_parent=mission_parent, user=user, **params)
# If we have a parent, add ourselves to the hierarchy.
if mission_parent != None:
mission_parent.parts.append(new_mission)
# Issue ADD chips for any regions defined/available for this mission at creation time.
for region_id, constructor_args in new_mission.region_list_callback():
# Adding a region more than once may cause inconsistencies with ADD/DELETE chips.
assert region_id not in current_user_regions
region.add_region_to_user(ctx, user, region_id, **constructor_args)
current_user_regions.append(region_id)
# Issue the mission ADD chip.
new_mission.send_chips(ctx, user)
# If this mission has any children, add them now.
child_parts = run_callback(MISSION_CB, "create_parts", mission_definition, mission=new_mission)
for child_mission_def in child_parts:
add_mission(ctx, user, child_mission_def, mission_parent=new_mission, **kwargs)
# Trigger the was_created callback.
run_callback(MISSION_CB, "was_created", mission_definition, ctx=ctx, user=user, mission=new_mission)
return new_mission
class Mission(chips.Model, models.UserChild):
# These fields come from the mission definitions JSON file.
DEFINITION_FIELDS = frozenset(['title', 'summary', 'description', 'done_notice', 'parent_definition', 'type', 'sort',
'title_icon', 'description_icon'])
id_field = 'mission_id'
fields = frozenset(['mission_definition', 'done', 'done_at', 'specifics', 'specifics_hash', 'started_at', 'viewed_at',
'region_ids', 'parent_id', 'parent_hash', 'mission_parent', 'parts']).union(DEFINITION_FIELDS)
# The list of region_ids being provided to the gamestate by the mission, in its current
# done/not done state. Provided to the gamestate in the 'region_ids' field.
# Note that because LazyFields values are cached, if the 'done' state of this mission changes
# the value of region_ids might need refreshing.
region_ids = chips.LazyField("region_ids", lambda m: m._region_list_ids())
computed_fields = {
'started_at_date': models.EpochDatetimeField('started_at'),
'viewed_at_date': models.EpochDatetimeField('viewed_at'),
}
server_only_fields = frozenset(['parts', 'parent_hash', 'specifics_hash', 'mission_parent'])
unmanaged_fields = frozenset(['mission_parent'])
# user_id, created and updated are database only fields.
def __init__(self, mission_definition, parent_hash, done, specifics, specifics_hash, user,
mission_parent=None, user_id=None, created=None, updated=None, **params):
# Populate the fields which come from the mission definition.
definition = get_mission_definition(mission_definition)
for field in self.DEFINITION_FIELDS:
params[field] = definition.get(field, None)
# This comes from the mission definition file.
parent_definition = params.get('parent_definition')
if mission_parent is not None:
params['parent_id'] = mission_parent.mission_id
elif parent_definition is not None and parent_hash is not None:
params['parent_id'] = "%s-%s" % (parent_definition, parent_hash)
else:
params['parent_id'] = None
params['parts'] = []
# Convert the specifics, which are stored in the DB as serialized JSON, back into
# a Python dict.
specifics = xjson.loads(specifics)
# Construct the mission id which is the definition name and the md5 hash of the specifics.
mission_id = make_mission_id(mission_definition, specifics_hash)
# Render the title, summary and description fields.
params['title'] = _render_template(mission_definition, 'title', {'user': user})
if params['summary'] is not None:
params['summary'] = _render_template(mission_definition, 'summary', {'user': user})
if params['description'] is not None:
params['description'] = _render_template(mission_definition, 'description', {'user': user})
super(Mission, self).__init__(mission_id=mission_id, mission_definition=mission_definition,
mission_parent = mission_parent, parent_hash=parent_hash, done=done,
specifics=specifics, specifics_hash=specifics_hash, **params)
@property
def user(self):
# self.parent is user.missions, the parent of that is the User itself
return self.parent.parent
def is_root_mission(self):
""" Returns True if this mission is a 'root' mission, either a childless single mission or the parent
mission for one or more children.
NOTE: This is not called is_root as that method already exists in chips.Model """
return self.parent_id == None
def is_done(self):
return self.done == 1
def was_viewed(self):
return self.viewed_at != None
@property
def url_title_icon(self):
definition = assets.mission_icon_definition(self.title_icon)
return definition['done'] if self.is_done() else definition['active']
@property
def url_description_icon(self):
definition = assets.mission_icon_definition(self.description_icon)
return definition['done'] if self.is_done() else definition['active']
def next_step(self):
""" Return the next step/sibling mission to this mission. This asserts that this
mission has a parent. If there is no next step, None is returned. """
assert self.mission_parent != None # We should have a parent.
# Will raise ValueError if somehow this mission is not known to the parent.
index = self.mission_parent.parts.index(self)
# We are the last step, there is no next step.
if index == len(self.mission_parent.parts) - 1:
return None
else:
return self.mission_parent.parts[index + 1]
def previous_step(self):
""" Return the previous step/sibling mission to this mission. This asserts that this
mission has a parent. If there is no previous step, None is returned. """
assert self.mission_parent != None # We should have a parent.
# Will raise ValueError if somehow this mission is not known to the parent.
index = self.mission_parent.parts.index(self)
# We are the first step, there is no previous step.
if index == 0:
return None
else:
return self.mission_parent.parts[index - 1]
def siblings(self):
""" Return the array of all other siblings, not including self. """
assert self.mission_parent != None # We should have a parent.
return [part for part in self.mission_parent.parts if part != self]
def done_siblings(self):
""" Return the array of done siblings, not including self. """
assert self.mission_parent != None # We should have a parent.
return [part for part in self.mission_parent.parts if part != self and part.is_done()]
def mark_done(self):
# As we change the story script, sometimes we change the order when a mission is being marked done
# in the game.
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" Attribute interface """
import radical.utils as ru
import radical.utils.signatures as rus
from . import exceptions as se
# ------------------------------------------------------------------------------
import datetime
import traceback
import inspect
import string
import copy
import re
from pprint import pprint
# FIXME: add a tagging 'Monitorable' interface, which enables callbacks.
now = datetime.datetime.now
never = datetime.datetime.min
# ------------------------------------------------------------------------------
#
# define a couple of constants for the attribute API, mostly for registering
# attributes.
#
# type enums
ANY = 'any' # any python type can be set
URL = 'url' # URL type (string + URL parser checks)
INT = 'int' # Integer type
FLOAT = 'float' # float type
STRING = 'string' # string, duh!
BOOL = 'bool' # True or False or Maybe
ENUM = 'enum' # value is any one of a list of candidates
TIME = 'time' # seconds since epoch, or any py time thing
# which can be converted into such
# FIXME: conversion not implemented
# mode enums
WRITEABLE = 'writeable' # the consumer of the interface can change
# the attrib value
READONLY = 'readonly' # the consumer of the interface can not
# change the attrib value. The
# implementation can still change it.
FINAL = 'final' # neither consumer nor implementation can
# change the value anymore
ALIAS = 'alias' # variable is deprecated, and alias'ed to
# a different variable.
# attrib extensions
EXTENDED = 'extended' # attribute added as extension
PRIVATE = 'private' # attribute added as private
# flavor enums
SCALAR = 'scalar' # the attribute value is a single data element
DICT = 'dict' # the attribute value is a dict of data elements
VECTOR = 'vector' # the attribute value is a list of data elements
# ------------------------------------------------------------------------------
#
# Callback (Abstract) Class
#
class Callback () :
"""
Callback base class.
All objects using the Attribute Interface allow registering a callback for
any changes of their attributes, such as 'state' and 'state_detail'. Those
callbacks can be Python callables, or derivatives of this callback base
class. Instances which inherit this base class MUST implement (overload)
the cb() method.
The callable, or the callback's cb() method, is what is invoked whenever the
SAGA implementation is notified of a change on the monitored object's
attribute.
The cb instance receives three parameters upon invocation:
- obj: the watched object instance
- key: the watched attribute (e.g. 'state' or 'state_detail')
- val: the new value of the watched attribute
If the callback returns 'True', it will remain registered after invocation,
to monitor the attribute for the next subsequent state change. On returning
'False' (or nothing), the callback will not be called again.
To register a callback on a object instance, use::
class MyCallback (saga.Callback):
def __init__ (self):
pass
def cb (self, obj, key, val) :
print(" %s\\n %s (%s) : %s" % self._msg, obj, key, val)
jd = saga.job.Description ()
jd.executable = "/bin/date"
js = saga.job.Service ("fork://localhost/")
job = js.create_job(jd)
cb = MyCallback()
job.add_callback(saga.STATE, cb)
job.run()
See documentation of the :class:`saga.Attribute` interface for further
details and examples.
"""
def __call__ (self, obj, key, val) :
return self.cb (obj, key, val)
def cb (self, obj, key, val) :
""" This is the method that needs to be implemented by the application
Keyword arguments::
obj: the watched object instance
key: the watched attribute
val: the new value of the watched attribute
Return::
keep: bool, signals to keep (True) or remove (False) the callback
after invocation
Callback invocation MAY (and in general will) happen in a separate
thread -- so the application needs to make sure that the callback
code is thread-safe.
The boolean return value is used to signal if the callback should
continue to listen for events (return True) , or if it rather should
get unregistered after this invocation (return False).
"""
pass
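# A plain Python callable can be registered in place of a Callback subclass as well
# (a minimal sketch; `job` is assumed to be a saga.job.Job as in the docstring above,
# and the argument order is assumed to follow the cb() signature documented there):
#
#   def state_cb(obj, key, val):
#       print("state change on %s: %s = %s" % (obj, key, val))
#       return True   # keep the callback registered for further changes
#
#   job.add_callback('state', state_cb)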
# ------------------------------------------------------------------------------
#
class _AttributesBase (object) :
"""
This class only exists to host properties -- as object itself does *not* have
properties! This class is not part of the public attribute API.
"""
# --------------------------------------------------------------------------
#
@rus.takes ('_AttributesBase')
@rus.returns (rus.nothing)
def __init__ (self) :
pass
# ------------------------------------------------------------------------------
#
class Attributes (_AttributesBase, ru.DictMixin) :
"""
Attribute Interface Class
The Attributes interface implements the attribute semantics of the SAGA Core
API specification (http://ogf.org/documents/GFD.90.pdf). Additionally, this
implementation provides that semantics via the Python property interface. Note
that a *single* set of attributes is internally managed, no matter what
interface is used for access.
A class which uses this interface can internally specify which attributes
can be set, and what type they have. Also, default values can be specified,
and the class provides rudimentary support for converting scalar
attributes into vector attributes and back.
Also, the consumer of this API can register callbacks, which get triggered
on changes to specific attribute values.
Example use case::
# --------------------------------------------------------------------------------
class Transliterator ( saga.Attributes ) :
def __init__ (self, *args, **kwargs) :
# setting attribs to non-extensible will cause the call to init below to
# complain if attributes are specified. Default is extensible.
# self._attributes_extensible (False)
# pass args to base class init (requires 'extensible')
super (Transliterator, self).__init__ (*args, **kwargs)
# setup class attribs
self._attributes_register ('apple', 'Appel', URL, SCALAR, WRITEABLE)
self._attributes_register ('plum', 'Pruim', STRING, SCALAR, READONLY)
# setting attribs to non-extensible at *this* point will have allowed
# custom user attribs on __init__ time (via args), but will then forbid
# any additional custom attributes.
# self._attributes_extensible (False)
# --------------------------------------------------------------------------------
if __name__ == "__main__":
# define a callback method. This callback can get registered for
# attribute changes later.
# ----------------------------------------------------------------------------
def cb (key, val, obj) :
# the callback gets information about what attribute was changed
# on what object:
print("called: %s - %s - %s" % (key, str(val), type (obj)))
# returning True will keep the callback registered for further
# attribute changes.
return True
# ----------------------------------------------------------------------------
# create a class instance and add a 'cherry' attribute/value on
# creation.
trans = Transliterator (cherry='Kersche')
# use the property interface to mess with the pre-defined
# 'apple' attribute
print("\\n -- apple")
print(trans.apple)
trans.apple = 'Abbel'
print(trans.apple)
# add our callback to the apple attribute, and trigger some changes.
# Note that the callback is also triggered when the attribute's
# value changes w/o user control, e.g. by some internal state
# changes.
trans.add_callback ('apple', cb)
trans.apple = 'Apfel'
# Setting an attribute final is actually an internal method, used by
# the implementation to signal that no further changes on that
# attribute are expected. We use that here for demonstrating the
# concept though. Callback is invoked on set_final().
trans._attributes_set_final ('apple')
trans.apple = 'Abbel'
print(trans.apple)
# mess around with the 'plum' attribute, which was marked as
# ReadOnly on registration time.
print("\\n -- plum")
print(trans.plum)
# trans.plum = 'Pflaume' # raises readonly exception
# trans['plum'] = 'Pflaume' # raises readonly exception
print(trans.plum)
# check if the 'cherry' attribute exists, which got created on
# instantiation time.
print("\\n -- cherry")
print(trans.cherry)
# as we have 'extensible' set, we can add an attribute on the fly,
# via either the property interface, or via the GFD.90 API of
# course.
print("\\n -- peach")
print(trans.peach)
trans.peach = 'Birne'
print(trans.peach)
This example will result in::
-- apple
Appel
Appel
Abbel
called: apple - Abbel Appel - <class '__main__.Transliterator'>
called: apple - Apfel - <class '__main__.Transliterator'>
called: apple - Apfel - <class '__main__.Transliterator'>
Apfel
-- plum
Pruim
Pruim
-- cherry
Kersche
-- peach
Berne
Birne
"""
# internally used constants to distinguish API from adaptor calls
_UP = '_up'
_DOWN = '_down'
# two regexes for converting CamelCase into under_score_casing, as static
# class vars to avoid frequent recompilation
_camel_case_regex_1 = re.compile('(.)([A-Z][a-z]+)')
_camel_case_regex_2 = re.compile('([a-z0-9])([A-Z])')
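# Worked example of what these two regexes are for (a sketch; the actual substitution
# call lives elsewhere in this class): applying _camel_case_regex_1 and then
# _camel_case_regex_2 with a replacement of r'\1_\2' and lowercasing the result turns
# 'JobDescription' into 'job_description' and 'HTTPServer2' into 'http_server2'.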
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
rus.anything,
rus.anything)
@rus.returns (rus.nothing)
def __init__ (self, *args, **kwargs) :
"""
This method is not supposed to be directly called by the consumer of
this API -- it should be called via derived object construction.
_attributes_t_init makes sure that the basic structures are in place
= int(round_up_to_even(number_finger / (fan_factor ** i)) / 2)
dp_num_tot = dp_num_tot + dp_num_half_fan[2 * i] + dp_num_half_fan[2 * i + 1]
dp_num_half_fan.sort()
for i in range(number_fan * 2):
if (i == 0):
dp_index_first_half_fan[i] = 0
dp_index_end_half_fan[i] = dp_num_half_fan[i] - 1
else:
dp_index_first_half_fan[i] = dp_index_first_half_fan[i - 1] + dp_num_half_fan[i - 1]
dp_index_end_half_fan[i] = dp_index_end_half_fan[i - 1] + dp_num_half_fan[i]
for i in range(number_fan):
dp_index_first_half_fan_odd[i] = dp_index_first_half_fan[2 * i]
dp_index_first_half_fan_even[i] = dp_index_first_half_fan[2 * i + 1]
al_num_half_fan = ['' for i in range(number_fan * 2)]
al_index_first_half_fan = ['' for i in range(number_fan * 2)]
al_index_end_half_fan = ['' for i in range(number_fan * 2)]
al_index_first_half_fan_odd = ['' for i in range(number_fan)]
al_index_first_half_fan_even = ['' for i in range(number_fan)]
al_num_tot = 0
for i in range(number_fan):
if (i == 0):
al_num_half_fan[i] = int(number_finger_al / 2)
al_num_half_fan[i + 1] = int(number_finger_al / 2)
else:
al_num_half_fan[2 * i] = int(round_up_to_even(number_finger_al / (fan_factor ** i)) / 2)
al_num_half_fan[2 * i + 1] = int(round_up_to_even(number_finger_al / (fan_factor ** i)) / 2)
al_num_tot = al_num_tot + al_num_half_fan[2 * i] + al_num_half_fan[2 * i + 1]
al_num_half_fan.sort()
for i in range(number_fan * 2):
if (i == 0):
al_index_first_half_fan[i] = 0
al_index_end_half_fan[i] = al_num_half_fan[i] - 1
else:
al_index_first_half_fan[i] = al_index_first_half_fan[i - 1] + al_num_half_fan[i - 1]
al_index_end_half_fan[i] = al_index_end_half_fan[i - 1] + al_num_half_fan[i]
for i in range(number_fan):
al_index_first_half_fan_odd[i] = al_index_first_half_fan[2 * i]
al_index_first_half_fan_even[i] = al_index_first_half_fan[2 * i + 1]
# placements parameters
al_row_layout = ['' for i in range(al_num_tot)]
dp_row_layout = ['' for i in range(dp_num_tot)]
cm_ref_layout = ['' for i in range(int(number_finger_cm_ref / 2))]
cm_row_layout = ['' for i in range(cm_num_tot)]
max_finger = max(al_num_tot, dp_num_tot, cm_num_tot)
guardring_n = ['' for i in range(max_finger + int(number_finger_cm_ref * 0.5) + number_fan * 4)]
guardring_p = ['' for i in range(max_finger + int(number_finger_cm_ref * 0.5) + number_fan * 4)]
# placements
# always place reference current mirror first
for i in range(int(number_finger_cm_ref / 2)):
if (i == 0):
cm_ref_layout[0] = laygen.relplace(templatename=['currentmirror_ref_n'], gridname=pb, xy=[-2, 0],
direction='left')
else:
cm_ref_layout[i] = laygen.relplace(templatename=['currentmirror_ref_n'], gridname=pb,
refobj=cm_ref_layout[i - 1], direction='left')
# if finger number of current mirror is the greatest
if (number_finger_cm >= 2 * number_finger and number_finger_cm >= 2 * number_finger_al):
# current mirror placement
flag1 = 0
for i in range(int(cm_num_tot)):
if (i == 0):
cm_row_layout[0] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=cm_ref_layout[0], xy=[2, 0], direction='right')
elif (i in cm_index_first_eachfan):
cm_row_layout[i] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=cm_row_layout[i - 1], xy=[8, 0], direction='right')
flag1 = flag1 + 1
else:
cm_row_layout[i] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=cm_row_layout[i - 1], direction='right')
# if number_finger >= number_finger_al
if (number_finger >= number_finger_al):
# diff pair placement
flag2 = 0
for i in range(dp_num_tot):
if (i in dp_index_first_half_fan_odd):
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=cm_row_layout[cm_index_first_eachfan[flag2]],
direction='top')
flag2 = flag2 + 1
elif (i in dp_index_first_half_fan_even):
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], xy=[2, 0], direction='right')
else:
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], direction='right')
# active load placement
flag3 = 0
for i in range(al_num_tot):
if (i in al_index_first_half_fan):
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=dp_row_layout[dp_index_first_half_fan[flag3]],
direction='top')
flag3 = flag3 + 1
else:
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], direction='right')
# if number_finger < number_finger_al
else:
# active load placement
flag3 = 0
for i in range(al_num_tot):
if (i in al_index_first_half_fan_odd):
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=cm_row_layout[cm_index_first_eachfan[flag3]], xy=[0, 1],
direction='top')
flag3 = flag3 + 1
elif (i in al_index_first_half_fan_even):
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], xy=[2, 0], direction='right')
else:
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], direction='right')
# diff pair placement
flag2 = 0
for i in range(dp_num_tot):
if (i in dp_index_first_half_fan):
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=al_row_layout[al_index_first_half_fan[flag2]],
direction='bottom')
flag2 = flag2 + 1
else:
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], direction='right')
# if finger number of diff pair is the greatest
elif (2 * number_finger >= number_finger_cm and number_finger >= number_finger_al):
# diff pair placement
for i in range(dp_num_tot):
if (i == 0):
dp_row_layout[0] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=cm_ref_layout[0], xy=[4, 0], direction='top')
elif (i in dp_index_first_half_fan_odd):
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], xy=[8, 0], direction='right')
elif (i in dp_index_first_half_fan_even):
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], xy=[2, 0], direction='right')
else:
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], direction='right')
# current mirror placement
flag1 = 0
for i in range(cm_num_tot):
if (i in cm_index_first_eachfan):
cm_row_layout[i] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=dp_row_layout[dp_index_first_half_fan_odd[flag1]],
direction='bottom')
flag1 = flag1 + 1
else:
cm_row_layout[i] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=cm_row_layout[i - 1], direction='right')
# active load placement
flag2 = 0
for i in range(al_num_tot):
if (i in al_index_first_half_fan):
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=dp_row_layout[dp_index_first_half_fan[flag2]],
direction='top')
flag2 = flag2 + 1
else:
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], direction='right')
# if finger number of active load is the greatest
else:
# active load placement
for i in range(al_num_tot):
if (i == 0):
al_row_layout[0] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=cm_ref_layout[0], xy=[4, 1], direction='top')
elif (i in al_index_first_half_fan_odd):
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], xy=[8, 0], direction='right')
elif (i in al_index_first_half_fan_even):
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], xy=[2, 0], direction='right')
else:
al_row_layout[i] = laygen.relplace(templatename=['activeload_two_finger_p'], gridname=pb,
refobj=al_row_layout[i - 1], direction='right')
# diff pair placement
flag1 = 0
for i in range(dp_num_tot):
if (i in dp_index_first_half_fan):
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=al_row_layout[al_index_first_half_fan[flag1]],
direction='bottom')
flag1 = flag1 + 1
else:
dp_row_layout[i] = laygen.relplace(templatename=['diffpair_two_finger_n'], gridname=pb,
refobj=dp_row_layout[i - 1], direction='right')
# current mirror placement
flag2 = 0
for i in range(cm_num_tot):
if (i in cm_index_first_eachfan):
cm_row_layout[i] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=dp_row_layout[dp_index_first_half_fan_odd[flag2]],
direction='bottom')
flag2 = flag2 + 1
else:
cm_row_layout[i] = laygen.relplace(templatename=['currentmirror_single_n'], gridname=pb,
refobj=cm_row_layout[i - 1], direction='right')
# Guard Ring placement
for i in range(max_finger + int(number_finger_cm_ref * 0.5) + number_fan * 4):
if (i == 0):
guardring_n[i] = laygen.relplace(templatename=['guard_ring_nwell'], gridname=pb,
refobj=cm_ref_layout[int(number_finger_cm_ref / 2) - 1], xy=[0, 2],
direction='top')
guardring_p[i] = laygen.relplace(templatename=['guard_ring_psub'], gridname=pb,
refobj=cm_ref_layout[int(number_finger_cm_ref / 2) - 1],
direction='bottom')
else:
guardring_n[i] = laygen.relplace(templatename=['guard_ring_nwell'], gridname=pb, refobj=guardring_n[i - 1],
direction='right')
guardring_p[i] = laygen.relplace(templatename=['guard_ring_psub'], gridname=pb, refobj=guardring_p[i - 1],
direction='right')
# routes
# current mirror self routing
idc = laygen.route(xy0=[0, 0], xy1=[-2, 0], gridname0=rg23, refobj0=cm_ref_layout[-1][0].pins['D'],
refobj1=cm_ref_layout[-1][0].pins['D'], via0=[0, 0])
vss = laygen.route(xy0=[0, 0], xy1=[1, 0], gridname0=rg12, refobj0=cm_ref_layout[-1][0].pins['S0'],
refobj1=cm_ref_layout[-1][0].pins['S0'])
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=cm_ref_layout[-1][0].pins['S0'],
refobj1=cm_row_layout[-1][0].pins['S1'])
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=cm_ref_layout[-1][0].pins['G'],
refobj1=cm_row_layout[-1][0].pins['G'])
for i in range(int(number_finger_cm_ref / 2) - 1):
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg23, refobj0=cm_ref_layout[i][0].pins['D'],
refobj1=cm_ref_layout[i + 1][0].pins['D'], via0=[0, 0], via1=[0, 0])
for i in range(number_fan):
for j in range(cm_num_eachfan[i] - 1):
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg23,
refobj0=cm_row_layout[cm_index_first_eachfan[i] + j][0].pins['D'],
refobj1=cm_row_layout[cm_index_first_eachfan[i] + j + 1][0].pins['D'], via0=[0, 0],
via1=[0, 0])
# diff pair routing
# diff pair two inputs and two outputs
for i in range(2 * number_fan):
if (i == 0):
inp = laygen.route(xy0=[0, 0], xy1=[-2, 0], gridname0=rg12, refobj0=dp_row_layout[0][0].pins['G'],
refobj1=dp_row_layout[0][0].pins['G'], via0=[0, 0])
elif (i == 1):
inm = laygen.route(xy0=[0, 0], xy1=[-2, 0], gridname0=rg12,
refobj0=dp_row_layout[dp_index_first_half_fan[i]][0].pins['G'],
refobj1=dp_row_layout[dp_index_first_half_fan[i]][0].pins['G'], via0=[0, 0])
else:
laygen.route(xy0=[0, 0], xy1=[-2, 0], gridname0=rg12,
refobj0=dp_row_layout[dp_index_first_half_fan[i]][0].pins['G'],
refobj1=dp_row_layout[dp_index_first_half_fan[i]][0].pins['G'], via0=[0, 0])
outm = laygen.route(xy0=[0, 0], xy1=[0, 2], gridname0=rg23,
refobj0=dp_row_layout[dp_index_end_half_fan[-2]][0].pins['D'],
refobj1=dp_row_layout[dp_index_end_half_fan[-2]][0].pins['D'], via0=[0, 0])
outp = laygen.route(xy0=[0, 0], xy1=[0, 2], gridname0=rg23, refobj0=dp_row_layout[-1][0].pins['D'],
refobj1=dp_row_layout[-1][0].pins['D'], via0=[0, 0])
# diff pair gate and drain self connection
for i in range(2 * number_fan):
for j in range(dp_num_half_fan[i] - 1):
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg12,
refobj0=dp_row_layout[dp_index_first_half_fan[i] + j][0].pins['G'],
refobj1=dp_row_layout[dp_index_first_half_fan[i] + j + 1][0].pins['G'], via0=[0, 0],
via1=[0, 0])
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg23,
refobj0=dp_row_layout[dp_index_first_half_fan[i] + j][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan[i] + j + 1][0].pins['D'], via0=[0, 0],
via1=[0, 0])
# diff pair: drain of stage i connects to gate of stage i+1
for i in range(number_fan - 1):
for j in range(dp_num_half_fan[2 * i] - 1):
laygen.route(xy0=[0, -2], xy1=[0, -2], gridname0=rg34,
refobj0=dp_row_layout[dp_index_first_half_fan_odd[i] + j][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan_odd[i] + j + 1][0].pins['D'], via0=[0, 0],
via1=[0, 0])
laygen.route(xy0=[0, 0], xy1=[0, 0], gridname0=rg34,
refobj0=dp_row_layout[dp_index_first_half_fan_even[i] + j][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan_even[i] + j + 1][0].pins['D'], via0=[0, 0],
via1=[0, 0])
laygen.route(xy0=[0, -2], xy1=[-2, -2], gridname0=rg34,
refobj0=dp_row_layout[dp_index_first_half_fan_odd[i]][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan_odd[i + 1]][0].pins['D'], via0=[0, 0], via1=[0, 0])
laygen.route(xy0=[-2, -2], xy1=[-2, -1], gridname0=rg23,
refobj0=dp_row_layout[dp_index_first_half_fan_odd[i + 1]][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan_odd[i + 1]][0].pins['D'], via1=[0, 0])
laygen.route(xy0=[0, 0], xy1=[-2, 0], gridname0=rg34,
refobj0=dp_row_layout[dp_index_first_half_fan_even[i]][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan_even[i + 1]][0].pins['D'], via0=[0, 0], via1=[0, 0])
laygen.route(xy0=[-2, 0], xy1=[-2, -1], gridname0=rg23,
refobj0=dp_row_layout[dp_index_first_half_fan_even[i + 1]][0].pins['D'],
refobj1=dp_row_layout[dp_index_first_half_fan_even[i + 1]][0].pins['D'], via1=[0, 0])
# diff pair source self connection
for i in range(number_fan):
laygen.route(xy0=[0, 0], xy1=[0, 0],
import copy
import numpy as np
class MET(object):
"""
MET (Minimum Execution Time) assigns each ready task to the resource that can execute it in the minimum time
"""
def __init__(self, env):
self.env = env
def schedule(self, state):
"""
:param state: current scheduling state (not used directly; data is read from self.env.env_storage)
:return scheduled_tasks: scheduled tasks mapped to their assigned resources
"""
scheduled_tasks = {}
env_storage = self.env.env_storage
# Initialize a list to record the number of tasks assigned to each PE
# for this scheduling instance
assigned = [0] * (len(self.env.pes))
# For all tasks in the ready queue, find the resource with minimum execution time of the task
for idx, task in enumerate(env_storage.TaskQueues.ready.list):
exec_times = [np.inf] * (len(self.env.pes)) # initialize execution time for all resources to infinite
for i in range(len(self.env.resource_matrix_list)):
if task.name in self.env.resource_matrix_list[i].supported_functionalities:
ind = self.env.resource_matrix_list[i].supported_functionalities.index(task.name)
exec_times[i] = self.env.resource_matrix_list[i].performance[ind]
min_of_exec_times = min(exec_times) # the minimum execution time of the task among PEs
count_minimum = exec_times.count(min_of_exec_times) # check if there are multiple resources with minimum execution time
# if there are two or more PEs satisfying minimum execution
# then we should try to utilize all those PEs
if count_minimum > 1:
# if there are two or more PEs satisfying minimum execution
# populate the IDs of those PEs into a list
min_PE_IDs = [i for i, x in enumerate(exec_times) if x == min_of_exec_times]
# then check whether those PEs are busy or idle
PE_check_list = [not self.env.pes[index].idle for index in min_PE_IDs]
# assign tasks to the idle PEs instead of the ones that are currently busy
if (True in PE_check_list) and (False in PE_check_list):
for PE in PE_check_list:
# if a PE is currently busy, remove it from the $min_PE_IDs list
# so that the task is scheduled onto an idle PE
if PE:
min_PE_IDs.remove(min_PE_IDs[PE_check_list.index(PE)])
# then compare the number of the assigned tasks to remaining PEs
# and choose the one with the lowest number of assigned tasks
assigned_tasks = [assigned[x] for i, x in enumerate(min_PE_IDs)]
PE_ID_index = assigned_tasks.index(min(assigned_tasks))
scheduled_tasks[task.ID] = (min_PE_IDs[PE_ID_index], idx, [])
else:
scheduled_tasks[task.ID] = (exec_times.index(min_of_exec_times), idx, [])
# end of if count_minimum >1:
# since one task has just been assigned to a PE, increase that PE's count by 1
assigned[scheduled_tasks[task.ID][0]] += 1
# end of for idx, task in enumerate(env_storage.TaskQueues.ready.list):
# At the end of this loop, every ready task should have a valid (non-negative)
# PE ID assigned to it in scheduled_tasks
return scheduled_tasks
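# Hedged usage sketch for the schedulers in this module: `env` is assumed to be the
# simulation environment object used above (exposing `pes`, `resource_matrix_list`,
# `jobs` and `env_storage`), and `dispatch` is a hypothetical helper, not part of
# this file.
#
#   scheduler = MET(env)
#   assignments = scheduler.schedule(state=None)
#   for task_id, (pe_id, ready_idx, _) in assignments.items():
#       dispatch(task_id, pe_id)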
class EFT(object):
def __init__(self, env):
self.env = env
self.args = env.args
def schedule(self, state):
'''
This scheduler compares the execution times of the current
task on the available resources and also considers whether a resource
already has a task running. It picks the resource which will give the
earliest finish time for the task.
'''
scheduled_tasks = {}
env_storage = self.env.env_storage
for idx, task in enumerate(env_storage.TaskQueues.ready.list):
comparison = [np.inf] * len(self.env.pes) # Initialize the comparison vector
comm_ready = [0] * len(self.env.pes) # A list to store the max communication times for each PE
if self.args.verbose:
print('[D] Time %s: The scheduler function is called with task %s'
% (self.env.now(), task.ID))
for i in range(len(self.env.resource_matrix_list)):
# if the task is supported by the resource, retrieve the index of the task
if task.name in self.env.resource_matrix_list[i].supported_functionalities:
ind = self.env.resource_matrix_list[i].supported_functionalities.index(task.name)
# $PE_comm_wait_times is a list to store the estimated communication time
# (or the remaining communication time) of all predecessors of a task for a PE
# As the simulation moves forward, relevant data is sent once a task completes;
# depending on the current time instant, either the whole communication time or
# only the remaining communication time should be considered for scheduling
PE_comm_wait_times = []
# $PE_wait_time is a list to store the estimated wait times for a PE
# till that PE is available if the PE is currently running a task
PE_wait_time = []
job_ID = -1 # Initialize the job ID
# Retrieve the job ID which the current task belongs to
for ii, job in enumerate(self.env.jobs.list):
if job.name == task.jobname:
job_ID = ii
for predecessor in self.env.jobs.list[job_ID].task_list[task.base_ID].predecessors:
# data required from the predecessor for $ready_task
c_vol = self.env.jobs.list[job_ID].comm_vol[predecessor, task.base_ID]
# retrieve the real ID of the predecessor based on the job ID
real_predecessor_ID = predecessor + task.ID - task.base_ID
# Initialize following two variables which will be used if
# PE to PE communication is utilized
predecessor_PE_ID = -1
predecessor_finish_time = -1
for completed in env_storage.TaskQueues.completed.list:
if completed.ID == real_predecessor_ID:
predecessor_PE_ID = completed.PE_ID
predecessor_finish_time = completed.finish_time
if self.args.shared_memory:
# Compute the communication time considering the shared memory
# only consider memory to PE communication time
# since the task passed the 1st phase (PE to memory communication)
# and its status changed to ready
memory_to_PE_band = self.env.resource_matrix.comm_band[self.env.resource_matrix_list[-1].ID, i]
shared_memory_comm_time = int(c_vol / memory_to_PE_band)
PE_comm_wait_times.append(shared_memory_comm_time)
if self.args.verbose:
print('[D] Time %s: Estimated communication time between '
'memory to PE %s from task %s to task %s is %d'
% (self.env.now(), i, real_predecessor_ID, task.ID, PE_comm_wait_times[-1]))
# PE_to_PE
else:
# Compute the PE to PE communication time
PE_to_PE_band = self.env.resource_matrix.comm_band[predecessor_PE_ID, i]
PE_to_PE_comm_time = int(c_vol / PE_to_PE_band)
PE_comm_wait_times.append(max((predecessor_finish_time +
PE_to_PE_comm_time - self.env.now()), 0))
if self.args.verbose:
print('[D] Time %s: Estimated communication time between PE %s to PE'
' %s from task %s to task %s is %d'
% (self.env.now(), predecessor_PE_ID, i, real_predecessor_ID,
task.ID, PE_comm_wait_times[-1]))
# $comm_ready contains the estimated communication time
# for the resource in consideration for scheduling
# maximum value is chosen since it represents the time required for all
# data becomes available for the resource.
comm_ready[i] = (max(PE_comm_wait_times))
# end of for predecessor in self.env.jobs.list[job_ID].task_list[task.base_ID].predecessors:
# if a resource currently is executing a task, then the estimated remaining time
# for the task completion should be considered during scheduling
PE_wait_time.append(max((self.env.pes[i].available_time - self.env.now()), 0))
# update the comparison vector accordingly
comparison[i] = self.env.resource_matrix_list[i].performance[ind] \
+ max(comm_ready[i], PE_wait_time[-1])
# end of if (task.name in...
# end of for i in range(len(self.env.resource_matrix_list)):
# after going over each resource, choose the one which gives the minimum result
task_PE_ID = comparison.index(min(comparison))
# Finally, update the estimated available time of the resource to which
# a task is just assigned
self.env.pes[task_PE_ID].available_time = self.env.now() + comparison[task_PE_ID]
scheduled_tasks[task.ID] = (task_PE_ID, idx, [])
return scheduled_tasks
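# Worked example of the EFT comparison computed above (illustrative numbers only):
# if a task needs 10 time units on PE_i, the slowest predecessor's data becomes
# available in 4 units (comm_ready[i]), and PE_i is busy for another 7 units
# (PE_wait_time), then comparison[i] = 10 + max(4, 7) = 17; the PE with the
# smallest such value is selected.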
class ETF(object):
def __init__(self, env):
self.env = env
self.args = self.env.args
def schedule(self, state):
env_storage = self.env.env_storage
ready_list = copy.deepcopy(env_storage.TaskQueues.ready.list)
task_counter = 0
scheduled_tasks = {}
# Iterate through the list of ready tasks until all of them are scheduled
while len(ready_list) > 0:
shortest_task_exec_time = np.inf
shortest_task_pe_id = -1
shortest_comparison = [np.inf] * len(self.env.pes)
for task in ready_list:
comparison = [np.inf] * len(self.env.pes) # Initialize the comparison vector
comm_ready = [0] * len(self.env.pes) # A list to store the max communication times for each PE
if self.args.verbose:
print('[D] Time %s: The scheduler function is called with task %s'
% (self.env.now(), task.ID))
for i in range(len(self.env.resource_matrix_list)):
# if the task is supported by the resource, retrieve the index of the task
if task.name in self.env.resource_matrix_list[i].supported_functionalities:
ind = self.env.resource_matrix_list[i].supported_functionalities.index(task.name)
# $PE_comm_wait_times is a list to store the estimated communication time
# (or the remaining communication time) of all predecessors of a task for a PE
# As the simulation moves forward, relevant data is sent once a task completes;
# depending on the current time instant, either the whole communication time or
# only the remaining communication time should be considered for scheduling
PE_comm_wait_times = []
# $PE_wait_time is a list to store the estimated wait times for a PE
# till that PE is available if the PE is currently running a task
PE_wait_time = []
job_ID = -1 # Initialize the job ID
# Retrieve the job ID which the current task belongs to
for ii, job in enumerate(self.env.jobs.list):
if job.name == task.jobname:
job_ID = ii
for predecessor in self.env.jobs.list[job_ID].task_list[task.base_ID].predecessors:
# data required from the predecessor for $ready_task
c_vol = self.env.jobs.list[job_ID].comm_vol[predecessor, task.base_ID]
# retrieve the real ID of the predecessor based on the job ID
def financials_financial_company_delete_customer_payment_journal(client,
company_id,
customer_payment_journal_id,
if_match=None):
return client.delete_customer_payment_journals(company_id=company_id,
customer_payment_journal_id=customer_payment_journal_id,
if_match=if_match)
def financials_financial_company_delete_dimension(client,
company_id,
dimension_id,
if_match=None):
return client.delete_dimensions(company_id=company_id,
dimension_id=dimension_id,
if_match=if_match)
def financials_financial_company_delete_dimension_value(client,
company_id,
dimension_value_id,
if_match=None):
return client.delete_dimension_values(company_id=company_id,
dimension_value_id=dimension_value_id,
if_match=if_match)
def financials_financial_company_delete_employee(client,
company_id,
employee_id,
if_match=None):
return client.delete_employees(company_id=company_id,
employee_id=employee_id,
if_match=if_match)
def financials_financial_company_delete_general_ledger_entry(client,
company_id,
general_ledger_entry_id,
if_match=None):
return client.delete_general_ledger_entries(company_id=company_id,
general_ledger_entry_id=general_ledger_entry_id,
if_match=if_match)
def financials_financial_company_delete_item(client,
company_id,
item_id,
if_match=None):
return client.delete_items(company_id=company_id,
item_id=item_id,
if_match=if_match)
def financials_financial_company_delete_item_category(client,
company_id,
item_category_id,
if_match=None):
return client.delete_item_categories(company_id=company_id,
item_category_id=item_category_id,
if_match=if_match)
def financials_financial_company_delete_journal(client,
company_id,
journal_id,
if_match=None):
return client.delete_journals(company_id=company_id,
journal_id=journal_id,
if_match=if_match)
def financials_financial_company_delete_journal_line(client,
company_id,
journal_line_id,
if_match=None):
return client.delete_journal_lines(company_id=company_id,
journal_line_id=journal_line_id,
if_match=if_match)
def financials_financial_company_delete_payment_method(client,
company_id,
payment_method_id,
if_match=None):
return client.delete_payment_methods(company_id=company_id,
payment_method_id=payment_method_id,
if_match=if_match)
def financials_financial_company_delete_payment_term(client,
company_id,
payment_term_id,
if_match=None):
return client.delete_payment_terms(company_id=company_id,
payment_term_id=payment_term_id,
if_match=if_match)
def financials_financial_company_delete_picture(client,
company_id,
picture_id,
if_match=None):
return client.delete_picture(company_id=company_id,
picture_id=picture_id,
if_match=if_match)
def financials_financial_company_delete_purchase_invoice(client,
company_id,
purchase_invoice_id,
if_match=None):
return client.delete_purchase_invoices(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
if_match=if_match)
def financials_financial_company_delete_purchase_invoice_line(client,
company_id,
purchase_invoice_line_id,
if_match=None):
return client.delete_purchase_invoice_lines(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
if_match=if_match)
def financials_financial_company_delete_sale_credit_memo(client,
company_id,
sales_credit_memo_id,
if_match=None):
return client.delete_sales_credit_memos(company_id=company_id,
sales_credit_memo_id=sales_credit_memo_id,
if_match=if_match)
def financials_financial_company_delete_sale_credit_memo_line(client,
company_id,
sales_credit_memo_line_id,
if_match=None):
return client.delete_sales_credit_memo_lines(company_id=company_id,
sales_credit_memo_line_id=sales_credit_memo_line_id,
if_match=if_match)
def financials_financial_company_delete_sale_invoice(client,
company_id,
sales_invoice_id,
if_match=None):
return client.delete_sales_invoices(company_id=company_id,
sales_invoice_id=sales_invoice_id,
if_match=if_match)
def financials_financial_company_delete_sale_invoice_line(client,
company_id,
sales_invoice_line_id,
if_match=None):
return client.delete_sales_invoice_lines(company_id=company_id,
sales_invoice_line_id=sales_invoice_line_id,
if_match=if_match)
def financials_financial_company_delete_sale_order(client,
company_id,
sales_order_id,
if_match=None):
return client.delete_sales_orders(company_id=company_id,
sales_order_id=sales_order_id,
if_match=if_match)
def financials_financial_company_delete_sale_order_line(client,
company_id,
sales_order_line_id,
if_match=None):
return client.delete_sales_order_lines(company_id=company_id,
sales_order_line_id=sales_order_line_id,
if_match=if_match)
def financials_financial_company_delete_sale_quote(client,
company_id,
sales_quote_id,
if_match=None):
return client.delete_sales_quotes(company_id=company_id,
sales_quote_id=sales_quote_id,
if_match=if_match)
def financials_financial_company_delete_sale_quote_line(client,
company_id,
sales_quote_line_id,
if_match=None):
return client.delete_sales_quote_lines(company_id=company_id,
sales_quote_line_id=sales_quote_line_id,
if_match=if_match)
def financials_financial_company_delete_shipment_method(client,
company_id,
shipment_method_id,
if_match=None):
return client.delete_shipment_methods(company_id=company_id,
shipment_method_id=shipment_method_id,
if_match=if_match)
def financials_financial_company_delete_tax_area(client,
company_id,
tax_area_id,
if_match=None):
return client.delete_tax_areas(company_id=company_id,
tax_area_id=tax_area_id,
if_match=if_match)
def financials_financial_company_delete_tax_group(client,
company_id,
tax_group_id,
if_match=None):
return client.delete_tax_groups(company_id=company_id,
tax_group_id=tax_group_id,
if_match=if_match)
def financials_financial_company_delete_unit_of_measure(client,
company_id,
unit_of_measure_id,
if_match=None):
return client.delete_units_of_measure(company_id=company_id,
unit_of_measure_id=unit_of_measure_id,
if_match=if_match)
def financials_financial_company_delete_vendor(client,
company_id,
vendor_id,
if_match=None):
return client.delete_vendors(company_id=company_id,
vendor_id=vendor_id,
if_match=if_match)
def financials_financial_company_list_account(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_accounts(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
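# Hedged usage sketch for the thin wrappers in this module: `graph_client` is a
# hypothetical, already-authenticated client object exposing the list_*/get_*/delete_*
# methods called above; the company id and the shape of `select` are placeholders.
#
#   accounts = financials_financial_company_list_account(graph_client,
#                                                        company_id='<company-guid>',
#                                                        select=['displayName', 'number'])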
def financials_financial_company_list_aged_account_payable(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_aged_accounts_payable(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_aged_account_receivable(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_aged_accounts_receivable(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_company_information(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_company_information(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_country_region(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_countries_regions(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_currency(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_currencies(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_customer(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_customers(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_customer_payment(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_customer_payments(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_customer_payment_journal(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_customer_payment_journals(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_dimension(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_dimensions(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_dimension_value(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_dimension_values(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_employee(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_employees(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_general_ledger_entry(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_general_ledger_entries(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_item(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_items(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_item_category(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_item_categories(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_journal(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_journals(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_journal_line(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_journal_lines(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_payment_method(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_payment_methods(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_payment_term(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_payment_terms(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_picture(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_picture(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_purchase_invoice(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_purchase_invoices(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_purchase_invoice_line(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_purchase_invoice_lines(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_credit_memo(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_credit_memos(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_credit_memo_line(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_credit_memo_lines(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_invoice(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_invoices(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_invoice_line(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_invoice_lines(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_order(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_orders(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_order_line(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_order_lines(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_quote(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_quotes(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_sale_quote_line(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_sales_quote_lines(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_shipment_method(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_shipment_methods(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_tax_area(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_tax_areas(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_tax_group(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_tax_groups(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_unit_of_measure(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_units_of_measure(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_list_vendor(client,
company_id,
orderby=None,
select=None,
expand=None):
return client.list_vendors(company_id=company_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_set_company_information_picture(client,
company_id,
company_information_id,
data):
return client.set_company_information_picture(company_id=company_id,
company_information_id=company_information_id,
data=data)
def financials_financial_company_set_picture_content(client,
company_id,
picture_id,
data):
return client.set_picture_content(company_id=company_id,
picture_id=picture_id,
data=data)
def financials_financial_company_show_account(client,
company_id,
account_id,
select=None,
expand=None):
return client.get_accounts(company_id=company_id,
account_id=account_id,
select=select,
expand=expand)
def financials_financial_company_show_aged_account_payable(client,
company_id,
aged_accounts_payable_id,
select=None,
expand=None):
return client.get_aged_accounts_payable(company_id=company_id,
aged_accounts_payable_id=aged_accounts_payable_id,
select=select,
expand=expand)
def financials_financial_company_show_aged_account_receivable(client,
company_id,
aged_accounts_receivable_id,
select=None,
expand=None):
return client.get_aged_accounts_receivable(company_id=company_id,
aged_accounts_receivable_id=aged_accounts_receivable_id,
select=select,
expand=expand)
def financials_financial_company_show_company_information(client,
company_id,
company_information_id,
select=None,
expand=None):
return client.get_company_information(company_id=company_id,
company_information_id=company_information_id,
select=select,
expand=expand)
def financials_financial_company_show_company_information_picture(client,
company_id,
company_information_id):
return client.get_company_information_picture(company_id=company_id,
company_information_id=company_information_id)
def financials_financial_company_show_country_region(client,
company_id,
country_region_id,
select=None,
expand=None):
return client.get_countries_regions(company_id=company_id,
country_region_id=country_region_id,
select=select,
expand=expand)
def financials_financial_company_show_currency(client,
company_id,
currency_id,
select=None,
expand=None):
return client.get_currencies(company_id=company_id,
currency_id=currency_id,
select=select,
expand=expand)
def financials_financial_company_show_customer(client,
company_id,
customer_id,
select=None,
expand=None):
return client.get_customers(company_id=company_id,
customer_id=customer_id,
select=select,
expand=expand)
def financials_financial_company_show_customer_payment(client,
company_id,
customer_payment_id,
select=None,
expand=None):
return client.get_customer_payments(company_id=company_id,
customer_payment_id=customer_payment_id,
select=select,
expand=expand)
def financials_financial_company_show_customer_payment_journal(client,
company_id,
customer_payment_journal_id,
select=None,
expand=None):
return client.get_customer_payment_journals(company_id=company_id,
customer_payment_journal_id=customer_payment_journal_id,
select=select,
expand=expand)
def financials_financial_company_show_dimension(client,
company_id,
dimension_id,
select=None,
expand=None):
return client.get_dimensions(company_id=company_id,
dimension_id=dimension_id,
select=select,
expand=expand)
def financials_financial_company_show_dimension_value(client,
company_id,
dimension_value_id,
select=None,
expand=None):
return client.get_dimension_values(company_id=company_id,
dimension_value_id=dimension_value_id,
select=select,
expand=expand)
def financials_financial_company_show_employee(client,
company_id,
employee_id,
select=None,
expand=None):
return client.get_employees(company_id=company_id,
employee_id=employee_id,
select=select,
expand=expand)
def financials_financial_company_show_general_ledger_entry(client,
company_id,
general_ledger_entry_id,
select=None,
expand=None):
return client.get_general_ledger_entries(company_id=company_id,
general_ledger_entry_id=general_ledger_entry_id,
select=select,
expand=expand)
def financials_financial_company_show_item(client,
company_id,
item_id,
select=None,
expand=None):
return client.get_items(company_id=company_id,
item_id=item_id,
select=select,
expand=expand)
def financials_financial_company_show_item_category(client,
company_id,
item_category_id,
select=None,
expand=None):
return client.get_item_categories(company_id=company_id,
item_category_id=item_category_id,
select=select,
expand=expand)
def financials_financial_company_show_journal(client,
company_id,
journal_id,
select=None,
expand=None):
return client.get_journals(company_id=company_id,
journal_id=journal_id,
select=select,
expand=expand)
def financials_financial_company_show_journal_line(client,
company_id,
journal_line_id,
select=None,
expand=None):
return client.get_journal_lines(company_id=company_id,
journal_line_id=journal_line_id,
select=select,
expand=expand)
def financials_financial_company_show_payment_method(client,
company_id,
payment_method_id,
select=None,
expand=None):
return client.get_payment_methods(company_id=company_id,
payment_method_id=payment_method_id,
select=select,
expand=expand)
def financials_financial_company_show_payment_term(client,
company_id,
payment_term_id,
select=None,
expand=None):
return client.get_payment_terms(company_id=company_id,
payment_term_id=payment_term_id,
select=select,
expand=expand)
def financials_financial_company_show_picture(client,
company_id,
picture_id,
select=None,
expand=None):
return client.get_picture(company_id=company_id,
picture_id=picture_id,
select=select,
expand=expand)
def financials_financial_company_show_picture_content(client,
company_id,
picture_id):
return client.get_picture_content(company_id=company_id,
picture_id=picture_id)
def financials_financial_company_show_purchase_invoice(client,
company_id,
purchase_invoice_id,
select=None,
expand=None):
return client.get_purchase_invoices(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
select=select,
expand=expand)
def financials_financial_company_show_purchase_invoice_line(client,
company_id,
purchase_invoice_line_id,
select=None,
expand=None):
return client.get_purchase_invoice_lines(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_credit_memo(client,
company_id,
sales_credit_memo_id,
select=None,
expand=None):
return client.get_sales_credit_memos(company_id=company_id,
sales_credit_memo_id=sales_credit_memo_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_credit_memo_line(client,
company_id,
sales_credit_memo_line_id,
select=None,
expand=None):
return client.get_sales_credit_memo_lines(company_id=company_id,
sales_credit_memo_line_id=sales_credit_memo_line_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_invoice(client,
company_id,
sales_invoice_id,
select=None,
expand=None):
return client.get_sales_invoices(company_id=company_id,
sales_invoice_id=sales_invoice_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_invoice_line(client,
company_id,
sales_invoice_line_id,
select=None,
expand=None):
return client.get_sales_invoice_lines(company_id=company_id,
sales_invoice_line_id=sales_invoice_line_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_order(client,
company_id,
sales_order_id,
select=None,
expand=None):
return client.get_sales_orders(company_id=company_id,
sales_order_id=sales_order_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_order_line(client,
company_id,
sales_order_line_id,
select=None,
expand=None):
return client.get_sales_order_lines(company_id=company_id,
sales_order_line_id=sales_order_line_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_quote(client,
company_id,
sales_quote_id,
select=None,
expand=None):
return client.get_sales_quotes(company_id=company_id,
sales_quote_id=sales_quote_id,
select=select,
expand=expand)
def financials_financial_company_show_sale_quote_line(client,
company_id,
sales_quote_line_id,
select=None,
expand=None):
return client.get_sales_quote_lines(company_id=company_id,
sales_quote_line_id=sales_quote_line_id,
select=select,
expand=expand)
def financials_financial_company_show_shipment_method(client,
company_id,
shipment_method_id,
select=None,
expand=None):
return client.get_shipment_methods(company_id=company_id,
shipment_method_id=shipment_method_id,
select=select,
expand=expand)
def financials_financial_company_show_tax_area(client,
company_id,
tax_area_id,
select=None,
expand=None):
return client.get_tax_areas(company_id=company_id,
tax_area_id=tax_area_id,
select=select,
expand=expand)
def financials_financial_company_show_tax_group(client,
company_id,
tax_group_id,
select=None,
expand=None):
return client.get_tax_groups(company_id=company_id,
tax_group_id=tax_group_id,
select=select,
expand=expand)
def financials_financial_company_show_unit_of_measure(client,
company_id,
unit_of_measure_id,
select=None,
expand=None):
return client.get_units_of_measure(company_id=company_id,
unit_of_measure_id=unit_of_measure_id,
select=select,
expand=expand)
def financials_financial_company_show_vendor(client,
company_id,
vendor_id,
select=None,
expand=None):
return client.get_vendors(company_id=company_id,
vendor_id=vendor_id,
select=select,
expand=expand)
def financials_financial_company_update_account(client,
company_id,
account_id,
id_=None,
blocked=None,
category=None,
display_name=None,
last_modified_date_time=None,
number=None,
sub_category=None):
body = {}
body['id'] = id_
body['blocked'] = blocked
body['category'] = category
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['sub_category'] = sub_category
return client.update_accounts(company_id=company_id,
account_id=account_id,
body=body)
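# Usage sketch for the wrappers above (hypothetical client object and placeholder
# GUIDs, not values taken from this module): the "show" helpers forward an id plus
# the OData-style select/expand options, while the "update" helpers collect their
# keyword arguments into a body dict and delegate to the client.
def _example_update_account_usage(client):
    # Rename a ledger account and unblock it; fields left as None are still
    # placed in the body exactly as the wrapper builds it.
    return financials_financial_company_update_account(
        client,
        company_id='00000000-0000-0000-0000-000000000000',  # placeholder GUID
        account_id='11111111-1111-1111-1111-111111111111',  # placeholder GUID
        display_name='Travel Expenses',
        blocked=False)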
def financials_financial_company_update_aged_account_payable(client,
company_id,
aged_accounts_payable_id,
id_=None,
aged_as_of_date=None,
balance_due=None,
currency_code=None,
current_amount=None,
name=None,
period1_amount=None,
period2_amount=None,
period3_amount=None,
period_length_filter=None,
vendor_number=None):
body = {}
body['id'] = id_
body['aged_as_of_date'] = aged_as_of_date
body['balance_due'] = balance_due
body['currency_code'] = currency_code
body['current_amount'] = current_amount
body['name'] = name
body['period1_amount'] = period1_amount
body['period2_amount'] = period2_amount
body['period3_amount'] = period3_amount
body['period_length_filter'] = period_length_filter
body['vendor_number'] = vendor_number
return client.update_aged_accounts_payable(company_id=company_id,
aged_accounts_payable_id=aged_accounts_payable_id,
body=body)
def financials_financial_company_update_aged_account_receivable(client,
company_id,
aged_accounts_receivable_id,
id_=None,
aged_as_of_date=None,
balance_due=None,
currency_code=None,
current_amount=None,
customer_number=None,
name=None,
period1_amount=None,
period2_amount=None,
period3_amount=None,
period_length_filter=None):
body = {}
body['id'] = id_
body['aged_as_of_date'] = aged_as_of_date
body['balance_due'] = balance_due
body['currency_code'] = currency_code
body['current_amount'] = current_amount
body['customer_number'] = customer_number
body['name'] = name
body['period1_amount'] = period1_amount
body['period2_amount'] = period2_amount
body['period3_amount'] = period3_amount
body['period_length_filter'] = period_length_filter
return client.update_aged_accounts_receivable(company_id=company_id,
aged_accounts_receivable_id=aged_accounts_receivable_id,
body=body)
def financials_financial_company_update_company_information(client,
company_id,
company_information_id,
id_=None,
address=None,
currency_code=None,
current_fiscal_year_start_date=None,
display_name=None,
email=None,
fax_number=None,
industry=None,
last_modified_date_time=None,
phone_number=None,
picture=None,
tax_registration_number=None,
website=None):
body = {}
body['id'] = id_
body['address'] = address
body['currency_code'] = currency_code
body['current_fiscal_year_start_date'] = current_fiscal_year_start_date
body['display_name'] = display_name
body['email'] = email
body['fax_number'] = fax_number
body['industry'] = industry
body['last_modified_date_time'] = last_modified_date_time
body['phone_number'] = phone_number
body['picture'] = picture
body['tax_registration_number'] = tax_registration_number
body['website'] = website
return client.update_company_information(company_id=company_id,
company_information_id=company_information_id,
body=body)
def financials_financial_company_update_country_region(client,
company_id,
country_region_id,
id_=None,
address_format=None,
code=None,
display_name=None,
last_modified_date_time=None):
body = {}
body['id'] = id_
body['address_format'] = address_format
body['code'] = code
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
return client.update_countries_regions(company_id=company_id,
country_region_id=country_region_id,
body=body)
def financials_financial_company_update_currency(client,
company_id,
currency_id,
id_=None,
amount_decimal_places=None,
amount_rounding_precision=None,
code=None,
display_name=None,
last_modified_date_time=None,
symbol=None):
body = {}
body['id'] = id_
body['amount_decimal_places'] = amount_decimal_places
body['amount_rounding_precision'] = amount_rounding_precision
body['code'] = code
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['symbol'] = symbol
return client.update_currencies(company_id=company_id,
currency_id=currency_id,
body=body)
def financials_financial_company_update_customer(client,
company_id,
customer_id,
id_=None,
address=None,
blocked=None,
currency_code=None,
currency_id=None,
display_name=None,
email=None,
last_modified_date_time=None,
number=None,
payment_method_id=None,
payment_terms_id=None,
phone_number=None,
shipment_method_id=None,
tax_area_display_name=None,
tax_area_id=None,
tax_liable=None,
tax_registration_number=None,
type_=None,
website=None,
currency=None,
payment_method=None,
payment_term=None,
picture=None,
shipment_method=None):
body = {}
body['id'] = id_
body['address'] = address
body['blocked'] = blocked
body['currency_code'] = currency_code
body['currency_id'] = currency_id
body['display_name'] = display_name
body['email'] = email
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['payment_method_id'] = payment_method_id
    body['payment_terms_id'] = payment_terms_id
    body['phone_number'] = phone_number
    body['shipment_method_id'] = shipment_method_id
    body['tax_area_display_name'] = tax_area_display_name
    body['tax_area_id'] = tax_area_id
    body['tax_liable'] = tax_liable
    body['tax_registration_number'] = tax_registration_number
    body['type'] = type_
    body['website'] = website
    body['currency'] = currency
    body['payment_method'] = payment_method
    body['payment_term'] = payment_term
    body['picture'] = picture
    body['shipment_method'] = shipment_method
    return client.update_customers(company_id=company_id,
                                   customer_id=customer_id,
                                   body=body)
# constant grid for ROC
fpr_grid = np.linspace(0, 1, num=20)
# get class scores
class_score = PyPLS.predict(self, x=x)
            # binary classification
if n_classes == 2:
y_pred = self.predict(x)
accuracy = metrics.accuracy_score(y, y_pred)
precision = metrics.precision_score(y, y_pred)
recall = metrics.recall_score(y, y_pred)
misclassified_samples = np.where(y.ravel() != y_pred.ravel())[0]
f1_score = metrics.f1_score(y, y_pred)
conf_matrix = metrics.confusion_matrix(y, y_pred)
zero_oneloss = metrics.zero_one_loss(y, y_pred)
matthews_mcc = metrics.matthews_corrcoef(y, y_pred)
# Interpolated ROC curve and AUC
roc_curve = metrics.roc_curve(y, class_score.ravel())
tpr = roc_curve[1]
fpr = roc_curve[0]
interpolated_tpr = np.zeros_like(fpr_grid)
interpolated_tpr += interp(fpr_grid, fpr, tpr)
roc_curve = (fpr_grid, interpolated_tpr, roc_curve[2])
auc_area = metrics.auc(fpr_grid, interpolated_tpr)
else:
# multi class classification
y_pred = self.predict(x)
accuracy = metrics.accuracy_score(y, y_pred)
precision = metrics.precision_score(y, y_pred, average='weighted')
recall = metrics.recall_score(y, y_pred, average='weighted')
misclassified_samples = np.where(y.ravel() != y_pred.ravel())[0]
f1_score = metrics.f1_score(y, y_pred, average='weighted')
conf_matrix = metrics.confusion_matrix(y, y_pred)
zero_oneloss = metrics.zero_one_loss(y, y_pred)
matthews_mcc = np.nan
roc_curve = list()
auc_area = list()
                # Generate multiple ROC curves - one for each class in the multi-class case
for predclass in range(self.n_classes):
current_roc = metrics.roc_curve(y, class_score[:, predclass], pos_label=predclass)
# Interpolate all ROC curves to a finite grid
# Makes it easier to average and compare multiple models - with CV in mind
tpr = current_roc[1]
fpr = current_roc[0]
interpolated_tpr = np.zeros_like(fpr_grid)
interpolated_tpr += interp(fpr_grid, fpr, tpr)
roc_curve.append([fpr_grid, interpolated_tpr, current_roc[2]])
auc_area.append(metrics.auc(fpr_grid, interpolated_tpr))
# Obtain residual sum of squares for whole data set and per component
# Same as Chemometrics PLS, this is so we can use VIP's and other metrics as usual
if self.n_classes > 2:
cm_fit = self.cummulativefit(x, dummy_mat)
else:
cm_fit = self.cummulativefit(x, y)
# save the model params
self.m_params = {'PLS': {'R2Y': R2Y, 'R2X': R2X, 'SSX': cm_fit['SSX'], 'SSY': cm_fit['SSY'],
'SSXcomp': cm_fit['SSXcomp'], 'SSYcomp': cm_fit['SSYcomp']},
'DA': {'Accuracy': accuracy, 'AUC': auc_area,
'ConfusionMatrix': conf_matrix, 'ROC': roc_curve,
'MisclassifiedSamples': misclassified_samples,
'Precision': precision, 'Recall': recall,
'F1': f1_score, '0-1Loss': zero_oneloss, 'MatthewsMCC': matthews_mcc,
'ClassPredictions': y_pred}}
except ValueError as verr:
raise verr
def score(self, x, y, sample_weight=None):
"""
        Score the model on the provided data, returning the R2 for the X block (R2X).
        Provided in place of the scikit-learn ClassifierMixin score method, which would return accuracy instead.
        :param x: Data matrix to score the PLS model on.
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
        :type y: list or 1d array
        :param sample_weight: Optional sample weights (accepted for API compatibility, currently unused).
        :type sample_weight: numpy.ndarray, shape [n_samples] or None defaults to None
        :return: The model's R2X, calculated by predicting X and scoring the fit.
        :rtype: float
        :raise ValueError: If there are data mismatch issues with the provided data.
"""
try:
# return metrics.accuracy_score(y, self.predict(x), sample_weight=sample_weight)
return PyPLS.score(self, x, y, block_to_score='x')
except ValueError as verr:
raise verr
def predict(self, x):
"""
        Predict the class labels for the samples in x.
        :param x: data matrix to predict from
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :return: Predicted class labels as discrete values, obtained with argmin against the 0/1 encoding (binary) or the closest class mean in score space (multi-class).
        :rtype: numpy.ndarray, shape (n_samples,)
        :raise ValueError: If no data matrix is passed, or dimensions mismatch issues with the provided data.
:raise AttributeError: Calling the method without fitting the model before.
"""
try:
if self.isfitted is False:
raise AttributeError("Model is not fitted")
            # based on the original encoding as 0, 1 (binary classification)
if self.n_classes == 2:
y_pred = PyPLS.predict(self, x)
class_pred = np.argmin(np.abs(y_pred - np.array([0, 1])), axis=1)
else:
# multiclass classification
pred_scores = self.transform(x=x)
                # assign each sample to the class with the closest mean in score space
closest_class_mean = lambda x: np.argmin(np.linalg.norm((x - self.class_means), axis=1))
class_pred = np.apply_along_axis(closest_class_mean, axis=1, arr=pred_scores)
return class_pred
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
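    # Usage sketch (hypothetical data; the fitted estimator is assumed to be
    # named `model`, which is not part of the original class): predict() returns
    # integer class labels -- the closest of {0, 1} in the binary case, or the
    # index of the nearest class mean in score space in the multi-class case.
    #   x_new = np.random.rand(5, 20)   # 5 samples, 20 variables
    #   labels = model.predict(x_new)   # e.g. array([0, 1, 1, 0, 1])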
def inverse_transform(self, t=None, u=None):
"""
        Transform T or U scores back to the original data space.
        :param t: T scores corresponding to the X data matrix, defaults to None
        :type t: numpy.ndarray or None
        :param u: U scores corresponding to the Y data vector/matrix, defaults to None
        :type u: numpy.ndarray or None
        :return: The reconstructed X data matrix (when t is given) or the predicted Y (when u is given).
        :rtype: numpy.ndarray, shape (n_samples, n_features) or None
        :raise ValueError: If dimensions of input data are mismatched.
"""
try:
if self.isfitted is True:
                if t is not None and u is not None:
                    raise ValueError('Only one of T or U can be provided; set the other to None')
                elif t is None and u is None:
                    raise ValueError('T and U cannot both be None')
# If T is given, return U
elif t is not None:
# calculate x prediction
xpred = np.dot(t, self.loadings_p.T)
if self.x_scaler is not None:
xscaled = self.x_scaler.inverse_transform(xpred)
else:
xscaled = xpred
return xscaled
# If U is given, return T
elif u is not None:
                    # calculate y based on the transpose of the Y loadings
ypred = np.dot(u, self.loadings_q.T)
return ypred
except ValueError as verr:
raise verr
def transform(self, x=None, y=None):
"""
        Calculate the T or U score matrix, equivalent to the scikit-learn TransformerMixin transform method.
:param x: data metrix to be fit
:type x: numpy.ndarray, shape (rows : samples , columns : variables )
:param y: target variable
:type y: list or 1d array
        :return: Latent variable scores: T for the X matrix (when x is given) or U for the Y vector/matrix (when y is given).
        :rtype: numpy.ndarray, shape (n_samples, n_comps)
:raise ValueError: If dimensions of input data are mismatched.
:raise AttributeError: When calling the method before the model is fitted.
"""
try:
# Check if model is fitted or not
if self.isfitted is True:
# If X and Y are passed, complain and do nothing
if (x is not None) and (y is not None):
                    raise ValueError('Only one of x or y can be provided; set the other to None')
# If nothing is passed at all, complain and do nothing
elif (x is None) and (y is None):
                    raise ValueError('x and y cannot both be None')
# If Y is given, return U
elif x is None:
# verify that y is a single vector
if y.ndim != 1:
raise TypeError('Please supply a dummy vector with integer as class membership')
                    # multiclass classification
if self.n_classes > 2:
y = self.y_scaler.transform(pd.get_dummies(y).values)
else:
                        # binary classification
if y.ndim == 1:
y = y.reshape(-1, 1)
y = self.y_scaler.transform(y)
U = np.dot(y, self.rotations_cs)
return U
# If X is given, return T
elif y is None:
                    # add an extra dimension to x if it is a vector
if x.ndim == 1:
x = x.reshape(-1, 1)
                    if self.x_scaler is None:
                        xscaled = x
                    else:
                        # use the scaler fitted during training; do not refit it on new data
                        xscaled = self.x_scaler.transform(x)
T = np.dot(xscaled, self.rotations_ws)
return T
else:
raise AttributeError('Model not fitted yet ')
except ValueError as verr:
raise verr
except AttributeError as atter:
raise atter
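    # Round-trip sketch (hypothetical, assumes a fitted estimator named `model`):
    # transform() projects data onto the latent variables, inverse_transform()
    # maps the scores back towards the original variable space.
    #   T = model.transform(x=x_new)            # scores, shape (n_samples, n_comps)
    #   x_back = model.inverse_transform(t=T)   # approximate reconstruction of x_new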
def cross_validation(self, x, y, cv_method=KFold(7, shuffle=True), outputdist=False,
):
"""
        Cross-validate the model and calculate the Q2 statistic.
        :param x: data matrix to be fit
        :type x: numpy.ndarray, shape (rows : samples , columns : variables )
        :param y: target variable
        :type y: list or 1d array
        :param cv_method: cross-validation method
        :param bool outputdist: Output the whole distribution of cross-validated parameters. Useful for ShuffleSplit or CrossValidators other than KFold; defaults to False
        :return: dict of cross validation scores
        :rtype: dict
:raise TypeError: If the cv_method passed is not a scikit-learn CrossValidator object.
:raise ValueError: If the x and y data matrices are invalid.
"""
try:
# Check if global model is fitted... and if not, fit it using all of X
if self.isfitted is False:
self.fit(x, y)
# Make a copy of the object, to ensure the internal state of the object is not modified during
# the cross_validation method call
cv_pipeline = deepcopy(self)
# Number of splits
ncvrounds = cv_method.get_n_splits()
            # Number of classes
import random
import re
import math
import numpy as np
from src import constants
from src.multi_agent.elements.camera import Camera, CameraRepresentation
from src.my_utils import constant_class
from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound
from src.my_utils.my_math.line import distance_btw_two_point, Line
from src.my_utils.string_operations import parse_list
class MobileCameraType:
"""
Camera types
dof = degree of freedom
1) FIX -- 1 dof beta
2) ROTATIVE -- 2 dof beta,alpha
3) RAIL -- 3 dof beta,alpha,(x,y)=f(s)
4) FREE -- 4 dof beta,alpha,x,y
"""
FIX = 0
ROTATIVE = 1
RAIL = 2
FREE = 3
class MobileCameraRepresentation(CameraRepresentation):
"""
Class MobileCameraRepresentation.
Description :
:param
8. (MobileCameraType) camera_type -- describe what feature the camera has
9. (Trajectory) trajectory -- only used for RAIL camera
    :attributes
8. (MobileCameraType) camera_type -- describe what feature the camera has
9. (Trajectory) trajectory -- only used for RAIL camera
"""
def __init__(self, id=None, xc=None, yc=None, alpha=None, beta=None, field_depth=None, type=None, color=None):
CameraRepresentation.__init__(self, id, xc, yc, alpha, beta, field_depth, color)
self.camera_type = type
self.trajectory = TrajectoryPlaner([])
def update_from_camera(self, camera):
super().update_from_camera(camera)
self.camera_type = camera.camera_type
self.trajectory = TrajectoryPlaner(camera.trajectory.trajectory)
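# Minimal construction sketch (values are illustrative only, not taken from the
# simulator's configuration): a representation mirrors a camera's pose, field and
# type, and can later be refreshed from a live Camera through update_from_camera().
def _example_rotative_representation():
    return MobileCameraRepresentation(id=0, xc=1.0, yc=2.0, alpha=0.0, beta=1.0,
                                      field_depth=5.0, type=MobileCameraType.ROTATIVE,
                                      color=(255, 0, 0))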
class MobileCamera(Camera, MobileCameraRepresentation):
"""
    Class MobileCamera.
Description :
:param
    :attributes
"""
def __init__(self, id=None, xc=None, yc=None, alpha=None, beta=None, trajectory=None, field_depth=None, color=None,
t_add=None, t_del=None, type=None, vx_vy_min=None, vx_vy_max=None, v_alpha_min=None, v_alpha_max=None,
delta_beta=None, v_beta_min=None, v_beta_max=None):
Camera.__init__(self, id, xc, yc, alpha, beta, field_depth, color, t_add, t_del)
camera_attributes_not_to_txt = self.attributes_not_to_txt
MobileCameraRepresentation.__init__(self, id, xc, yc, alpha, beta, field_depth, type, color)
self.attributes_not_to_txt += [elem for elem in camera_attributes_not_to_txt if
elem not in self.attributes_not_to_txt]
self.attributes_not_to_txt += ["coeff_field", "coeff_std_position", "coeff_std_speed", "coeff_std_acc",
"swipe_angle_direction", "swipe_delta_alpha", "last_swipe_direction_change",
"dt_next_swipe_direction_change", "last_swipe_configuration",
"last_swipe_position_change","beta_min","beta_max"]
"""Limit the variation"""
self.vx_vy_min = vx_vy_min
self.vx_vy_max = vx_vy_max
self.v_alpha_min = v_alpha_min
self.v_alpha_max = v_alpha_max
self.v_beta_min = v_beta_min
self.v_beta_max = v_beta_max
self.delta_beta = delta_beta
"""Zoom"""
self.coeff_field = constants.COEFF_VARIATION_FROM_FIELD_DEPTH
self.coeff_std_position = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_POSITION
self.coeff_std_speed = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_SPEED
self.coeff_std_acc = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_ACCELERATION
"""Trajectory"""
self.trajectory = TrajectoryPlaner(trajectory)
"""Variables for the swipe"""
self.swipe_angle_direction = 1
self.swipe_delta_alpha = 0.2
self.last_swipe_direction_change = constants.get_time()
self.dt_next_swipe_direction_change = -10
self.last_swipe_position_change = -10
from src.multi_agent.tools.configuration import Configuration
self.last_swipe_configuration = Configuration(None, None, random.uniform(0, constants.ROOM_DIMENSION_X),
random.uniform(0, constants.ROOM_DIMENSION_Y), 1, 1,
self.field_depth,
False)
self.default_parameters()
def default_parameters(self):
"""Default option"""
if not self.camera_type == MobileCameraType.RAIL:
self.trajectory = TrajectoryPlaner([])
if self.camera_type == MobileCameraType.FIX or self.camera_type == MobileCameraType.ROTATIVE:
self.vx_vy_min = 0
self.vx_vy_max = 0
if self.camera_type == MobileCameraType.FIX:
self.v_alpha_min = 0
self.v_alpha_max = 0
if self.delta_beta is not None and self.beta is not None:
self.beta_min = bound_angle_btw_minus_pi_plus_pi(self.beta - self.delta_beta)
self.beta_max = bound_angle_btw_minus_pi_plus_pi(self.beta + self.delta_beta)
else:
self.beta_min = None
self.beta_max = None
def angle_degToRad(self):
"""
:description
            Transforms angle attributes to radians, assuming they are in degrees
"""
super().angle_degToRad()
if self.delta_beta is not None:
self.delta_beta = math.radians(self.delta_beta)
if self.beta_min is not None:
self.beta_min = math.radians(self.beta_min)
if self.beta_max is not None:
self.beta_max = math.radians(self.beta_max)
self.v_alpha_min = math.radians(self.v_alpha_min)
self.v_alpha_max = math.radians(self.v_alpha_max)
self.v_beta_min = math.radians(self.v_beta_min)
self.v_beta_max = math.radians(self.v_beta_max)
def angle_radToDeg(self):
"""
:description
            Transforms angle attributes to degrees, assuming they are in radians
"""
super().angle_radToDeg()
if self.delta_beta is not None:
self.delta_beta = math.degrees(self.delta_beta)
if self.beta_min is not None:
self.beta_min = math.degrees(self.beta_min)
if self.beta_max is not None:
self.beta_max = math.degrees(self.beta_max)
self.v_alpha_min = math.degrees(self.v_alpha_min)
self.v_alpha_max = math.degrees(self.v_alpha_max)
self.v_beta_min = math.degrees(self.v_beta_min)
self.v_beta_max = math.degrees(self.v_beta_max)
def load_from_save_to_txt(self, s):
"""
:description
            Load attributes from a txt string representation
:param
1. (string) s -- string description of the object, method save_to_txt.
"""
super().load_from_save_to_txt(s)
self.trajectory = TrajectoryPlaner(self.trajectory)
self.default_parameters()
def my_rand(self, bound):
"""
:description
Random function used in randomize
:param
1. ((int,int)) bound -- limit of the random variable that is created.
:return
1. (float) random value btw bound[0] and bound[1]
"""
return random.uniform(bound[0], bound[1])
def randomize(self, camera_type, beta_bound, delta_beta_bound, field_bound, v_xy_min_bound, v_xy_max_bound,
v_alpha_min_bound, v_alpha_max_bound, v_beta_min_bound, v_beta_max_bound):
"""
:description
            Create a mobile camera with random parameters
:param
1.(MobileCameraType) camera_type -- Camera type
2.((int,int))beta_bound - [degree] -- random bound of beta
3.((int,int))delta_beta_bound - [degree] -- random bound of delta_beta
4.((int,int))field_bound - [m] -- random bound of field_detph
            5.((int,int))v_xy_min_bound -[m/s] -- random bound of v_min in x and y axis
6.((int,int))v_xy_max_bound -[m/s] -- random bound of v_max in x and y axis
7.((int,int))v_alpha_min_bound - [degree/s] -- random bound of alpha min
8.((int,int))v_alpha_max_bound - [degree/s] -- random bound of alpha max
9.((int,int))v_beta_min_bound - [degree/s] -- random bound of beta min
            10.((int,int))v_beta_max_bound - [degree/s] -- random bound of beta max
:return
set severals attributes to random values bounded btw parameters
"""
self.xc = self.my_rand((0, constants.ROOM_DIMENSION_X))
self.yc = self.my_rand((0, constants.ROOM_DIMENSION_Y))
self.alpha = bound_angle_btw_minus_pi_plus_pi(self.my_rand((-math.pi, math.pi)))
self.beta = self.my_rand(beta_bound)
self.delta_beta = self.my_rand(delta_beta_bound)
self.field_depth = self.my_rand(field_bound)
self.t_add = [0]
self.t_del = [1000]
self.vx_vy_min = self.my_rand(v_xy_min_bound)
self.vx_vy_max = self.my_rand(v_xy_max_bound)
self.v_alpha_min = self.my_rand(v_alpha_min_bound)
self.v_alpha_max = self.my_rand(v_alpha_max_bound)
self.v_beta_min = self.my_rand(v_beta_min_bound)
self.v_beta_max = self.my_rand(v_beta_max_bound)
self.trajectory = TrajectoryPlaner([])
self.camera_type = camera_type
"""Default values"""
self.set_default_values(xc=self.xc, yc=self.yc, alpha=self.alpha, beta=self.beta, field_depth=self.field_depth)
self.beta_min = self.beta - self.delta_beta
self.beta_max = self.beta + self.delta_beta
self.angle_degToRad()
def compute_field_depth_variation_for_a_new_beta(self, new_beta):
"""
:description
            the field depth is inversely proportional to beta
:param
1. (float) new_beta - [radians] -- new angle from the camera
:return
1. (float) field_depth - [m] -- field depth corresponding to the new beta
"""
delta = new_beta - self.beta
field_depth = self.field_depth - delta * self.coeff_field
field_depth = bound(field_depth, constants.AGENT_CAMERA_FIELD_MIN * self.default_field_depth,
constants.AGENT_CAMERA_FIELD_MAX * self.default_field_depth)
return field_depth
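    # Worked example (illustrative numbers only): with beta = 1.0 rad, field_depth = 5.0 m
    # and coeff_field = 2.0, requesting new_beta = 1.1 rad gives delta = 0.1 and
    # field_depth = 5.0 - 0.1 * 2.0 = 4.8 m before bounding, i.e. opening the view
    # angle shortens the usable depth and narrowing it does the opposite.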
def zoom(self, speed, dt):
"""
:description
            Model the zoom of a camera (modifies beta and field_depth)
effects :
zoom in / zoom out
1) on the field geometry:
a. Increase/decrease beta
b. Decrease/increase the field depth
2) on the precision
c. Decrease/increase the std on the measure
            self.coeff_field -- value > 0, defines the proportionality btw a. and b.
            self.coeff_std_position / coeff_std_speed / coeff_std_acc -- values > 0, define the proportionality btw a. and c.
:param
            1. (float) speed -- going from -1 to 1, + to zoom out, - to zoom in
2. (float) dt -- time
"""
sign = np.sign(speed)
if self.beta_min <= self.beta <= self.beta_max:
if speed == 0:
delta = 0
else:
delta = sign * dt * (self.v_beta_min + math.fabs(speed) * (self.v_beta_max - self.v_beta_min))
        elif self.beta < self.beta_min or self.beta > self.beta_max:
self.beta = bound(self.beta, self.beta_min, self.beta_max)
delta = 0
else:
delta = 0
print("problem in beta target")
self.field_depth = self.compute_field_depth_variation_for_a_new_beta(self.beta + delta)
self.beta += delta
if constants.ERROR_VARIATION_ZOOM:
self.std_measurement_error_position -= delta * self.coeff_std_position
self.std_measurement_error_speed -= delta * self.coeff_std_speed
self.std_measurement_error_acceleration -= delta * self.coeff_std_acc
self.std_measurement_error_position = bound(self.std_measurement_error_position, 0,
self.std_measurement_error_position * 10)
self.std_measurement_error_speed = bound(self.std_measurement_error_speed, 0,
self.std_measurement_error_speed * 10)
self.std_measurement_error_acceleration = bound(self.std_measurement_error_acceleration, 0,
self.std_measurement_error_acceleration * 10)
def rotate(self, speed, dt):
"""
:description
            Rotate the camera in the room (modifies angle alpha)
:param
1. (float) speed -- going from -1 to 1
2. (float) dt -- time
"""
if not self.camera_type == MobileCameraType.FIX:
sign = np.sign(speed)
if speed == 0:
delta = 0
else:
delta = sign * dt * (self.v_alpha_min + math.fabs(speed) * (self.v_alpha_max - self.v_alpha_min))
self.alpha += delta
self.alpha = bound_angle_btw_minus_pi_plus_pi(self.alpha)
def move(self, speed_x, speed_y, dt):
"""
:description
Move the camera in the room (modifies xc and yc)
:param
            1. (float) speed_x -- going from -1 to 1
            2. (float) speed_y -- going from -1 to 1
            3. (float) dt -- time
"""
sign_x = np.sign(speed_x)
sign_y = np.sign(speed_y)
if speed_x == 0:
delta_x = 0
else:
delta_x = sign_x * dt * (self.vx_vy_min + math.fabs(speed_x) * (self.vx_vy_max - self.vx_vy_min))
if speed_y == 0:
delta_y = 0
else:
delta_y = sign_y * dt * (self.vx_vy_min + math.fabs(speed_y) * (self.vx_vy_max - self.vx_vy_min))
if self.camera_type == MobileCameraType.RAIL:
"On the rail it is only 1 dimension"
delta = delta_x
x_new, y_new = self.trajectory.move_on_trajectory(self.xc, self.yc, delta)
self.xc = x_new
self.yc = y_new
elif self.camera_type == MobileCameraType.FREE:
self.xc += delta_x
self.yc += delta_y
self.xc = bound(self.xc, self.xc_min, self.xc_max)
self.yc = bound(self.yc, self.yc_min, self.yc_max)
def set_configuration(self, configuration):
"""
:description
Set the parameters thanks to a configuration
:param
1. (Configuration) configuration -- group several parameters
"""
self.xc = configuration.x
self.yc = configuration.y
self.alpha = configuration.alpha
self.beta = configuration.beta
self.field_depth = configuration.field_depth
def get_edge_points_world_frame(self):
"""
:description
            #TODO - short description
"""
# angles of edge of field of view in cam frame
angle_min, angle_max = -self.beta / 2, self.beta / 2
# distance of depth field along these angles
min_edge = (self.field_depth * math.cos(angle_min), self.field_depth * math.sin(angle_min))
        max_edge = (self.field_depth * math.cos(angle_max), self.field_depth * math.sin(angle_max))
        * ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
:type DisableAutomatedBackup: boolean
:param DisableAutomatedBackup:
Setting DisableAutomatedBackup to ``true`` disables automated or scheduled backups. Automated backups are enabled by default.
:type BackupRetentionCount: integer
:param BackupRetentionCount:
Sets the number of automated backups that you want to keep.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server to update.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow:
``DDD:HH:MM`` (weekly start time) or ``HH:MM`` (daily start time).
Time windows always use coordinated universal time (UTC). Valid strings for day of week (``DDD`` ) are: ``Mon`` , ``Tue`` , ``Wed`` , ``Thr`` , ``Fri`` , ``Sat`` , or ``Sun`` .
:type PreferredBackupWindow: string
:param PreferredBackupWindow:
``DDD:HH:MM`` (weekly start time) or ``HH:MM`` (daily start time).
Time windows always use coordinated universal time (UTC). Valid strings for day of week (``DDD`` ) are: ``Mon`` , ``Tue`` , ``Wed`` , ``Thr`` , ``Fri`` , ``Sat`` , or ``Sun`` .
:rtype: dict
:returns:
"""
pass
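    # Usage sketch (hypothetical server name and window values): only ServerName
    # is required, the remaining keyword arguments are optional, e.g.
    #   response = client.update_server(
    #       ServerName='my-chef-server',
    #       PreferredBackupWindow='Mon:08:00',
    #       BackupRetentionCount=5)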
def update_server_engine_attributes(self, ServerName: str, AttributeName: str, AttributeValue: str = None) -> Dict:
"""
Updates engine-specific attributes on a specified server. The server enters the ``MODIFYING`` state when this operation is in progress. Only one update can occur at a time. You can use this command to reset a Chef server's public key (``CHEF_PIVOTAL_KEY`` ) or a Puppet server's admin password (``PUPPET_ADMIN_PASSWORD`` ).
This operation is asynchronous.
This operation can only be called for servers in ``HEALTHY`` or ``UNHEALTHY`` states. Otherwise, an ``InvalidStateException`` is raised. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServerEngineAttributes>`_
**Request Syntax**
::
response = client.update_server_engine_attributes(
ServerName='string',
AttributeName='string',
AttributeValue='string'
)
**Response Syntax**
::
{
'Server': {
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Server** *(dict) --*
Contains the response to an ``UpdateServerEngineAttributes`` request.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
        The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
# Copyright [2018-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mysql.connector
from mysql.connector import Error
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
import unicodedata
import time
import datetime
import argparse
#import traceback
def fetch_db_data(query,database,host,port,user,password):
    conn = None
    cursor = None
    rows = []
    try:
        conn = mysql.connector.connect(database=database,
                                       host=host,
                                       port=port,
                                       user=user,
                                       password=password)
        cursor = conn.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
    except Error as e:
        print(e)
    finally:
        # Only close handles that were actually opened, otherwise a failed
        # connection would raise a NameError here and at the return below
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
    return rows
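# Usage sketch (placeholder credentials and a hypothetical query/schema, not the
# project's real connection details): fetch_db_data() opens a connection, runs a
# single query and returns the fetched rows as a list of tuples.
def _example_fetch_assemblies(host, user, password):
    query = 'SELECT chain, version FROM assembly LIMIT 10'
    return fetch_db_data(query, 'assembly_registry', host, 3306, user, password)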
def update_assembly_sheet(assembly_db_data,meta_db_data,existing_sheet_records,assembly_sheet,gettime,worksheet_name):
# This method creates a dictionary for both the lists from the db and sheets and makes both
# dicts key on the versioned GCA (which is unique). Once the dicts are generated keys in the
# assembly db dict are compared to the keys in the sheets dict. If a key is not in the sheets
# dict then it is a new entry and gets made into a new row and added to the sheet. If the key
# is present then some tests are done to see if anything needs updating. Some of these tests
# could be made generic, but there are some complex cases like when the filters need updating
# The filters are basically tags for the assemblies that are then used to create the filter
# views in sheets
min_contig_n50_filter = 100000
assembly_db_dict = {}
existing_sheet_dict = {}
max_version_dict = {}
existing_annotations_dict = {}
# This ordering needs to match the ordering of the query on the assembly db
assembly_db_columns = ['subspecies_name','common_name','chain','version','clade','contig_N50','assembly_level','assembly_date','refseq_accession','assembly_name','genome_rep','rnaseq_data', 'genebuilder','progress_status','assembly_group']
# This ordering needs to match the ordering of the columns on the sheet
assembly_sheet_columns = ['GCA','Clade','Species name','Common name','Contig N50','Assembly level','Assembly date','Assembly name','RNAseq data','RefSeq accession','Genebuilder','Status','Assembly group','Expected release','Grant','Notes','Filter: Max version','Filter: Genome rep','Filter: N50','Filter: Non-human']
# This makes a dict for the db on the versioned GCA and also makes a dict to track the highest
# version for a particular GCA (used in filtering later)
# Note the db has entries that are in unicode in some cases and need to be converted
for row in assembly_db_data:
chain = row[assembly_db_columns.index('chain')]
version = row[assembly_db_columns.index('version')]
chain.encode('ascii','ignore')
gca = make_gca(chain,version)
assembly_db_dict[gca] = row
if chain in max_version_dict:
current_max_version = max_version_dict[chain]
if version > current_max_version:
max_version_dict[chain] = version
else:
max_version_dict[chain] = version
# This makes an existing annotations dict based on the meta data db. Note that this db only
# goes back to e80, so there is a small chance that assemblies that were once annotated are not marked
# as handed over in the filters, but this shouldn't be a problem
for row in meta_db_data:
gca = row[0]
gca.encode('ascii','ignore')
existing_annotations_dict[gca] = 1
# This just makes a dict for the sheet based on the versioned GCA
for row in existing_sheet_records:
gca = row[0]
gca.encode('ascii','ignore')
if(gca == 'GCA'):
            continue
else:
existing_sheet_dict[gca] = row
# This is where the majority of the work occurs. All assembly GCAs are examined to determined what
# should be added/updated
# Note that currently a three second sleep is needed to avoid exhausting the Sheets REST API quota
for gca in assembly_db_dict:
# Check that time since last authentication is < 1hr
# If greater than 1 hr, then re-authenticate
if(time.time() - gettime > 60* 59):
print("Re-authenticating API's connection ")
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(credentials_path, scope)
client = gspread.authorize(creds)
gettime = time.time()
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
assembly_sheet = client.open(worksheet_name).worksheet("EnsemblAssemblyRegistry")
assembly_row = assembly_db_dict[gca]
species_name = assembly_row[assembly_db_columns.index('subspecies_name')]
common_name = assembly_row[assembly_db_columns.index('common_name')]
chain = assembly_row[assembly_db_columns.index('chain')]
chain.encode('ascii','ignore')
version = assembly_row[assembly_db_columns.index('version')]
clade = assembly_row[assembly_db_columns.index('clade')]
contig_N50 = assembly_row[assembly_db_columns.index('contig_N50')]
assembly_level = assembly_row[assembly_db_columns.index('assembly_level')]
assembly_date = assembly_row[assembly_db_columns.index('assembly_date')]
refseq_accession = assembly_row[assembly_db_columns.index('refseq_accession')]
assembly_name = assembly_row[assembly_db_columns.index('assembly_name')]
genome_rep = assembly_row[assembly_db_columns.index('genome_rep')]
rnaseq_data = assembly_row[assembly_db_columns.index('rnaseq_data')]
gca = make_gca(chain,version)
genebuilder = assembly_row[assembly_db_columns.index('genebuilder')]
annotation_status = assembly_row[assembly_db_columns.index('progress_status')]
assembly_group = assembly_row[assembly_db_columns.index('assembly_group')]
# If GCA is in meta db, then it means db has been handed over
if gca in existing_annotations_dict:
annotation_status = 'Handed over'
# If the row does not exist then add it in with the filtering info
if not gca in existing_sheet_dict:
# Depending on the assembly group, we match the display to the right project naming convention
# For example, Darwin Tree of Life (DToL), Vertebrates Genomes Project (VGP), etc.
# Ungrouped refers to non-project specific assemblies
if assembly_group == 'dtol':
assembly_group = 'DToL'
elif assembly_group == 'ungrouped':
assembly_group = assembly_group.capitalize()
else:
                assembly_group = assembly_group.upper()
# When an assembly is first written to sheets, its status should be set to 'Not started' and genebuilder set to 'Not assigned'
annotation_status = 'Not started'
genebuilder = 'Not assigned'
new_row = [gca,clade,species_name,common_name,contig_N50,assembly_level,assembly_date.strftime('%Y-%m-%d'),assembly_name,rnaseq_data,refseq_accession,genebuilder,annotation_status,assembly_group,'','Not assigned','']
# This section sets various filters
# Setting filter of versioned GCA
if version == max_version_dict[chain]:
new_row.append(1)
else:
new_row.append(0)
# Setting filter for genome representation
if genome_rep == 'full':
new_row.append(1)
else:
new_row.append(0)
# Setting contig_N50 filter
if contig_N50 >= min_contig_n50_filter:
new_row.append(1)
else:
new_row.append(0)
# Set RNASeq status based on contig_N50 if not already assigned
if rnaseq_data is None:
if contig_N50 >= 100000:
new_row[8] = 'No RNAseq data'
else:
new_row[8] = 'Non candidate assembly'
else:
new_row[8] = rnaseq_data.capitalize()
# There is an issue with the db at the moment with trailing spaces on the species names, but this should get fixed
if not (species_name == "Homo sapiens " or species_name == "Homo sapiens"):
new_row.append(1)
else:
new_row.append(0)
# Add new record to sheets
print(new_row)
insert_index = 2
assembly_sheet.append_row(new_row)
time.sleep(3)
# If it does exist we need to check if an update is required. There are only a few columns this might pertain to
else:
sheet_row = existing_sheet_dict[gca]
sheet_clade_index = assembly_sheet_columns.index('Clade')
sheet_clade_val = sheet_row[sheet_clade_index]
sheet_filter_version_index = assembly_sheet_columns.index('Filter: Max version')
sheet_filter_N50_index = assembly_sheet_columns.index('Filter: N50')
sheet_filter_version_val = sheet_row[sheet_filter_version_index]
sheet_filter_N50_val = sheet_row[sheet_filter_N50_index]
sheet_refseq_accession_index = assembly_sheet_columns.index('RefSeq accession')
sheet_assembly_name_index = assembly_sheet_columns.index('Assembly name')
sheet_refseq_accession_val = sheet_row[sheet_refseq_accession_index]
sheet_assembly_name_val = sheet_row[sheet_assembly_name_index]
sheet_rnaseq_data_index = assembly_sheet_columns.index('RNAseq data')
sheet_rnaseq_data_val = sheet_row[sheet_rnaseq_data_index]
sheet_contig_N50_index = assembly_sheet_columns.index('Contig N50')
sheet_contig_N50_val = sheet_row[sheet_contig_N50_index]
sheet_annotation_status_index = assembly_sheet_columns.index('Status')
sheet_annotation_status_val = sheet_row[sheet_annotation_status_index]
sheet_genebuilder_index = assembly_sheet_columns.index('Genebuilder')
sheet_genebuilder_val = sheet_row[sheet_genebuilder_index]
sheet_assembly_group_index = assembly_sheet_columns.index('Assembly group')
sheet_assembly_group_val = sheet_row[sheet_assembly_group_index]
# Check if transcriptomic data status from db is null.
# If yes, check if assembly has been handed over or if assembly meets candidate assembly criteria
if ((rnaseq_data is None) and (sheet_rnaseq_data_val == 'Non candidate assembly' or sheet_rnaseq_data_val == 'No RNAseq data' or sheet_rnaseq_data_val == 'Not available')):
# Nothing to update
print("No update on rnaseq data status for: " + gca)
# It is possible to annotate a species and handover without RNASeq data
elif rnaseq_data is None and annotation_status == 'Handed over':
# update the RNASeq data status as not applicable
rnaseq_data = 'N/A'
print("Updating rnaseq data status for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_rnaseq_data_index,rnaseq_data)
time.sleep(3)
elif rnaseq_data.lower() != sheet_rnaseq_data_val.lower():
rnaseq_data = rnaseq_data.capitalize()
# update the RNASeq data status with value from db
print("Updating rnaseq data status for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_rnaseq_data_index,rnaseq_data)
time.sleep(3)
# Sometimes we could have assemblies with no contig_N50 value in the sheet. This can cause issues with comparison
if sheet_contig_N50_val is None:
# Set a default value for contig_N50
sheet_contig_N50_val = 0
if contig_N50 != int(sheet_contig_N50_val):
# Update the contig info on the sheet
print("Updating the contig for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_contig_N50_index,contig_N50)
time.sleep(3)
            # Compare clade values between db and sheets
if clade != sheet_clade_val:
# Update the clade
print("Updating the clade for: " + gca)
row_update_index = assembly_sheet.find(gca).row
update_cell_val(assembly_sheet,row_update_index,sheet_clade_index,clade)
time.sleep(3)
# Updating specific filters
if sheet_filter_version_val == "1" and str(version) != str(max_version_dict[chain]):
            # update the max version filter
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import uuid
from collections import defaultdict
from django.db import transaction
from django.db.models import Q, F, Count
from django.utils.functional import cached_property
from bridge.vars import SAFE_VERDICTS, UNSAFE_VERDICTS, ASSOCIATION_TYPE
from marks.models import (
MarkSafe, MarkSafeHistory, MarkUnsafe, MarkUnsafeHistory, MarkUnknown,
MarkSafeReport, MarkUnsafeReport, MarkUnknownReport, MarkSafeTag, MarkUnsafeTag, Tag
)
from caches.models import (
ASSOCIATION_CHANGE_KIND, ReportSafeCache, ReportUnsafeCache, ReportUnknownCache,
SafeMarkAssociationChanges, UnsafeMarkAssociationChanges, UnknownMarkAssociationChanges
)
from reports.verdicts import safe_verdicts_sum, unsafe_verdicts_sum, BugStatusCollector
@transaction.atomic
def update_cache_atomic(queryset, data):
for rep_cache in queryset.select_for_update():
if rep_cache.report_id not in data:
continue
for field, value in data[rep_cache.report_id].items():
setattr(rep_cache, field, value)
rep_cache.save()
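# Shape sketch for the helper above (report ids and field values are illustrative):
# `data` maps a report id to the cache fields that should change, and rows whose
# report_id is missing from `data` are left untouched, e.g.
#   update_cache_atomic(
#       ReportSafeCache.objects.filter(report_id__in=[1, 2]),
#       {1: {'verdict': SAFE_VERDICTS[4][0]}, 2: {'tags': {}}})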
class UpdateSafeCachesOnMarkChange:
def __init__(self, mark, old_links, new_links):
self._mark = mark
self._old_links = old_links
self._new_links = new_links
self._affected_reports = self._old_links | self._new_links
self._cache_queryset = ReportSafeCache.objects.filter(report_id__in=self._affected_reports)
self._markreport_qs = MarkSafeReport.objects\
.filter(report_id__in=self._affected_reports, associated=True).select_related('mark')
self._collected = set()
self._old_data = self.__collect_old_data()
self._new_data = self.__init_new_data()
def save(self):
update_cache_atomic(self._cache_queryset, self._new_data)
return self.__create_changes_cache()
def __collect_old_data(self):
old_data = {}
for cache_obj in self._cache_queryset:
old_data[cache_obj.report_id] = {
'decision_id': cache_obj.decision_id,
'verdict': cache_obj.verdict,
'tags': cache_obj.tags
}
return old_data
def __init_new_data(self):
return dict((cache_obj.report_id, {}) for cache_obj in self._cache_queryset)
def update_all(self):
if 'verdicts' in self._collected and 'tags' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['verdict'] = SAFE_VERDICTS[4][0]
self._new_data[cache_obj.report_id]['tags'] = {}
self._new_data[cache_obj.report_id]['marks_total'] = 0
self._new_data[cache_obj.report_id]['marks_confirmed'] = 0
self._new_data[cache_obj.report_id]['marks_automatic'] = 0
# Count automatic associations
automatic_qs = MarkSafeReport.objects\
.filter(report_id__in=self._affected_reports, type=ASSOCIATION_TYPE[2][0]).values('report_id')\
.annotate(number=Count('report_id')).values_list('report_id', 'number')
for report_id, automatic_num in automatic_qs:
self._new_data[report_id]['marks_automatic'] = automatic_num
for mr in self._markreport_qs:
self.__add_verdict(mr.report_id, mr.mark.verdict)
self.__add_tags(mr.report_id, mr.mark.cache_tags)
self._new_data[mr.report_id]['marks_total'] += 1
self._new_data[mr.report_id]['marks_confirmed'] += int(mr.type == ASSOCIATION_TYPE[3][0])
self._collected.add('verdicts')
self._collected.add('tags')
def update_verdicts(self):
if 'verdicts' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['verdict'] = SAFE_VERDICTS[4][0]
for mr in self._markreport_qs:
self.__add_verdict(mr.report_id, mr.mark.verdict)
self._collected.add('verdicts')
def update_tags(self):
if 'tags' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['tags'] = {}
for mr in self._markreport_qs:
self.__add_tags(mr.report_id, mr.mark.cache_tags)
self._collected.add('tags')
def __get_change_kind(self, report_id):
if report_id in self._old_links:
if report_id in self._new_links:
return ASSOCIATION_CHANGE_KIND[0][0]
return ASSOCIATION_CHANGE_KIND[2][0]
        # Report is always in new links, as the method is called with the needed report id
return ASSOCIATION_CHANGE_KIND[1][0]
@cached_property
def _change_kinds(self):
return dict((report_id, self.__get_change_kind(report_id)) for report_id in self._affected_reports)
def __add_verdict(self, report_id, verdict):
self._new_data[report_id]['verdict'] = safe_verdicts_sum(self._new_data[report_id]['verdict'], verdict)
def __add_tags(self, report_id, tags_list):
for tag in tags_list:
self._new_data[report_id]['tags'].setdefault(tag, 0)
self._new_data[report_id]['tags'][tag] += 1
def __create_changes_cache(self):
# Remove old association changes cache
SafeMarkAssociationChanges.objects.filter(mark=self._mark).delete()
# Create new association changes
identifier = uuid.uuid4()
changes_objects = []
for report_id in self._affected_reports:
verdict_old = self._old_data[report_id]['verdict']
verdict_new = self._new_data[report_id].get('verdict', verdict_old)
tags_old = self._old_data[report_id]['tags']
tags_new = self._new_data[report_id].get('tags', tags_old)
changes_objects.append(SafeMarkAssociationChanges(
identifier=identifier, mark=self._mark,
decision_id=self._old_data[report_id]['decision_id'], report_id=report_id,
kind=self._change_kinds[report_id],
verdict_old=verdict_old, verdict_new=verdict_new,
tags_old=tags_old, tags_new=tags_new
))
SafeMarkAssociationChanges.objects.bulk_create(changes_objects)
return str(identifier)
class UpdateUnsafeCachesOnMarkChange:
def __init__(self, mark, old_links, new_links):
self._mark = mark
self._old_links = old_links
self._new_links = new_links
self._affected_reports = self._old_links | self._new_links
self._cache_queryset = ReportUnsafeCache.objects.filter(report_id__in=self._affected_reports)
self._markreport_qs = MarkUnsafeReport.objects\
.filter(report_id__in=self._affected_reports, associated=True).select_related('mark')
self._collected = set()
self._old_data = self.__collect_old_data()
self._new_data = self.__init_new_data()
def save(self):
update_cache_atomic(self._cache_queryset, self._new_data)
return self.__create_changes_cache()
def __collect_old_data(self):
old_data = {}
for cache_obj in self._cache_queryset:
old_data[cache_obj.report_id] = {
'decision_id': cache_obj.decision_id,
'verdict': cache_obj.verdict,
'status': cache_obj.status,
'tags': cache_obj.tags
}
return old_data
def __init_new_data(self):
return dict((cache_obj.report_id, {}) for cache_obj in self._cache_queryset)
def update_all(self):
if 'verdicts' in self._collected and 'tags' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['verdict'] = UNSAFE_VERDICTS[5][0]
self._new_data[cache_obj.report_id]['status'] = None
self._new_data[cache_obj.report_id]['tags'] = {}
self._new_data[cache_obj.report_id]['marks_total'] = 0
self._new_data[cache_obj.report_id]['marks_confirmed'] = 0
self._new_data[cache_obj.report_id]['marks_automatic'] = 0
# Count automatic associations
automatic_qs = MarkUnsafeReport.objects \
.filter(report_id__in=self._affected_reports, type=ASSOCIATION_TYPE[2][0]).values('report_id') \
.annotate(number=Count('report_id')).values_list('report_id', 'number')
for report_id, automatic_num in automatic_qs:
self._new_data[report_id]['marks_automatic'] = automatic_num
statuses_collector = BugStatusCollector()
for mr in self._markreport_qs:
self.__add_verdict(mr.report_id, mr.mark.verdict)
statuses_collector.add(mr.report_id, mr.mark.verdict, mr.mark.status)
self.__add_tags(mr.report_id, mr.mark.cache_tags)
self._new_data[mr.report_id]['marks_total'] += 1
self._new_data[mr.report_id]['marks_confirmed'] += int(mr.type == ASSOCIATION_TYPE[3][0])
for report_id, status in statuses_collector.result.items():
self._new_data[report_id]['status'] = status
self._collected.add('verdicts')
self._collected.add('statuses')
self._collected.add('tags')
def update_verdicts(self):
if 'verdicts' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['verdict'] = UNSAFE_VERDICTS[5][0]
for mr in self._markreport_qs:
self.__add_verdict(mr.report_id, mr.mark.verdict)
self._collected.add('verdicts')
def update_statuses(self):
if 'statuses' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['status'] = None
statuses_collector = BugStatusCollector()
for mr in self._markreport_qs:
statuses_collector.add(mr.report_id, mr.mark.verdict, mr.mark.status)
for report_id, status in statuses_collector.result.items():
self._new_data[report_id]['status'] = status
self._collected.add('statuses')
def update_tags(self):
if 'tags' in self._collected:
return
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['tags'] = {}
for mr in self._markreport_qs:
self.__add_tags(mr.report_id, mr.mark.cache_tags)
self._collected.add('tags')
def __get_change_kind(self, report_id):
if report_id in self._old_links:
if report_id in self._new_links:
return ASSOCIATION_CHANGE_KIND[0][0]
return ASSOCIATION_CHANGE_KIND[2][0]
        # Otherwise the report must be in the new links, since this method is only called for affected reports
return ASSOCIATION_CHANGE_KIND[1][0]
@cached_property
def _change_kinds(self):
return dict((report_id, self.__get_change_kind(report_id)) for report_id in self._affected_reports)
def __add_verdict(self, report_id, verdict):
self._new_data[report_id]['verdict'] = unsafe_verdicts_sum(self._new_data[report_id]['verdict'], verdict)
def __add_tags(self, report_id, tags_list):
for tag in tags_list:
self._new_data[report_id]['tags'].setdefault(tag, 0)
self._new_data[report_id]['tags'][tag] += 1
def __create_changes_cache(self):
# Remove old association changes cache
UnsafeMarkAssociationChanges.objects.filter(mark=self._mark).delete()
# Create new association changes
identifier = uuid.uuid4()
changes_objects = []
for report_id in self._affected_reports:
verdict_old = self._old_data[report_id]['verdict']
verdict_new = self._new_data[report_id].get('verdict', verdict_old)
status_old = self._old_data[report_id]['status']
            status_new = self._new_data[report_id].get('status', status_old)
tags_old = self._old_data[report_id]['tags']
tags_new = self._new_data[report_id].get('tags', tags_old)
changes_objects.append(UnsafeMarkAssociationChanges(
identifier=identifier, mark=self._mark,
decision_id=self._old_data[report_id]['decision_id'], report_id=report_id,
kind=self._change_kinds[report_id],
verdict_old=verdict_old, verdict_new=verdict_new,
tags_old=tags_old, tags_new=tags_new,
status_old=status_old, status_new=status_new
))
UnsafeMarkAssociationChanges.objects.bulk_create(changes_objects)
return str(identifier)
class UpdateUnknownCachesOnMarkChange:
def __init__(self, mark, old_links, new_links):
self._mark = mark
self._old_links = old_links
self._new_links = new_links
self._affected_reports = self._old_links | self._new_links
self._cache_queryset = ReportUnknownCache.objects.filter(report_id__in=self._affected_reports)
self._markreport_qs = MarkUnknownReport.objects.filter(report_id__in=self._affected_reports, associated=True)
self._collected = False
self._old_data = self.__collect_old_data()
self._new_data = self.__init_new_data()
def save(self):
if self._collected:
update_cache_atomic(self._cache_queryset, self._new_data)
self._collected = False
return self.__create_changes_cache()
def __collect_old_data(self):
old_data = {}
for cache_obj in self._cache_queryset:
old_data[cache_obj.report_id] = {
'decision_id': cache_obj.decision_id,
'problems': cache_obj.problems
}
return old_data
def __init_new_data(self):
return dict((cache_obj.report_id, {}) for cache_obj in self._cache_queryset)
def update_all(self):
for cache_obj in self._cache_queryset:
self._new_data[cache_obj.report_id]['problems'] = {}
self._new_data[cache_obj.report_id]['marks_total'] = 0
self._new_data[cache_obj.report_id]['marks_confirmed'] = 0
self._new_data[cache_obj.report_id]['marks_automatic'] = 0
# Count automatic associations
automatic_qs = MarkUnknownReport.objects \
.filter(report_id__in=self._affected_reports, type=ASSOCIATION_TYPE[2][0]).values('report_id') \
.annotate(number=Count('report_id')).values_list('report_id', 'number')
for report_id, automatic_num in automatic_qs:
self._new_data[report_id]['marks_automatic'] = automatic_num
for mr in self._markreport_qs:
self._new_data[mr.report_id]['problems'].setdefault(mr.problem, 0)
self._new_data[mr.report_id]['problems'][mr.problem] += 1
self._new_data[mr.report_id]['marks_total'] += 1
self._new_data[mr.report_id]['marks_confirmed'] += int(mr.type == ASSOCIATION_TYPE[3][0])
self._collected = True
def __get_change_kind(self, report_id):
if report_id in self._old_links:
if report_id in self._new_links:
return ASSOCIATION_CHANGE_KIND[0][0]
return ASSOCIATION_CHANGE_KIND[2][0]
        # Otherwise the report must be in the new links, since this method is only called for affected reports
return ASSOCIATION_CHANGE_KIND[1][0]
@cached_property
def _change_kinds(self):
return dict((report_id, self.__get_change_kind(report_id)) for report_id in self._affected_reports)
def __create_changes_cache(self):
# Remove old association changes cache
UnknownMarkAssociationChanges.objects.filter(mark=self._mark).delete()
# Create new association changes
identifier = uuid.uuid4()
changes_objects = []
for report_id in self._affected_reports:
problems_old = self._old_data[report_id]['problems']
problems_new = self._new_data[report_id].get('problems', problems_old)
changes_objects.append(UnknownMarkAssociationChanges(
identifier=identifier, mark=self._mark,
decision_id=self._old_data[report_id]['decision_id'], report_id=report_id,
kind=self._change_kinds[report_id],
problems_old=problems_old, problems_new=problems_new
))
UnknownMarkAssociationChanges.objects.bulk_create(changes_objects)
return str(identifier)
class UpdateCachesOnMarkPopulate:
def __init__(self, mark, new_links):
self._mark = mark
self._new_links = new_links
def update(self):
if not self._new_links:
# Nothing changed
return
if isinstance(self._mark, MarkSafe):
self.__update_safes()
elif isinstance(self._mark, MarkUnsafe):
self.__update_unsafes()
elif isinstance(self._mark, MarkUnknown):
self.__update_unknowns()
@transaction.atomic
def __update_safes(self):
for cache_obj in ReportSafeCache.objects.filter(report_id__in=self._new_links).select_for_update():
# All safe links after the population are automatic
cache_obj.marks_automatic += 1
if cache_obj.marks_confirmed == 0:
# If report has confirmed mark, then new populated mark can't affect its cache
cache_obj.verdict = safe_verdicts_sum(cache_obj.verdict, self._mark.verdict)
cache_obj.tags = self.__sum_tags(cache_obj.tags)
cache_obj.marks_total += 1
# Populated mark can't be confirmed, so we don't need to update confirmed number
cache_obj.save()
def __update_unsafes(self):
# Filter new_links with automatic associations as just such associations can affect report's cache
affected_reports = set(MarkUnsafeReport.objects.filter(
report_id__in=self._new_links, mark=self._mark, type=ASSOCIATION_TYPE[2][0]
).values_list('report_id', flat=True))
with transaction.atomic():
for cache_obj in ReportUnsafeCache.objects.filter(report_id__in=affected_reports).select_for_update():
cache_obj.marks_automatic += 1
if cache_obj.marks_confirmed == 0:
# If report has confirmed mark, then new populated mark can't affect its cache
cache_obj.verdict = unsafe_verdicts_sum(cache_obj.verdict, self._mark.verdict)
cache_obj.status = BugStatusCollector.sum(cache_obj.status, self._mark.status, cache_obj.verdict)
cache_obj.tags = self.__sum_tags(cache_obj.tags)
cache_obj.marks_total += 1
# Populated mark can't be confirmed, so we don't need to update confirmed number
cache_obj.save()
def __update_unknowns(self):
new_problems = dict(MarkUnknownReport.objects.filter(mark=self._mark).values_list('report_id', 'problem'))
with transaction.atomic():
for cache_obj in ReportUnknownCache.objects.filter(report_id__in=self._new_links)\
.select_for_update():
# All unknown links after the population are automatic
cache_obj.marks_automatic += 1
# If report has confirmed mark, then new populated mark can't affect its cache
if cache_obj.marks_confirmed == 0 and cache_obj.report_id in new_problems:
problem = new_problems[cache_obj.report_id]
cache_obj.problems.setdefault(problem, 0)
cache_obj.problems[problem] += 1
cache_obj.marks_total += 1
                    # Populated mark can't be confirmed, so we don't need to update confirmed number
                class MockPaginator_Roles(object):
                    def paginate(self, *args, **kwargs):
return [{"Roles": [{"RoleName": "fake-role-1"}, {"RoleName": "fake-role-2"}]}]
iam_client.get_paginator.return_value = MockPaginator_Roles()
with CaptureStdout() as output:
kwargs = _get_aws_list_assets_kwargs()
iam.list_roles([], argparse.Namespace(**kwargs))
self.assertIn("fake-role-1", output)
self.assertIn("fake-role-2", output)
def test_iam_fus_list_assets(self):
def _get_fus_list_assets_kwargs(**kwargs):
# Set default kwargs values, then set user-specified kwargs
custom_kwargs = dict(
cloud_provider="fusillade",
output=None,
force=False,
exclude_headers=False,
)
for kw, val in kwargs.items():
custom_kwargs[kw] = val
return custom_kwargs
with self.subTest("Fusillade list users"):
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
side_effects = [[
"<EMAIL>",
"<EMAIL>"
]]
fus_client().paginate = mock.MagicMock(side_effect=side_effects)
kwargs = _get_fus_list_assets_kwargs()
with CaptureStdout() as output:
iam.list_users([], argparse.Namespace(**kwargs))
self.assertIn("<EMAIL>", output)
self.assertIn("<EMAIL>", output)
with self.subTest("Fusillade list groups"):
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
side_effects = [["fake-group-1", "fake-group-2"]]
fus_client().paginate = mock.MagicMock(side_effect=side_effects)
kwargs = _get_fus_list_assets_kwargs()
with CaptureStdout() as output:
iam.list_groups([], argparse.Namespace(**kwargs))
self.assertIn("fake-group-1", output)
self.assertIn("fake-group-2", output)
with self.subTest("Fusillade list roles"):
with mock.patch("dss.operations.iam.FusilladeClient") as fus_client:
side_effects = [["fake-role-1", "fake-role-2"]]
fus_client().paginate = mock.MagicMock(side_effect=side_effects)
kwargs = _get_fus_list_assets_kwargs()
with CaptureStdout() as output:
iam.list_roles([], argparse.Namespace(**kwargs))
self.assertIn("fake-role-1", output)
self.assertIn("fake-role-2", output)
def test_secrets_crud(self):
# CRUD (create read update delete) test procedure:
# - create new secret
# - list secrets and verify new secret shows up
# - get secret value and verify it is correct
# - update secret value
# - get secret value and verify it is correct
# - delete secret
which_stage = os.environ["DSS_DEPLOYMENT_STAGE"]
which_store = os.environ["DSS_SECRETS_STORE"]
secret_name = random_alphanumeric_string()
testvar_name = f"{which_store}/{which_stage}/{secret_name}"
testvar_value = "Hello world!"
testvar_value2 = "Goodbye world!"
unusedvar_name = f"{which_store}/{which_stage}/admin_user_emails"
with self.subTest("Create a new secret"):
# Monkeypatch the secrets manager
with mock.patch("dss.operations.secrets.sm_client") as sm:
# Creating a new variable will first call get, which will not find it
sm.get_secret_value = mock.MagicMock(return_value=None, side_effect=ClientError({}, None))
# Next we will use the create secret command
sm.create_secret = mock.MagicMock(return_value=None)
# Create initial secret value:
# Dry run first
with SwapStdin(testvar_value):
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=True, infile=None, quiet=True, force=True
),
)
# Provide secret via stdin
with SwapStdin(testvar_value):
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=False, infile=None, quiet=True, force=True
),
)
# Provide secret via infile
with tempfile.NamedTemporaryFile(prefix='dss-test-operations-new-secret-temp-input', mode='w') as f:
f.write(testvar_value)
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=False, infile=f.name, force=True, quiet=True
),
)
# Check error-catching with non-existent infile
mf = 'this-file-is-not-here'
with self.assertRaises(RuntimeError):
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=False, infile=mf, force=True, quiet=True
),
)
with self.subTest("List secrets"):
with mock.patch("dss.operations.secrets.sm_client") as sm:
# Listing secrets requires creating a paginator first,
# so mock what the paginator returns
class MockPaginator(object):
def paginate(self):
# Return a mock page from the mock paginator
return [{"SecretList": [{"Name": testvar_name}, {"Name": unusedvar_name}]}]
sm.get_paginator.return_value = MockPaginator()
# Non-JSON output first
with CaptureStdout() as output:
secrets.list_secrets([], argparse.Namespace(json=False))
self.assertIn(testvar_name, output)
# JSON output
with CaptureStdout() as output:
secrets.list_secrets([], argparse.Namespace(json=True))
all_secrets_output = json.loads("\n".join(output))
self.assertIn(testvar_name, all_secrets_output)
with self.subTest("Get secret value"):
with mock.patch("dss.operations.secrets.sm_client") as sm:
# Requesting the variable will try to get secret value and succeed
sm.get_secret_value.return_value = {"SecretString": testvar_value}
# Now run get secret value in JSON mode and non-JSON mode
# and verify variable name/value is in both.
# New output file
with tempfile.NamedTemporaryFile(prefix='dss-test-operations-get-secret-temp-output', mode='w') as f:
# Try to overwrite outfile without --force
with self.assertRaises(RuntimeError):
secrets.get_secret(
[], argparse.Namespace(secret_name=testvar_name, outfile=f.name, force=False)
)
# Overwrite outfile with --force
secrets.get_secret(
[], argparse.Namespace(secret_name=testvar_name, outfile=f.name, force=True)
)
with open(f.name, 'r') as fr:
file_contents = fr.read()
self.assertIn(testvar_value, file_contents)
# Output secret to stdout
with CaptureStdout() as output:
secrets.get_secret(
[], argparse.Namespace(secret_name=testvar_name, outfile=None, force=False)
)
self.assertIn(testvar_value, "\n".join(output))
with self.subTest("Update existing secret"):
with mock.patch("dss.operations.secrets.sm_client") as sm:
# Updating the variable will try to get secret value and succeed
sm.get_secret_value = mock.MagicMock(return_value={"SecretString": testvar_value})
# Next we will call the update secret command
sm.update_secret = mock.MagicMock(return_value=None)
# Update secret:
# Dry run first
with SwapStdin(testvar_value2):
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=True, infile=None, force=True, quiet=True
),
)
# Use stdin
with SwapStdin(testvar_value2):
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=False, infile=None, force=True, quiet=True
),
)
# Use input file
with tempfile.NamedTemporaryFile(prefix='dss-test-operations-update-secret-temp-input', mode='w') as f:
f.write(testvar_value2)
secrets.set_secret(
[],
argparse.Namespace(
secret_name=testvar_name, dry_run=False, infile=f.name, force=True, quiet=True
),
)
with self.subTest("Delete secret"):
with mock.patch("dss.operations.secrets.sm_client") as sm:
# Deleting the variable will try to get secret value and succeed
sm.get_secret_value = mock.MagicMock(return_value={"SecretString": testvar_value})
sm.delete_secret = mock.MagicMock(return_value=None)
# Delete secret
# Dry run first
secrets.del_secret(
[], argparse.Namespace(secret_name=testvar_name, force=True, dry_run=True, quiet=True)
)
# Real thing
secrets.del_secret(
[], argparse.Namespace(secret_name=testvar_name, force=True, dry_run=False, quiet=True)
)
def test_ssmparams_utilities(self):
prefix = f"/{os.environ['DSS_PARAMETER_STORE']}/{os.environ['DSS_DEPLOYMENT_STAGE']}"
gold_var = f"{prefix}/dummy_variable"
var = "dummy_variable"
new_var = fix_ssm_variable_prefix(var)
self.assertEqual(new_var, gold_var)
var = "/dummy_variable"
new_var = fix_ssm_variable_prefix(var)
self.assertEqual(new_var, gold_var)
var = f"{prefix}/dummy_variable"
new_var = fix_ssm_variable_prefix(var)
self.assertEqual(new_var, gold_var)
var = f"{prefix}/dummy_variable/"
new_var = fix_ssm_variable_prefix(var)
self.assertEqual(new_var, gold_var)
def test_ssmparams_crud(self):
# CRUD (create read update delete) test for setting environment variables in SSM param store
testvar_name = random_alphanumeric_string()
testvar_value = "Hello world!"
# Assemble environment to return
old_env = {"DUMMY_VARIABLE": "dummy_value"}
new_env = dict(**old_env)
new_env[testvar_name] = testvar_value
ssm_new_env = self._wrap_ssm_env(new_env)
with self.subTest("Print the SSM environment"):
with mock.patch("dss.operations.lambda_params.ssm_client") as ssm:
# listing params will call ssm.get_parameter to get the entire environment
ssm.get_parameter = mock.MagicMock(return_value=ssm_new_env)
# Now call our params.py module. Output var=value on each line.
with CaptureStdout() as output:
lambda_params.ssm_environment([], argparse.Namespace(json=False))
self.assertIn(f"{testvar_name}={testvar_value}", output)
def test_lambdaparams_crud(self):
# CRUD (create read update delete) test for setting lambda function environment variables
testvar_name = random_alphanumeric_string()
testvar_value = "Hello world!"
testvar_value2 = "Goodbye world!"
# Assemble an old and new environment to return
old_env = {"DUMMY_VARIABLE": "dummy_value"}
new_env = dict(**old_env)
new_env[testvar_name] = testvar_value
ssm_old_env = self._wrap_ssm_env(old_env)
ssm_new_env = self._wrap_ssm_env(new_env)
lam_old_env = self._wrap_lambda_env(old_env)
lam_new_env = self._wrap_lambda_env(new_env)
with self.subTest("Create a new lambda parameter"):
with mock.patch("dss.operations.lambda_params.ssm_client") as ssm, \
mock.patch("dss.operations.lambda_params.lambda_client") as lam:
# If this is not a dry run, lambda_set in params.py
# will update the SSM first, so we mock those first.
# Before we have set the new test variable for the
# first time, we will see the old environment.
ssm.put_parameter = mock.MagicMock(return_value=None)
ssm.get_parameter = mock.MagicMock(return_value=ssm_old_env)
# The lambda_set func in params.py will update lambdas,
# so we mock the calls that those will make too.
lam.get_function = mock.MagicMock(return_value=None)
lam.get_function_configuration = mock.MagicMock(return_value=lam_old_env)
lam.update_function_configuration = mock.MagicMock(return_value=None)
with SwapStdin(testvar_value):
lambda_params.lambda_set(
[], argparse.Namespace(name=testvar_name, dry_run=True, quiet=True)
)
with SwapStdin(testvar_value):
lambda_params.lambda_set(
[], argparse.Namespace(name=testvar_name, dry_run=False, quiet=True)
)
with self.subTest("List lambda parameters"):
with mock.patch("dss.operations.lambda_params.lambda_client") as lam:
# The lambda_list func in params.py calls get_deployed_lambas, which calls lam.get_function()
# using daemon folder names (this function is called only to ensure no exception is thrown)
lam.get_function = mock.MagicMock(return_value=None)
# Next we call get_deployed_lambda_environment(), which calls lam.get_function_configuration
# (this returns the mocked new env vars json)
lam.get_function_configuration = mock.MagicMock(return_value=lam_new_env)
# Used to specify a lambda by name
stage = os.environ["DSS_DEPLOYMENT_STAGE"]
# Non-JSON fmt
with CaptureStdout() as output:
lambda_params.lambda_list([], argparse.Namespace(json=False))
# Check that all deployed lambdas are present
for lambda_name in lambda_params.get_deployed_lambdas(quiet=True):
self.assertIn(f"{lambda_name}", output)
# JSON fmt
with CaptureStdout() as output:
lambda_params.lambda_list([], argparse.Namespace(json=True))
# Check that all deployed lambdas are present
all_lams_output = json.loads("\n".join(output))
for lambda_name in lambda_params.get_deployed_lambdas(quiet=True):
self.assertIn(lambda_name, all_lams_output)
with self.subTest("Get environments of each lambda function"):
with mock.patch("dss.operations.lambda_params.ssm_client") as ssm, \
mock.patch("dss.operations.lambda_params.lambda_client") as lam:
# lambda_environment() function in dss/operations/lambda_params.py calls get_deployed_lambdas()
# (which only does local operations)
# then it calls get_deployed_lambda_environment() on every lambda,
# which calls lambda_client.get_function() (only called to ensure no exception is thrown)
lam.get_function = mock.MagicMock(return_value=None)
# then calls lambda_client.get_function_configuration()
lam.get_function_configuration = mock.MagicMock(return_value=lam_new_env)
# TODO: reduce copypasta
# Non-JSON, no lambda name specified
with CaptureStdout() as output:
lambda_params.lambda_environment([], argparse.Namespace(lambda_name=None, json=False))
# Check that all deployed lambdas are present
output = "\n".join(output)
for lambda_name in lambda_params.get_deployed_lambdas(quiet=True):
self.assertIn(lambda_name, output)
# Non-JSON, lambda name specified
with CaptureStdout() as output:
lambda_params.lambda_environment([], argparse.Namespace(lambda_name=f"dss-{stage}", json=False))
output = "\n".join(output)
self.assertIn(f"dss-{stage}", output)
# JSON, no lambda name specified
with CaptureStdout() as output:
lambda_params.lambda_environment([], argparse.Namespace(lambda_name=None, json=True))
# Check that all deployed lambdas are present
all_lams_output = json.loads("\n".join(output))
for lambda_name in lambda_params.get_deployed_lambdas(quiet=True):
self.assertIn(lambda_name, all_lams_output)
# JSON, lambda name specified
with CaptureStdout() as output:
lambda_params.lambda_environment([], argparse.Namespace(lambda_name=f"dss-{stage}", json=True))
all_lams_output = json.loads("\n".join(output))
self.assertIn(f"dss-{stage}", all_lams_output)
with self.subTest("Update (set) existing lambda parameters"):
with mock.patch("dss.operations.lambda_params.ssm_client") as ssm, \
mock.patch("dss.operations.lambda_params.lambda_client") as lam:
# Mock the same way we did for create new param above.
# First we mock the SSM param store
ssm.get_parameter = mock.MagicMock(return_value=ssm_new_env)
ssm.put_parameter = mock.MagicMock(return_value=None)
# Next we mock the lambda client
lam.get_function = mock.MagicMock(return_value=None)
lam.get_function_configuration = mock.MagicMock(return_value=lam_new_env)
lam.update_function_configuration = mock.MagicMock(return_value=None)
                # Dry run
import networkx as nx
import tapnx as tapnx
import tapnx.utils_graph as utils_graph
#from networkx.algorithms.shortest_paths.weighted import _weight_function
#from networkx.algorithms.shortest_paths.weighted import _dijkstra_multisource
from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np
import time as time
def shortest_paths_pred(G,pred):
paths = dict()
dists = dict()
# loop through all nodes in topological order.
# store shortest path to current node as sp[node] = sp[pred] + dist(pred, node)
for v in pred.keys():
# access the predecessor
if pred[v]:
u = pred[v][0]
dists[v] = dists[u] + G[u][v]['weight']
paths[v] = paths[u] + [v]
else:
dists[v] = 0
paths[v] = [v]
return dists, paths
def get_edges_from_pred(pred):
edges = []
    # collect a (predecessor, node) edge for every predecessor recorded against each node
for v in pred.keys():
# access the predecessor
if pred[v]:
for u in pred[v]:
edges.append((u,v))
return edges
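# Illustrative worked example (made-up node labels, not from the original module): for
# pred = {1: [], 2: [1], 3: [1], 4: [2, 3]}, get_edges_from_pred returns
# [(1, 2), (1, 3), (2, 4), (3, 4)], i.e. one (predecessor, node) edge per recorded predecessor.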
def shortest_path_dag(G, s):
adj_list = nx.to_dict_of_lists(G)
stack = list(nx.topological_sort(G))
dist = defaultdict(lambda: float("Inf"))
paths = dict()
dist[s] = 0
paths[s] = [s]
#print(stack)
#print(dist)
# Process vertices in topological order
for u in stack:
# Update distances of all adjacent vertices
for v in adj_list[u]:
if dist[v] > dist[u] + G[u][v]['weight']:
dist[v] = dist[u] + G[u][v]['weight']
paths[v] = paths[u] + [v]
return dist, paths
def longest_path_dag(G, s):
adj_list = nx.to_dict_of_lists(G)
stack = list(nx.topological_sort(G))
dist = defaultdict(lambda: -float("Inf"))
paths = dict()
dist[s] = 0
paths[s] = [s]
#print(stack)
#print(dist)
# Process vertices in topological order
for u in stack:
# Update distances of all adjacent vertices
for v in adj_list[u]:
if dist[v] < dist[u] + G[u][v]['weight']:
dist[v] = dist[u] + G[u][v]['weight']
paths[v] = paths[u] + [v]
return dist, paths
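# Illustrative sketch (not part of the original module; the node labels and weights below are
# made up). Both label routines process nodes in topological order, so on this tiny DAG the
# shortest label of node 4 is 4.0 via [1, 2, 3, 4] and the longest is 6.0 via [1, 3, 4].
def _example_dag_labels():
    H = nx.DiGraph()
    H.add_weighted_edges_from([(1, 2, 1.0), (1, 3, 4.0), (2, 3, 1.0), (3, 4, 2.0)])
    dist_l, paths_l = shortest_path_dag(H, 1)
    dist_u, paths_u = longest_path_dag(H, 1)
    return dist_l[4], paths_l[4], dist_u[4], paths_u[4]  # (4.0, [1, 2, 3, 4], 6.0, [1, 3, 4])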
def divergence_node(shortest_path, longest_path):
divergence_node = None
# reverse the shortest list
nodes = shortest_path[::-1]
# scan the nodes in reverse order, exclude last node
    for u in nodes[1:]:
if u in longest_path:
divergence_node = u
break
return divergence_node
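# Illustrative worked example (made-up node labels, not from the original module): with
# shortest_path = [1, 2, 5, 7] and longest_path = [1, 3, 5, 7], the destination node 7 is
# skipped and the backwards scan returns 5, the first node shared by both paths.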
def algorithm_b(G, trips):
no_edges = G.graph['no_edges']
# compute shortest path trees for all origins
    # return a dict {origin: {'bush': networkx graph, 'x': numpy array (length is the number of edges in G, not B!)}}
bushes = initialise_bushes(G,trips,no_edges)
#bushes = drop_edges(bushes)
for origin, bush_dict in bushes.items():
fig, ax = tapnx.plot_graph(bush_dict['bush'], edge_labels=True)
plt.show()
x = np.zeros(no_edges)
# edge attributes used in the calculation of edge travel times
a = utils_graph.get_np_array_from_edge_attribute(G, 'a')
b = utils_graph.get_np_array_from_edge_attribute(G, 'b')
c = utils_graph.get_np_array_from_edge_attribute(G, 'c')
n = utils_graph.get_np_array_from_edge_attribute(G, 'n')
# update the travel times and derivatives
t, t_prime = update_edge_labels(x,a,b,c,n)
# update graph weights
utils_graph.update_edge_attribute(G, 'weight', t)
# update bush weights, required for shortest paths and longest paths
bushes = update_bush_weights(bushes, t)
# this computes U, L, shortest_paths, longest_paths
bushes = update_labels(bushes)
# all or nothing assignment
bushes = assign_initial_flows(bushes, trips)
# compute the total flow on edges from all bushes
x = compute_total_flow(bushes, no_edges)
# update the travel times and derivatives
t, t_prime = update_edge_labels(x,a,b,c,n)
# update bush weights, required for shortest paths and longest paths
bushes = update_bush_weights(bushes, t)
# this computes U, L, shortest_paths, longest_paths
bushes = update_labels(bushes)
# drop links (if shortest path only computed for destinations, not necessary)
bushes = drop_edges(bushes)
# this is needed as original bush connects all destinations, however after assignment, some edges
    # have zero flow and are thus removed; this may disconnect nodes that have a demand of 0
#bushes = clean_up_disconnected_nodes(bushes)
# add shortcuts
bushes = add_shortcut_edges(bushes, G, t)
# update bush weights, required for shortest paths and longest paths
bushes = update_bush_weights(bushes, t)
i = 1
while True:
# this computes U, L, shortest_paths, longest_paths
#bushes = update_labels(bushes)
# using the shortest_paths and the current flow, compute AEC measure
# requires the current flow and current shortest paths, i.e. calculate AEC after computing the new
# shortest paths
#AEC = aec()
# convergence test
#if AEC < tol:
if i > 5:
break
for origin, bush_dict in bushes.items():
print('equilibriating bush for origin {}'.format(origin))
while True:
# update all_bushes with new weights
update_bush_weights(bushes, t)
# update bush labels
bush_dict = update_bush_labels(bush_dict)
# equilibriate bush
bush_dict = equilibriate_bush(bush_dict, t_prime, bushes, no_edges, a, b, c, n)
# compute total flow
x = compute_total_flow(bushes, no_edges)
# compute t and t_prime
t, t_prime = update_edge_labels(x,a,b,c,n)
# update all_bushes with new weights
update_bush_weights(bushes, t)
#update bush labels
bush_dict = update_bush_labels(bush_dict)
# drop edges for bush
bush_dict, dropped = drop_bush_edges(bush_dict)
# add shortcuts for bush
bush_dict, no_edges_added = add_shortcut_bush_edges(bush_dict, G, t, dropped)
if no_edges_added == 0:
break
# equilibriate the bushes by shifting flow from longest path to shortest path
#bushes = equilibriate_bushes(bushes, t_prime)
# compute the total flow on edges from all bushes
#x = compute_total_flow(bushes, no_edges)
# update the travel times and derivatives
#t, t_prime = update_edge_labels(x,a,b,c,n)
# update bush weights, required for shortest paths and longest paths
#bushes = update_bush_weights(bushes, t)
# this computes U, L, shortest_paths, longest_paths
#bushes = update_labels(bushes)
# drop links (if shortest path only computed for destinations, not necessary)
#bushes = drop_edges(bushes)
#bushes = clean_up_disconnected_nodes(bushes)
# add shortcuts
#bushes = add_shortcut_edges(bushes, G, t)
# update bush weights, required for shortest paths and longest paths
# MAY BE A MORE EFFICIENT WAY TO DO THIS
#bushes = update_bush_weights(bushes, t)
# for origin, bush_dict in bushes.items():
# fig, ax = tapnx.plot_graph(bush_dict['bush'], edge_labels=True)
# plt.show()
print(i)
print(x)
i += 1
# initialise a set of bushes for each origin
# note that this computes a bush to every destination; update to only include destinations. subset of pred??
# returns a dict {origin: {'bush': networkx graph, 'x': numpy array, 'L'/'U' label dicts, shortest/longest path dicts, 'destinations', 'origin'}}
def initialise_bushes(G, trips, no_edges):
bushes = dict()
for origin, destinations in trips.items():
pred, distance = nx.dijkstra_predecessor_and_distance(G, origin, cutoff=None, weight="weight")
pred = {key:[value[0]] for key, value in pred.items() if value}
edges = get_edges_from_pred(pred)
B = G.edge_subgraph(edges).copy()
bushes[origin] = {'bush':B, 'x':np.zeros(no_edges), 'L':{}, 'U':{}, 'L_paths':{}, 'U_paths':{}, 'destinations':[], 'origin':origin}
return bushes
def assign_initial_flows(bushes, trips):
for origin, destinations in trips.items():
bush_dict = bushes[origin]
for destination in destinations:
demand = trips[origin][destination]
if not ((demand == 0) or (demand == np.nan)):
B = bush_dict['bush']
demand = trips[origin][destination]
shortest_path = bush_dict['L_paths'][int(destination)]
path_edges = [B[u][v]['id'] for u,v in utils_graph.edges_from_path(shortest_path)]
bush_dict['x'][path_edges] += demand
bush_dict['destinations'].append(destination)
return bushes
def compute_total_flow(bushes, no_edges):
x = np.zeros(no_edges)
for origin, B in bushes.items():
x += B['x']
return x
def update_edge_labels(x,a,b,c,n):
t = _edge_func_np(x,a,b,c,n)
t_prime = _edge_func_derivative_np(x,a,b,c,n)
return t, t_prime
def update_bush_weights(bushes,t):
for origin, bush_dict in bushes.items():
B = bush_dict['bush']
edge_ids = [B[u][v]['id'] for u,v in sorted(B.edges())]
B = utils_graph.update_edge_attribute(B, 'weight', t[edge_ids])
return bushes
# update the labels for each of the bushes
def update_labels(bushes):
for origin, bush_dict in bushes.items():
B = bush_dict['bush']
bush_dict['L'], bush_dict['L_paths'] = shortest_path_dag(B,origin)
bush_dict['U'], bush_dict['U_paths'] = longest_path_dag(B,origin)
return bushes
def update_bush_labels(bush_dict):
B = bush_dict['bush']
origin = bush_dict['origin']
bush_dict['L'], bush_dict['L_paths'] = shortest_path_dag(B,origin)
bush_dict['U'], bush_dict['U_paths'] = longest_path_dag(B,origin)
return bush_dict
# drop any edge in the bush with zero flow
def drop_bush_edges(bush_dict):
dropped = []
candidate_edges_for_removal = []
#fig, ax = tapnx.plot_graph(bush_dict['bush'], edge_labels=True)
for u,v in bush_dict['bush'].edges():
edge_id = G[u][v]['id']
x = bush_dict['x']
if x[edge_id] == 0:
#drop edge if it does not cause an isolate
if bush_dict['bush'].in_degree(v) > 1:
candidate_edges_for_removal.append((u,v))
for u,v in candidate_edges_for_removal:
if bush_dict['bush'].in_degree(v) > 1:
#print('removing edge ({},{})'.format(u,v))
bush_dict['bush'].remove_edge(u,v)
dropped.append((u,v))
#else:
# print('removing edge would result in a isolate({},{})'.format(u,v))
#fig, ax = tapnx.plot_graph(bush_dict['bush'], edge_labels=True)
#plt.show()
return bush_dict, dropped
# drop any edge in the bush with zero flow
def drop_edges(bushes):
for origin, bush_dict in bushes.items():
candidate_edges_for_removal = []
#fig, ax = tapnx.plot_graph(bush_dict['bush'], edge_labels=True)
for u,v in bush_dict['bush'].edges():
edge_id = G[u][v]['id']
x = bush_dict['x']
if x[edge_id] == 0:
#drop edge if it does not cause an isolate
if bush_dict['bush'].in_degree(v) > 1:
candidate_edges_for_removal.append((u,v))
for u,v in candidate_edges_for_removal:
if bush_dict['bush'].in_degree(v) > 1:
#print('removing edge ({},{})'.format(u,v))
bush_dict['bush'].remove_edge(u,v)
#else:
# print('removing edge would result in a isolate({},{})'.format(u,v))
#fig, ax = tapnx.plot_graph(bush_dict['bush'], edge_labels=True)
#plt.show()
return bushes
def clean_up_disconnected_nodes(bushes):
for origin, bush_dict in bushes.items():
B = bush_dict['bush']
isolates = list(nx.isolates(B))
B.remove_nodes_from(isolates)
return bushes
def add_shortcut_bush_edges(bush_dict, G, t, dropped):
G_edges = set(G.edges())
B = bush_dict['bush']
origin = bush_dict['origin']
| |
def knight_walk_distance(src, dest, board_size = 8):
"""
The first challenge was to:
    Compute the number of a chess knight's jumps from src to dest.
The squares of a chess board are enumerated sequentially from
left to right and top to bottom, starting from 0.
src = value for the position of the initial position
dest = value for the position of the destination
What it does is Breadth-first-search
(https://en.wikipedia.org/wiki/Breadth-first_search)
The challenge only required 8x8 boards, if I remember correctly.
"""
from collections import deque
def inside(row, col):
return (0 <= row and row <= board_size-1 and 0 <= col and col <= board_size-1)
# To store if the position has been visited.
board = [[False]*board_size for i in range(board_size)]
# Deltas that move you like a knight
delta_row = [-2, -1, 1, 2, -2, -1, 1, 2]
delta_col = [-1, -2, -2, -1, 1, 2, 2, 1]
# row and column of the initial position
row, col = src//board_size, src%board_size
board[row][col] = True
# row and column of the destination
dest_row, dest_col = dest//board_size, dest%board_size
# End-points of the trajectories that still need to be explored
q = deque([[row,col,0]])
while (len(q) != 0):
t = q.popleft()
        # We have reached the destination
if t[0] == dest_row and t[1] == dest_col:
return t[2]
# Try each knight's jump from where we are now
for i in range(8):
            newpos_row = t[0] + delta_row[i]
            newpos_col = t[1] + delta_col[i]
inSideIs = inside(newpos_row, newpos_col)
if inSideIs:
# the new position is inside the board.
# Let's see if we have been here before.
boardVisited = (board[newpos_row][newpos_col] == True)
if inSideIs and not boardVisited:
# The new position is inside the board and was not
# visited before. Add it to the queue of routs to try.
board[newpos_row][newpos_col] = True
q.append([newpos_row, newpos_col, t[2] + 1])
return 0 # What is this? Why are you here?
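# Illustrative usage sketch (not part of the original challenge code). The expected values
# are taken from the cached SOLUTIONS table in knight_walk_distance_catched below:
# one knight jump reaches square 10 from square 0, and opposite corners are 6 jumps apart.
def _example_knight_walk():
    assert knight_walk_distance(0, 0) == 0
    assert knight_walk_distance(0, 10) == 1
    assert knight_walk_distance(0, 63) == 6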
# And because one must have fun with an easter egg ...
def knight_walk_distance_catched(src,dest):
"""
Returns the number of chess knight's jumps from src to dest.
The squares of a chess board are enumerated sequentially from
left to right and top to bottom.
src = value for the position of the initial position
dest = value for the position of the destination
This one just has the solutions computed by the previous function.
SOLUTIONS = [[0] * 64 for i in range(64)]
for i in range(64):
for j in range(64):
SOLUTIONS[i][j] = knight_walk_distance(i,j)
with open('SOLUTIONS.txt','w') as outfile:
outfile.write(str(SOLUTIONS))
"""
# For an 8 x 8 board we can just compute all solutions
# using knight_walk_distance(src, dest, board_size = 8)
# and just return the saved values each time.
SOLUTIONS = [[0, 3, 2, 3, 2, 3, 4, 5, 3, 4, 1, 2, 3, 4, 3, 4,
2, 1, 4, 3, 2, 3, 4, 5, 3, 2, 3, 2, 3, 4, 3, 4,
2, 3, 2, 3, 4, 3, 4, 5, 3, 4, 3, 4, 3, 4, 5, 4,
4, 3, 4, 3, 4, 5, 4, 5, 5, 4, 5, 4, 5, 4, 5, 6],
[3, 0, 3, 2, 3, 2, 3, 4, 2, 3, 2, 1, 2, 3, 4, 3,
1, 2, 1, 4, 3, 2, 3, 4, 2, 3, 2, 3, 2, 3, 4, 3,
3, 2, 3, 2, 3, 4, 3, 4, 4, 3, 4, 3, 4, 3, 4, 5,
3, 4, 3, 4, 3, 4, 5, 4, 4, 5, 4, 5, 4, 5, 4, 5],
[2, 3, 0, 3, 2, 3, 2, 3, 1, 2, 3, 2, 1, 2, 3, 4,
4, 1, 2, 1, 4, 3, 2, 3, 3, 2, 3, 2, 3, 2, 3, 4,
2, 3, 2, 3, 2, 3, 4, 3, 3, 4, 3, 4, 3, 4, 3, 4,
4, 3, 4, 3, 4, 3, 4, 5, 5, 4, 5, 4, 5, 4, 5, 4],
[3, 2, 3, 0, 3, 2, 3, 2, 2, 1, 2, 3, 2, 1, 2, 3,
3, 4, 1, 2, 1, 4, 3, 2, 2, 3, 2, 3, 2, 3, 2, 3,
3, 2, 3, 2, 3, 2, 3, 4, 4, 3, 4, 3, 4, 3, 4, 3,
3, 4, 3, 4, 3, 4, 3, 4, 4, 5, 4, 5, 4, 5, 4, 5],
[2, 3, 2, 3, 0, 3, 2, 3, 3, 2, 1, 2, 3, 2, 1, 2,
2, 3, 4, 1, 2, 1, 4, 3, 3, 2, 3, 2, 3, 2, 3, 2,
4, 3, 2, 3, 2, 3, 2, 3, 3, 4, 3, 4, 3, 4, 3, 4,
4, 3, 4, 3, 4, 3, 4, 3, 5, 4, 5, 4, 5, 4, 5, 4],
[3, 2, 3, 2, 3, 0, 3, 2, 4, 3, 2, 1, 2, 3, 2, 1,
3, 2, 3, 4, 1, 2, 1, 4, 4, 3, 2, 3, 2, 3, 2, 3,
3, 4, 3, 2, 3, 2, 3, 2, 4, 3, 4, 3, 4, 3, 4, 3,
5, 4, 3, 4, 3, 4, 3, 4, 4, 5, 4, 5, 4, 5, 4, 5],
[4, 3, 2, 3, 2, 3, 0, 3, 3, 4, 3, 2, 1, 2, 3, 2,
4, 3, 2, 3, 4, 1, 2, 1, 3, 4, 3, 2, 3, 2, 3, 2,
4, 3, 4, 3, 2, 3, 2, 3, 5, 4, 3, 4, 3, 4, 3, 4,
4, 5, 4, 3, 4, 3, 4, 3, 5, 4, 5, 4, 5, 4, 5, 4],
[5, 4, 3, 2, 3, 2, 3, 0, 4, 3, 4, 3, 2, 1, 4, 3,
5, 4, 3, 2, 3, 4, 1, 2, 4, 3, 4, 3, 2, 3, 2, 3,
5, 4, 3, 4, 3, 2, 3, 2, 4, 5, 4, 3, 4, 3, 4, 3,
5, 4, 5, 4, 3, 4, 3, 4, 6, 5, 4, 5, 4, 5, 4, 5],
[3, 2, 1, 2, 3, 4, 3, 4, 0, 3, 2, 3, 2, 3, 4, 5,
3, 2, 1, 2, 3, 4, 3, 4, 2, 1, 4, 3, 2, 3, 4, 5,
3, 2, 3, 2, 3, 4, 3, 4, 2, 3, 2, 3, 4, 3, 4, 5,
3, 4, 3, 4, 3, 4, 5, 4, 4, 3, 4, 3, 4, 5, 4, 5],
[4, 3, 2, 1, 2, 3, 4, 3, 3, 0, 3, 2, 3, 2, 3, 4,
2, 3, 2, 1, 2, 3, 4, 3, 1, 2, 1, 4, 3, 2, 3, 4,
2, 3, 2, 3, 2, 3, 4, 3, 3, 2, 3, 2, 3, 4, 3, 4,
4, 3, 4, 3, 4, 3, 4, 5, 3, 4, 3, 4, 3, 4, 5, 4],
[1, 2, 3, 2, 1, 2, 3, 4, 2, 3, 0, 3, 2, 3, 2, 3,
1, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 1, 4, 3, 2, 3,
3, 2, 3, 2, 3, 2, 3, 4, 2, 3, 2, 3, 2, 3, 4, 3,
3, 4, 3, 4, 3, 4, 3, 4, 4, 3, 4, 3, 4, 3, 4, 5],
[2, 1, 2, 3, 2, 1, 2, 3, 3, 2, 3, 0, 3, 2, 3, 2,
2, 1, 2, 3, 2, 1, 2, 3, 3, 4, 1, 2, 1, 4, 3, 2,
2, 3, 2, 3, 2, 3, 2, 3, 3, 2, 3, 2, 3, 2, 3, 4,
4, 3, 4, 3, 4, 3, 4, 3, 3, 4, 3, 4, 3, 4, 3, 4],
[3, 2, 1, 2, 3, 2, 1, 2, 2, 3, 2, 3, 0, 3, 2, 3,
                 3, 2, 1, 2, 3, 2, 1,
import numpy as np
import numpy.ma as ma
from numpy import linalg as LA
import tsstats
#from numba import jit
from wormtracker import Logger
class WormTrajectoryPostProcessor:
## static properties!
# bad frame settings
filterByWidth = True
filterByLength = True
widthThreshold = (0.7, 1.3)
lengthThreshold = (0.9, 1.1)
filterBySpeed = True
maxSpeed = 1000
# segment settings
max_n_missing = 3
max_d_um = 10.
max_segment_frames = 10000
min_segment_size = 150
    # head assignment settings (centroid only method)
headMinSpeed = 40. # Min <s> for head assignment by leading end
headMinLeading = 1.3 # Min relative time leading for head
headMinRelSpeed = 1.1 # Min relative end speed for head
headMinRelBrightness = 0.2 # Min relative brightness for head
# head assignment settings (posture method)
headVarianceMinRatio = 1.2 # Min ratio of head to tail posture variation
# head assignment settings (posture dynamics method)
headDeltaCorrelation = 0.05
bodyWaveDelay = 0.08
# smoothing
useSmoothingFilterDerivatives = True
filterWindow = 1. # smoothing filter window size
def __init__(self, h5obj, strain, name):
self.h5obj = h5obj
self.strain = strain
self.name = name
self.h5ref = h5obj['worms'][strain][name]
self.lengths = self.h5ref['length'][...]
self.widths = self.h5ref['width'][...]
self.frameRate = h5obj['/video/frameRate'][0]
self.pixelsPerMicron = h5obj['/video/pixelsPerMicron'][0]
self.maxFrameNumber = self.h5ref['time'].shape[0]
self.nAngles = self.h5ref['posture'].shape[1]
self.badFrames = np.zeros((self.maxFrameNumber,), dtype='bool')
self.haveSkeleton = np.zeros((self.maxFrameNumber,), dtype='bool')
self.skeleton = None
self.posture = None
self.length = None
self.width = None
self.t = self.h5ref['time'][...]
self.X = ma.array(self.h5ref['centroid'][...] / self.pixelsPerMicron)
self.Xhead = ma.zeros(self.X.shape)
self.Xtail = ma.zeros(self.X.shape)
self.v = ma.zeros(self.X.shape)
self.s = ma.zeros((self.maxFrameNumber,))
self.phi = ma.zeros((self.maxFrameNumber,))
self.psi = ma.zeros((self.maxFrameNumber,))
self.dpsi = ma.zeros((self.maxFrameNumber,))
self.Ctheta = None
self.ltheta = None
self.vtheta = None
self.usePosturalHeadAssignment = True
def postProcess(self):
Logger.logPrint('Identifying bad frames...')
self.identifyBadFrames()
Logger.logPrint('Extracting postural data...')
self.extractPosturalData()
Logger.logPrint('Fixing order of postural data...')
self.fixPosturalOrdering()
Logger.logPrint('Segmenting trajectory...')
self.segment()
Logger.logPrint('Assigning head...')
if self.usePosturalHeadAssignment:
self.assignHeadTail()
else:
self.assignHeadTailCentroidOnly()
Logger.logPrint('Ordering postural data head to tail...')
self.orderHeadTail()
Logger.logPrint('Calculating centroid motion variables...')
self.calculateCentroidMeasurements()
Logger.logPrint('Calculating postural measurements...')
self.calculatePosturalMeasurements()
def identifyBadFrames(self):
badFrames = np.logical_or(self.lengths == 0,
self.widths == 0)
self.length = np.median(self.lengths[np.logical_not(badFrames)])
self.width = np.median(self.widths[np.logical_not(badFrames)])
if self.filterByWidth:
badFrames = np.logical_or(badFrames,
np.logical_or(self.widths < self.widthThreshold[0]*self.width,
self.widths > self.widthThreshold[1]*self.width))
if self.filterByLength:
badFrames = np.logical_or(badFrames,
np.logical_or(self.lengths <
self.lengthThreshold[0]*self.length,
self.lengths >
self.lengthThreshold[1]*self.length))
if self.filterBySpeed:
v = ma.zeros(self.X.shape)
v[1:-1] = (self.X[2:, :] - self.X[0:-2])/(2.0/self.frameRate)
instSpeed = np.sqrt(np.sum(np.power(v, 2), axis=1))
badFrames = np.logical_or(badFrames,
instSpeed > self.maxSpeed)
self.badFrames = badFrames
def extractPosturalData(self):
# import skeleton splines
self.skeleton = self.h5ref['skeletonSpline'][...]
self.posture = self.h5ref['posture'][...]
self.haveSkeleton = np.array([np.any(skeleton > 0) and ~badFrame
for skeleton, badFrame in zip(self.skeleton, self.badFrames)])
# @jit
@staticmethod
def skeletonDist(skeleton1, skeleton2):
distEachPoint = np.sqrt(np.sum(np.power(skeleton1 -
skeleton2, 2),
axis=1))
# return average distance per spline point
return np.sum(distEachPoint)/skeleton1.shape[0]
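    # Worked example (made-up coordinates, not from the original module): for two 2-point
    # skeletons np.array([[0, 0], [0, 0]]) and np.array([[3, 4], [3, 4]]), each point pair is
    # 5 apart, so the summed distance is 10 and skeletonDist returns the per-point average 5.0.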
def fixPosturalOrdering(self):
# compare possible skeleton orientations
interframe_d = np.empty((self.maxFrameNumber, 2)) * np.NaN
flipped = np.zeros((self.maxFrameNumber,), dtype=bool)
nFromLastGood = np.empty((self.maxFrameNumber,)) * np.NaN
for i in xrange(1, self.maxFrameNumber):
# check whether there is a previous skeleton to compare
if not self.haveSkeleton[i] or not np.any(self.haveSkeleton[:i]):
continue
ip = np.where(self.haveSkeleton[:i])[0][-1] # last skeleton
nFromLastGood[i] = i - ip
interframe_d[i, 0] = self.skeletonDist(
np.squeeze(self.skeleton[i, :, :]),
np.squeeze(self.skeleton[ip, :, :]))
# flipped orientation
interframe_d[i, 1] = self.skeletonDist(
np.flipud(np.squeeze(self.skeleton[i, :, :])),
np.squeeze(self.skeleton[ip]))
if interframe_d[i, 1] < interframe_d[i, 0]:
# if the flipped orientation is better, flip the data
flipped[i] = not flipped[ip]
else:
flipped[i] = flipped[ip]
self.interframe_d = interframe_d
# flip data appropriately
sel = flipped
self.skeleton[sel, :, :] = self.skeleton[sel, ::-1, :]
self.posture[sel, :] = self.posture[sel, ::-1]
def segment(self):
# break video into segments with matched skeletons
max_d = self.max_d_um/self.pixelsPerMicron
ii = 0
segments = []
while ii < self.maxFrameNumber:
begin = ii
ii += 1
# Continue segment until >max_n_missing consecutive bad frames
# are found, or >max_segment_frames are collected
n_missing = 0
last_missing = False
while (ii < self.maxFrameNumber and
ii - begin < self.max_segment_frames and
(np.isnan(self.interframe_d[ii, 0]) or
np.min(self.interframe_d[ii, :]) < max_d)):
if not self.haveSkeleton[ii]:
n_missing += 1
last_missing = True
if n_missing > self.max_n_missing:
ii += 1
break
else:
n_missing = 0
last_missing = False
ii += 1
segments.append([begin, ii])
self.segments = [segment for segment in segments
if segment[1] - segment[0] >
self.min_segment_size]
def assignHeadTail(self):
flipSegment = np.zeros((len(self.segments),), dtype='bool')
segmentAssignMethod = -np.ones((len(self.segments),), dtype='int8')
npoints = round(self.posture.shape[1]*0.1)
A = ma.array(self.posture)
A[self.badFrames] = ma.masked
A[~self.haveSkeleton] = ma.masked
for i, segment in enumerate(self.segments):
b = segment[0]
e = segment[1]
# posture variance method
v = A.std(axis=0)
npoints=5
vh = v[:npoints].sum()
vt = v[-npoints:].sum()
# calculate dynamics measures
hm = _headMoveMeasure(A[b:e,:])
bw = _bodyWaveMeasure(A[b:e,:])
# head has more variance
# head has oscillatory head movement unlike tail (negative delta correlation measure)
# body wave delay is positive
if vh/vt > self.headVarianceMinRatio:
# not flipped
segmentAssignMethod[i] = 1
elif vt/vh > self.headVarianceMinRatio:
# flipped
flipSegment[i] = True
segmentAssignMethod[i] = 1
elif np.abs(bw) > self.bodyWaveDelay:
segmentAssignMethod[i] = 3
if bw < -self.bodyWaveDelay:
flipSegment[i] = True
elif np.abs(hm) > self.headDeltaCorrelation:
segmentAssignMethod[i] = 2
if hm < -self.headDeltaCorrelation:
flipSegment[i] = True
else:
segmentAssignMethod[i] = 0 # can't assign
self.flipSegment = flipSegment
self.segmentAssignMethod = segmentAssignMethod
def assignHeadTailPostureVariance(self):
flipSegment = np.zeros((len(self.segments),), dtype='bool')
segmentAssignMethod = -np.ones((len(self.segments),), dtype='int8')
npoints = round(self.posture.shape[1]*0.1)
for i, segment in enumerate(self.segments):
b = segment[0]
e = segment[1]
# calculate std at each posture position over segment
v = self.posture[b:e, :].std(axis=0)
# calculate total std for 10% from each end
vh = v[:npoints].sum()
vt = v[-npoints:].sum()
# head has higher variance
if vh/vt > self.headVarianceMinRatio:
# not flipped
segmentAssignMethod[i] = 3
elif vt/vh > self.headVarianceMinRatio:
# flipped
flipSegment[i] = True
segmentAssignMethod[i] = 3
else:
segmentAssignMethod[i] = 0 # can't assign
self.flipSegment = flipSegment
self.segmentAssignMethod = segmentAssignMethod
def assignHeadTailCentroidOnly(self):
flipSegment = np.zeros((len(self.segments),), dtype='bool')
segmentAssignMethod = -np.ones((len(self.segments),), dtype='int8')
X = ma.array(self.h5ref['centroid'][...] / self.pixelsPerMicron)
X[self.badFrames, :] = ma.masked
dt = 1/self.frameRate
(v, s, phi) = _getMotionVariables(X, dt)
Xhead = np.squeeze(self.skeleton[:, 0, :] - self.h5ref['midpoint'][...])
Xhead[self.badFrames, :] = ma.masked
Xtail = np.squeeze(self.skeleton[:, -1, :] - self.h5ref['midpoint'][...])
Xtail[self.badFrames, :] = ma.masked
for i, segment in enumerate(self.segments):
# method 1: head leads during movement
# first check worm is actually moving significantly
b = segment[0]
e = segment[1]
if (np.median(s[b:e].compressed()) >
self.headMinSpeed):
# then calculate relative time moving in each direction
phi_ends = np.zeros((e-b, 2))
phi_ends[:, 0] = np.arctan2((Xhead[b:e, 1] - X[b:e, 1]),
(Xhead[b:e, 0] - X[b:e, 0]))
phi_ends[:, 1] = np.arctan2((Xtail[b:e, 1] - X[b:e, 1]),
(Xtail[b:e, 0] - X[b:e, 0]))
rphi_ends = np.cos(phi_ends.T - phi[b:e]).T
first_leading = np.sum((rphi_ends[:, 0] >
rphi_ends[:, 1]).compressed())
last_leading = np.sum((rphi_ends[:, 1] >
rphi_ends[:, 0]).compressed())
if (max(first_leading, last_leading) /
min(first_leading, last_leading) >
self.headMinLeading):
segmentAssignMethod[i] = 1
if last_leading > first_leading:
flipSegment[i] = True
continue
# method 2: head moves more than tail
(vh, sh, phih) = _getMotionVariables(Xhead[b:e, :], dt)
(vt, st, phit) = _getMotionVariables(Xtail[b:e, :], dt)
mu_sh = np.mean(sh.compressed())
mu_st = np.mean(st.compressed())
if (max(mu_sh, mu_st) / min(mu_sh, mu_st) >
self.headMinRelSpeed):
segmentAssignMethod[i] = 2
if mu_st > mu_sh:
flipSegment[i] = True
continue
# method 3: head is brighter
# this method isn't very reliable and isn't being used
self.flipSegment = flipSegment
self.segmentAssignMethod = segmentAssignMethod
def orderHeadTail(self):
orientationFixed = np.zeros((self.maxFrameNumber,), dtype='bool')
for i, segment in enumerate(self.segments):
if self.segmentAssignMethod[i] > 0:
b = segment[0]
e = segment[1]
orientationFixed[b:e] = True
if self.flipSegment[i]:
self.skeleton[b:e, :, :] = \
np.fliplr(self.skeleton[b:e, :, :])
self.posture[b:e, :] = \
np.fliplr(self.posture[b:e, :])
self.orientationFixed = orientationFixed
def calculateCentroidMeasurements(self):
self.X[self.badFrames, :] = ma.masked
if not self.useSmoothingFilterDerivatives:
self.v[1:-1] = (self.X[2:, :] - self.X[0:-2])/(2.0/self.frameRate)
else:
# use a cubic polynomial filter to estimate the velocity
self.v = ma.zeros(self.X.shape)
halfWindow = int(np.round(self.filterWindow/2.*self.frameRate))
for i in xrange(halfWindow, self.v.shape[0]-halfWindow):
start = i-halfWindow
mid = i
finish = i+halfWindow+1
if not np.any(self.X.mask[start:finish,:]):
px = np.polyder(np.polyfit(self.t[start:finish]-self.t[mid],
self.X[start:finish, 0], 3))
py = np.polyder(np.polyfit(self.t[start:finish]-self.t[mid],
self.X[start:finish, 1], 3))
self.v[i,:] = [np.polyval(px, 0), np.polyval(py, 0)]
else:
self.v[i,:] = ma.masked
self.s = ma.sqrt(ma.sum(ma.power(self.v, 2), axis=1))
self.phi = ma.arctan2(self.v[:, 1], self.v[:, 0])
self.t[self.badFrames] = ma.masked
self.X[self.badFrames, :] = ma.masked
self.v[self.badFrames, :] = ma.masked
self.s[self.badFrames] = ma.masked
self.phi[self.badFrames] = ma.masked
def calculatePosturalMeasurements(self):
self.Xhead = ma.array(ma.squeeze(self.skeleton[:, 0, :]))
self.Xhead = ((self.Xhead + self.h5ref['boundingBox'][:, :2]) /
self.pixelsPerMicron)
self.Xhead[np.logical_not(self.orientationFixed), :] = ma.masked
self.Xtail = ma.array(np.squeeze(self.skeleton[:, -1, :]))
self.Xtail = ((self.Xtail + self.h5ref['boundingBox'][:, :2]) /
self.pixelsPerMicron)
self.Xtail[np.logical_not(self.orientationFixed), :] = ma.masked
self.psi = ma.arctan2(self.Xhead[:, 0]-self.X[:, 0],
                              self.Xhead[:,
        # Extract to a temporary directory first, so that if anything goes wrong during
        # the extraction process we don't end up with a corrupted cache.
tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])
try:
if is_zipfile(file_path):
with ZipFile(file_path, "r") as zip_file:
zip_file.extractall(tmp_extraction_dir)
zip_file.close()
else:
tar_file = tarfile.open(file_path)
check_tarfile(tar_file)
tar_file.extractall(tmp_extraction_dir)
tar_file.close()
# Extraction was successful, rename temp directory to final
# cache directory and dump the meta data.
os.replace(tmp_extraction_dir, extraction_path)
meta = _Meta(
resource=url_or_filename,
cached_path=extraction_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(extraction_path),
)
meta.to_file()
finally:
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
return extraction_path
return file_path
def is_url_or_existing_file(url_or_filename: PathOrStr) -> bool:
"""
Given something that might be a URL (or might be a local path),
    determine whether it's a URL or an existing local file path.
"""
if url_or_filename is None:
return False
url_or_filename = os.path.expanduser(str(url_or_filename))
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3", "gs") or os.path.exists(url_or_filename)
def _split_s3_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "s3")
def _split_gcs_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "gs")
def _split_cloud_path(url: str, provider: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad {} path {}".format(provider, url))
bucket_name = parsed.netloc
provider_path = parsed.path
# Remove '/' at beginning of path.
if provider_path.startswith("/"):
provider_path = provider_path[1:]
return bucket_name, provider_path
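# Illustrative worked example (made-up bucket and key, not from the original module):
# _split_s3_path("s3://my-bucket/models/weights.th") returns ("my-bucket", "models/weights.th");
# _split_gcs_path behaves the same way for "gs://" URLs.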
def _s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except botocore.exceptions.ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
def _get_s3_resource():
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
return s3_resource
@_s3_request
def _s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@_s3_request
def _s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def _gcs_request(func: Callable):
"""
Wrapper function for gcs requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except NotFound:
raise FileNotFoundError("file {} not found".format(url))
return wrapper
def _get_gcs_client():
storage_client = storage.Client()
return storage_client
def _get_gcs_blob(url: str) -> storage.blob.Blob:
gcs_resource = _get_gcs_client()
bucket_name, gcs_path = _split_gcs_path(url)
bucket = gcs_resource.bucket(bucket_name)
blob = bucket.blob(gcs_path)
return blob
@_gcs_request
def _gcs_md5(url: str) -> Optional[str]:
"""Get GCS object's md5."""
blob = _get_gcs_blob(url)
return blob.md5_hash
@_gcs_request
def _gcs_get(url: str, temp_filename: str) -> None:
"""Pull a file directly from GCS."""
blob = _get_gcs_blob(url)
blob.download_to_filename(temp_filename)
def _session_with_backoff() -> requests.Session:
"""
We ran into an issue where http requests to s3 were timing out,
possibly because we were making too many requests too quickly.
This helper function returns a requests session that has retry-with-backoff
built in. See
<https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.
"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
def _http_etag(url: str) -> Optional[str]:
with _session_with_backoff() as session:
response = session.head(url, allow_redirects=True)
if response.status_code != 200:
raise OSError(
"HEAD request failed for url {} with status code {}".format(url, response.status_code)
)
return response.headers.get("ETag")
def _http_get(url: str, temp_file: IO) -> None:
with _session_with_backoff() as session:
req = session.get(url, stream=True)
req.raise_for_status()
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = Tqdm.tqdm(unit="B", unit_scale=True, total=total, desc="downloading")
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def _find_latest_cached(url: str, cache_dir: PathOrStr) -> Optional[str]:
filename = _resource_to_filename(url)
cache_path = os.path.join(cache_dir, filename)
candidates: List[Tuple[str, float]] = []
for path in glob.glob(cache_path + "*"):
if path.endswith(".json") or path.endswith("-extracted") or path.endswith(".lock"):
continue
mtime = os.path.getmtime(path)
candidates.append((path, mtime))
# Sort candidates by modification time, newest first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
class CacheFile:
"""
This is a context manager that makes robust caching easier.
    On `__enter__`, an IO handle to a temporary file is returned, which can
be treated as if it's the actual cache file.
    On `__exit__`, the temporary file is renamed to the cache file. If anything
goes wrong while writing to the temporary file, it will be removed.
"""
def __init__(self, cache_filename: PathOrStr, mode: str = "w+b", suffix: str = ".tmp") -> None:
self.cache_filename = (
cache_filename if isinstance(cache_filename, Path) else Path(cache_filename)
)
self.cache_directory = os.path.dirname(self.cache_filename)
self.mode = mode
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, dir=self.cache_directory, delete=False, suffix=suffix
)
def __enter__(self):
return self.temp_file
def __exit__(self, exc_type, exc_value, traceback):
self.temp_file.close()
if exc_value is None:
# Success.
logger.debug(
"Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename
)
# Rename the temp file to the actual cache filename.
os.replace(self.temp_file.name, self.cache_filename)
return True
# Something went wrong, remove the temp file.
logger.debug("removing temp file %s", self.temp_file.name)
os.remove(self.temp_file.name)
return False
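# Illustrative sketch (not from the original module; the path and URL are
# hypothetical): CacheFile is typically combined with _http_get so that a
# partially written download can never replace an existing cache entry.
#
#     with CacheFile("/tmp/cache/resource.bin") as temp_file:
#         _http_get("https://example.com/resource.bin", temp_file)
#     # On a clean exit the temp file is renamed to /tmp/cache/resource.bin;
#     # on an exception it is removed instead.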
@dataclass
class _Meta:
"""
Any resource that is downloaded to - or extracted in - the cache directory will
have a meta JSON file written next to it, which corresponds to an instance
of this class.
In older versions of AllenNLP, this meta document just had two fields: 'url' and
'etag'. The 'url' field is now the more general 'resource' field, but these old
meta files are still compatible when a `_Meta` is instantiated with the `.from_path()`
class method.
"""
resource: str
"""
URL or normalized path to the resource.
"""
cached_path: str
"""
Path to the corresponding cached version of the resource.
"""
creation_time: float
"""
The unix timestamp of when the corresponding resource was cached or extracted.
"""
size: int = 0
"""
The size of the corresponding resource, in bytes.
"""
etag: Optional[str] = None
"""
Optional ETag associated with the current cached version of the resource.
"""
extraction_dir: bool = False
"""
Does this meta correspond to an extraction directory?
"""
def to_file(self) -> None:
with open(self.cached_path + ".json", "w") as meta_file:
json.dump(asdict(self), meta_file)
@classmethod
def from_path(cls, path: PathOrStr) -> "_Meta":
path = str(path)
with open(path) as meta_file:
data = json.load(meta_file)
# For backwards compat:
if "resource" not in data:
data["resource"] = data.pop("url")
if "creation_time" not in data:
data["creation_time"] = os.path.getmtime(path[:-5])
if "extraction_dir" not in data and path.endswith("-extracted.json"):
data["extraction_dir"] = True
if "cached_path" not in data:
data["cached_path"] = path[:-5]
if "size" not in data:
data["size"] = _get_resource_size(data["cached_path"])
return cls(**data)
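# Illustrative sketch (hypothetical values): after caching a resource, a meta
# record is written next to it and can be read back later with from_path().
#
#     meta = _Meta(
#         resource="https://example.com/resource.bin",
#         cached_path="/tmp/cache/abc123",
#         creation_time=time.time(),
#         size=_get_resource_size("/tmp/cache/abc123"),
#         etag='"d41d8cd9"',
#     )
#     meta.to_file()                                   # writes /tmp/cache/abc123.json
#     meta = _Meta.from_path("/tmp/cache/abc123.json")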
def _hf_hub_download(
url, model_identifier: str, filename: Optional[str], cache_dir: PathOrStr = CACHE_DIRECTORY
) -> str:
revision: Optional[str]
if "@" in model_identifier:
repo_id = model_identifier.split("@")[0]
revision = model_identifier.split("@")[1]
else:
repo_id = model_identifier
revision = None
if filename is not None:
hub_url = hf_hub.hf_hub_url(repo_id=repo_id, filename=filename, revision=revision)
cache_path = str(
hf_hub.cached_download(
url=hub_url,
library_name="cached_path",
library_version=VERSION,
cache_dir=cache_dir,
)
)
# HF writes its own meta '.json' file which uses the same format we used to use and still
# support, but is missing some fields that we like to have.
# So we overwrite it when we can.
with FileLock(cache_path + ".lock", read_only_ok=True):
meta = _Meta.from_path(cache_path + ".json")
# The file HF writes will have 'resource' set to the 'http' URL corresponding to the 'hf://' URL,
# but we want 'resource' to be the original 'hf://' URL.
if meta.resource != url:
meta.resource = url
meta.to_file()
else:
cache_path = str(hf_hub.snapshot_download(repo_id, revision=revision, cache_dir=cache_dir))
# Need to write the meta file for snapshot downloads if it doesn't exist.
with FileLock(cache_path + ".lock", read_only_ok=True):
if not os.path.exists(cache_path + ".json"):
meta = _Meta(
resource=url,
cached_path=cache_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(cache_path),
)
meta.to_file()
return cache_path
# TODO(joelgrus): do we want to do checksums or anything like that?
def get_from_cache(url: str, cache_dir: PathOrStr = CACHE_DIRECTORY) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if url.startswith("hf://"):
# Remove the 'hf://' prefix
identifier = url[5:]
if identifier.count("/") > 1:
filename = "/".join(identifier.split("/")[2:])
model_identifier = "/".join(identifier.split("/")[:2])
return _hf_hub_download(url, model_identifier, filename, cache_dir)
elif identifier.count("/") == 1:
# 'hf://' URLs like 'hf://xxxx/yyyy' are potentially ambiguous,
# because this could refer to either:
# 1. the file 'yyyy' in the 'xxxx' repository, or
# 2. the repo 'yyyy' under the user/org name 'xxxx'.
# We default to (1), but if we | |
str upload_id: ID of the multipart upload
:param headers: HTTP headers
:type headers: may be a dict; oss2.CaseInsensitiveDict is recommended
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
logger.debug("Start to abort multipart upload, bucket: {0}, key: {1}, upload_id: {2}".format(
self.bucket_name, to_string(key), upload_id))
headers = http.CaseInsensitiveDict(headers)
resp = self.__do_object('DELETE', key,
params={'uploadId': upload_id}, headers=headers)
logger.debug("Abort multipart done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def list_multipart_uploads(self,
prefix='',
delimiter='',
key_marker='',
upload_id_marker='',
max_uploads=1000,
headers=None):
"""罗列正在进行中的分片上传。支持分页。
:param str prefix: 只罗列匹配该前缀的文件的分片上传
:param str delimiter: 目录分割符
:param str key_marker: 文件名分页符。第一次调用可以不传,后续设为返回值中的 `next_key_marker`
:param str upload_id_marker: 分片ID分页符。第一次调用可以不传,后续设为返回值中的 `next_upload_id_marker`
:param int max_uploads: 一次罗列最多能够返回的条目数
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`ListMultipartUploadsResult <oss2.models.ListMultipartUploadsResult>`
"""
logger.debug("Start to list multipart uploads, bucket: {0}, prefix: {1}, delimiter: {2}, key_marker: {3}, "
"upload_id_marker: {4}, max_uploads: {5}".format(self.bucket_name, to_string(prefix), delimiter,
to_string(key_marker), upload_id_marker,
max_uploads))
headers = http.CaseInsensitiveDict(headers)
resp = self.__do_object('GET', '',
params={'uploads': '',
'prefix': prefix,
'delimiter': delimiter,
'key-marker': key_marker,
'upload-id-marker': upload_id_marker,
'max-uploads': str(max_uploads),
'encoding-type': 'url'},
headers=headers)
logger.debug("List multipart uploads done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_list_multipart_uploads, ListMultipartUploadsResult)
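# Usage sketch (assumes `bucket` is an existing oss2.Bucket instance; attribute
# names follow oss2.models.ListMultipartUploadsResult): page through all
# in-progress uploads.
#
#     key_marker, upload_id_marker = '', ''
#     while True:
#         result = bucket.list_multipart_uploads(key_marker=key_marker,
#                                                upload_id_marker=upload_id_marker)
#         for upload in result.upload_list:
#             print(upload.key, upload.upload_id)
#         if not result.is_truncated:
#             break
#         key_marker = result.next_key_marker
#         upload_id_marker = result.next_upload_id_marker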
def upload_part_copy(self, source_bucket_name, source_key, byte_range,
target_key, target_upload_id, target_part_number,
headers=None, params=None):
"""分片拷贝。把一个已有文件的一部分或整体拷贝成目标文件的一个分片。
:source_bucket_name: 源文件所在bucket的名称
:source_key:源文件名称
:param byte_range: 指定待拷贝内容在源文件里的范围。参见 :ref:`byte_range`
:target_key: 目的文件的名称
:target_upload_id: 目的文件的uploadid
:target_part_number: 目的文件的分片号
:param params: 请求参数
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
headers = http.CaseInsensitiveDict(headers)
if params and Bucket.VERSIONID in params:
headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + \
'/' + urlquote(source_key, '') + '?versionId=' + params[Bucket.VERSIONID]
else:
headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + '/' + urlquote(source_key, '')
range_string = _make_range_string(byte_range)
if range_string:
headers[OSS_COPY_OBJECT_SOURCE_RANGE] = range_string
logger.debug("Start to upload part copy, source bucket: {0}, source key: {1}, bucket: {2}, key: {3}, range"
": {4}, upload id: {5}, part_number: {6}, headers: {7}".format(source_bucket_name,
to_string(source_key),self.bucket_name,to_string(target_key),
byte_range, target_upload_id,target_part_number, headers))
if params is None:
params = dict()
params['uploadId'] = target_upload_id
params['partNumber'] = str(target_part_number)
resp = self.__do_object('PUT', target_key,
params=params,headers=headers)
logger.debug("Upload part copy done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return PutObjectResult(resp)
def list_parts(self, key, upload_id,
marker='', max_parts=1000, headers=None):
"""列举已经上传的分片。支持分页。
:param headers: HTTP头部
:param str key: 文件名
:param str upload_id: 分片上传ID
:param str marker: 分页符
:param int max_parts: 一次最多罗列多少分片
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`ListPartsResult <oss2.models.ListPartsResult>`
"""
logger.debug("Start to list parts, bucket: {0}, key: {1}, upload_id: {2}, marker: {3}, max_parts: {4}".format(
self.bucket_name, to_string(key), upload_id, marker, max_parts))
headers = http.CaseInsensitiveDict(headers)
resp = self.__do_object('GET', key,
params={'uploadId': upload_id,
'part-number-marker': marker,
'max-parts': str(max_parts)},
headers=headers)
logger.debug("List parts done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_list_parts, ListPartsResult)
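# Usage sketch (assumes `bucket`, `key` and `upload_id` refer to an in-progress
# multipart upload; attribute names follow oss2.models.ListPartsResult): collect
# every uploaded part across pages.
#
#     marker, parts = '', []
#     while True:
#         result = bucket.list_parts(key, upload_id, marker=marker)
#         parts.extend(result.parts)
#         if not result.is_truncated:
#             break
#         marker = result.next_marker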
def put_symlink(self, target_key, symlink_key, headers=None):
"""创建Symlink。
:param str target_key: 目标文件,目标文件不能为符号连接
:param str symlink_key: 符号连接类文件,其实质是一个特殊的文件,数据指向目标文件
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
headers = http.CaseInsensitiveDict(headers)
headers[OSS_SYMLINK_TARGET] = urlquote(target_key, '')
logger.debug("Start to put symlink, bucket: {0}, target_key: {1}, symlink_key: {2}, headers: {3}".format(
self.bucket_name, to_string(target_key), to_string(symlink_key), headers))
resp = self.__do_object('PUT', symlink_key, headers=headers, params={Bucket.SYMLINK: ''})
logger.debug("Put symlink done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_symlink(self, symlink_key, params=None, headers=None):
"""获取符号连接文件的目标文件。
:param str symlink_key: 符号连接类文件
:param dict params: 请求参数
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`GetSymlinkResult <oss2.models.GetSymlinkResult>`
:raises: 如果文件的符号链接不存在,则抛出 :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` ;还可能抛出其他异常
"""
logger.debug(
"Start to get symlink, bucket: {0}, symlink_key: {1}".format(self.bucket_name, to_string(symlink_key)))
headers = http.CaseInsensitiveDict(headers)
if params is None:
params = dict()
if Bucket.SYMLINK not in params:
params[Bucket.SYMLINK] = ''
resp = self.__do_object('GET', symlink_key, params=params, headers=headers)
logger.debug("Get symlink done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return GetSymlinkResult(resp)
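# Usage sketch (assumes `bucket` is an oss2.Bucket and 'target.txt' already exists):
#
#     bucket.put_symlink('target.txt', 'link-to-target.txt')
#     result = bucket.get_symlink('link-to-target.txt')
#     print(result.target_key)   # -> 'target.txt'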
def create_bucket(self, permission=None, input=None):
"""创建新的Bucket。
:param str permission: 指定Bucket的ACL。可以是oss2.BUCKET_ACL_PRIVATE(推荐、缺省)、oss2.BUCKET_ACL_PUBLIC_READ或是
oss2.BUCKET_ACL_PUBLIC_READ_WRITE。
:param input: :class:`BucketCreateConfig <oss2.models.BucketCreateConfig>` object
"""
if permission:
headers = {OSS_CANNED_ACL: permission}
else:
headers = None
data = self.__convert_data(BucketCreateConfig, xml_utils.to_put_bucket_config, input)
logger.debug("Start to create bucket, bucket: {0}, permission: {1}, config: {2}".format(self.bucket_name,
permission, data))
resp = self.__do_bucket('PUT', headers=headers, data=data)
logger.debug("Create bucket done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def delete_bucket(self):
"""删除一个Bucket。只有没有任何文件,也没有任何未完成的分片上传的Bucket才能被删除。
:return: :class:`RequestResult <oss2.models.RequestResult>`
":raises: 如果试图删除一个非空Bucket,则抛出 :class:`BucketNotEmpty <oss2.exceptions.BucketNotEmpty>`
"""
logger.info("Start to delete bucket, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('DELETE')
logger.debug("Delete bucket done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def put_bucket_acl(self, permission):
"""设置Bucket的ACL。
:param str permission: 新的ACL,可以是oss2.BUCKET_ACL_PRIVATE、oss2.BUCKET_ACL_PUBLIC_READ或
oss2.BUCKET_ACL_PUBLIC_READ_WRITE
"""
logger.debug("Start to put bucket acl, bucket: {0}, acl: {1}".format(self.bucket_name, permission))
resp = self.__do_bucket('PUT', headers={OSS_CANNED_ACL: permission}, params={Bucket.ACL: ''})
logger.debug("Put bucket acl done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_acl(self):
"""获取Bucket的ACL。
:return: :class:`GetBucketAclResult <oss2.models.GetBucketAclResult>`
"""
logger.debug("Start to get bucket acl, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.ACL: ''})
logger.debug("Get bucket acl done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_acl, GetBucketAclResult)
def put_bucket_cors(self, input):
"""设置Bucket的CORS。
:param input: :class:`BucketCors <oss2.models.BucketCors>` 对象或其他
"""
data = self.__convert_data(BucketCors, xml_utils.to_put_bucket_cors, input)
logger.debug("Start to put bucket cors, bucket: {0}, cors: {1}".format(self.bucket_name, data))
resp = self.__do_bucket('PUT', data=data, params={Bucket.CORS: ''})
logger.debug("Put bucket cors done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_cors(self):
"""获取Bucket的CORS配置。
:return: :class:`GetBucketCorsResult <oss2.models.GetBucketCorsResult>`
"""
logger.debug("Start to get bucket CORS, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.CORS: ''})
logger.debug("Get bucket CORS done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_cors, GetBucketCorsResult)
def delete_bucket_cors(self):
"""删除Bucket的CORS配置。"""
logger.debug("Start to delete bucket CORS, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('DELETE', params={Bucket.CORS: ''})
logger.debug("Delete bucket CORS done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def put_bucket_lifecycle(self, input):
"""设置生命周期管理的配置。
:param input: :class:`BucketLifecycle <oss2.models.BucketLifecycle>` 对象或其他
"""
data = self.__convert_data(BucketLifecycle, xml_utils.to_put_bucket_lifecycle, input)
logger.debug("Start to put bucket lifecycle, bucket: {0}, lifecycle: {1}".format(self.bucket_name, data))
resp = self.__do_bucket('PUT', data=data, params={Bucket.LIFECYCLE: ''})
logger.debug("Put bucket lifecycle done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_lifecycle(self):
"""获取生命周期管理配置。
:return: :class:`GetBucketLifecycleResult <oss2.models.GetBucketLifecycleResult>`
:raises: 如果没有设置Lifecycle,则抛出 :class:`NoSuchLifecycle <oss2.exceptions.NoSuchLifecycle>`
"""
logger.debug("Start to get bucket lifecycle, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.LIFECYCLE: ''})
logger.debug("Get bucket lifecycle done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_lifecycle, GetBucketLifecycleResult)
def delete_bucket_lifecycle(self):
"""删除生命周期管理配置。如果Lifecycle没有设置,也返回成功。"""
logger.debug("Start to delete bucket lifecycle, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('DELETE', params={Bucket.LIFECYCLE: ''})
logger.debug("Delete bucket lifecycle done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_location(self):
"""获取Bucket的数据中心。
:return: :class:`GetBucketLocationResult <oss2.models.GetBucketLocationResult>`
"""
logger.debug("Start to get bucket location, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.LOCATION: ''})
logger.debug("Get bucket location done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_location, GetBucketLocationResult)
def put_bucket_logging(self, input):
"""设置Bucket的访问日志功能。
:param input: :class:`BucketLogging <oss2.models.BucketLogging>` 对象或其他
"""
data = self.__convert_data(BucketLogging, xml_utils.to_put_bucket_logging, input)
logger.debug("Start to put bucket logging, bucket: {0}, logging: {1}".format(self.bucket_name, data))
resp = self.__do_bucket('PUT', data=data, params={Bucket.LOGGING: ''})
logger.debug("Put bucket logging done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_logging(self):
"""获取Bucket的访问日志功能配置。
:return: :class:`GetBucketLoggingResult <oss2.models.GetBucketLoggingResult>`
"""
logger.debug("Start to get bucket logging, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.LOGGING: ''})
logger.debug("Get bucket logging done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_logging, GetBucketLoggingResult)
def delete_bucket_logging(self):
"""关闭Bucket的访问日志功能。"""
logger.debug("Start to delete bucket loggging, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('DELETE', params={Bucket.LOGGING: ''})
logger.debug("Put bucket lifecycle done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def put_bucket_referer(self, input):
"""为Bucket设置防盗链。
:param input: :class:`BucketReferer <oss2.models.BucketReferer>` 对象或其他
"""
data = self.__convert_data(BucketReferer, xml_utils.to_put_bucket_referer, input)
logger.debug("Start to put bucket referer, bucket: {0}, referer: {1}".format(self.bucket_name, to_string(data)))
resp = self.__do_bucket('PUT', data=data, params={Bucket.REFERER: ''})
logger.debug("Put bucket referer done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_referer(self):
"""获取Bucket的防盗链配置。
:return: :class:`GetBucketRefererResult <oss2.models.GetBucketRefererResult>`
"""
logger.debug("Start to get bucket referer, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.REFERER: ''})
logger.debug("Get bucket referer done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_referer, GetBucketRefererResult)
def get_bucket_stat(self):
"""查看Bucket的状态,目前包括bucket大小,bucket的object数量,bucket正在上传的Multipart Upload事件个数等。
:return: :class:`GetBucketStatResult <oss2.models.GetBucketStatResult>`
"""
logger.debug("Start to get bucket stat, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.STAT: ''})
logger.debug("Get bucket stat done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_stat, GetBucketStatResult)
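# Usage sketch (assumes `bucket` is an oss2.Bucket; the attribute names below
# follow oss2.models.BucketStat and may differ between SDK versions):
#
#     stat = bucket.get_bucket_stat()
#     print(stat.storage_size_in_bytes, stat.object_count, stat.multi_part_upload_count)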
def get_bucket_info(self):
"""获取bucket相关信息,如创建时间,访问Endpoint,Owner与ACL等。
:return: :class:`GetBucketInfoResult <oss2.models.GetBucketInfoResult>`
"""
logger.debug("Start to get bucket info, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.BUCKET_INFO: ''})
logger.debug("Get bucket info done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_info, GetBucketInfoResult)
def put_bucket_website(self, input):
"""为Bucket配置静态网站托管功能。
:param input: :class:`BucketWebsite <oss2.models.BucketWebsite>`
"""
data = self.__convert_data(BucketWebsite, xml_utils.to_put_bucket_website, input)
headers = http.CaseInsensitiveDict()
headers['Content-MD5'] = utils.content_md5(data)
logger.debug("Start to put bucket website, bucket: {0}, website: {1}".format(self.bucket_name, to_string(data)))
resp = self.__do_bucket('PUT', data=data, params={Bucket.WEBSITE: ''}, headers=headers)
logger.debug("Put bucket website done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def get_bucket_website(self):
"""获取Bucket的静态网站托管配置。
:return: :class:`GetBucketWebsiteResult <oss2.models.GetBucketWebsiteResult>`
:raises: 如果没有设置静态网站托管,那么就抛出 :class:`NoSuchWebsite <oss2.exceptions.NoSuchWebsite>`
"""
logger.debug("Start to get bucket website, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('GET', params={Bucket.WEBSITE: ''})
logger.debug("Get bucket website done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return self._parse_result(resp, xml_utils.parse_get_bucket_website, GetBucketWebsiteResult)
def delete_bucket_website(self):
"""关闭Bucket的静态网站托管功能。"""
logger.debug("Start to delete bucket website, bucket: {0}".format(self.bucket_name))
resp = self.__do_bucket('DELETE', params={Bucket.WEBSITE: ''})
logger.debug("Delete bucket website done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
return RequestResult(resp)
def create_live_channel(self, channel_name, input):
"""创建推流直播频道
:param str channel_name: 要创建的live channel的名称
:param input: LiveChannelInfo类型,包含了live channel中的描述信息
:return: :class:`CreateLiveChannelResult <oss2.models.CreateLiveChannelResult>`
"""
data = self.__convert_data(LiveChannelInfo, xml_utils.to_create_live_channel, input)
logger.debug("Start to create live-channel, bucket: {0}, channel_name: {1}, info: | |
def Tx_Dia(self):
"""
Tx_Dia(Pointing_Errors_ff_sptr self) -> float
Return current pointing errors transmitter aperture diameter.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_Tx_Dia(self)
def set_Tx_Theta(self, Tx_Theta):
"""
set_Tx_Theta(Pointing_Errors_ff_sptr self, float Tx_Theta)
Set pointing errors transmitter beam divergence.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_Tx_Theta(self, Tx_Theta)
def Tx_Theta(self):
"""
Tx_Theta(Pointing_Errors_ff_sptr self) -> float
Return current pointing errors transmitter beam divergence.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_Tx_Theta(self)
def set_Rx_Dia(self, Rx_Dia):
"""
set_Rx_Dia(Pointing_Errors_ff_sptr self, float Rx_Dia)
Set pointing errors receiver aperture diameter.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_Rx_Dia(self, Rx_Dia)
def Rx_Dia(self):
"""
Rx_Dia(Pointing_Errors_ff_sptr self) -> float
Return current pointing errors receiver aperture diameter.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_Rx_Dia(self)
def set_TempCorr(self, TempCorr):
"""
set_TempCorr(Pointing_Errors_ff_sptr self, float TempCorr)
Set pointing errors temporal correlation.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_TempCorr(self, TempCorr)
def TempCorr(self):
"""
TempCorr(Pointing_Errors_ff_sptr self) -> float
Return current pointing errors temporal correlation.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_TempCorr(self)
def set_SampRate(self, SampRate):
"""
set_SampRate(Pointing_Errors_ff_sptr self, float SampRate)
Set pointing errors sampling rate.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_SampRate(self, SampRate)
def SampRate(self):
"""
SampRate(Pointing_Errors_ff_sptr self) -> float
Return current pointing errors sampling rate.
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_SampRate(self)
def history(self):
"""history(Pointing_Errors_ff_sptr self) -> unsigned int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_history(self)
def declare_sample_delay(self, *args):
"""
declare_sample_delay(Pointing_Errors_ff_sptr self, int which, int delay)
declare_sample_delay(Pointing_Errors_ff_sptr self, unsigned int delay)
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_declare_sample_delay(self, *args)
def sample_delay(self, which):
"""sample_delay(Pointing_Errors_ff_sptr self, int which) -> unsigned int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_sample_delay(self, which)
def output_multiple(self):
"""output_multiple(Pointing_Errors_ff_sptr self) -> int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(Pointing_Errors_ff_sptr self) -> double"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_relative_rate(self)
def start(self):
"""start(Pointing_Errors_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_start(self)
def stop(self):
"""stop(Pointing_Errors_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_stop(self)
def nitems_read(self, which_input):
"""nitems_read(Pointing_Errors_ff_sptr self, unsigned int which_input) -> uint64_t"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_nitems_read(self, which_input)
def nitems_written(self, which_output):
"""nitems_written(Pointing_Errors_ff_sptr self, unsigned int which_output) -> uint64_t"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_nitems_written(self, which_output)
def max_noutput_items(self):
"""max_noutput_items(Pointing_Errors_ff_sptr self) -> int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_max_noutput_items(self)
def set_max_noutput_items(self, m):
"""set_max_noutput_items(Pointing_Errors_ff_sptr self, int m)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_max_noutput_items(self, m)
def unset_max_noutput_items(self):
"""unset_max_noutput_items(Pointing_Errors_ff_sptr self)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_unset_max_noutput_items(self)
def is_set_max_noutput_items(self):
"""is_set_max_noutput_items(Pointing_Errors_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_is_set_max_noutput_items(self)
def set_min_noutput_items(self, m):
"""set_min_noutput_items(Pointing_Errors_ff_sptr self, int m)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_min_noutput_items(self, m)
def min_noutput_items(self):
"""min_noutput_items(Pointing_Errors_ff_sptr self) -> int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_min_noutput_items(self)
def max_output_buffer(self, i):
"""max_output_buffer(Pointing_Errors_ff_sptr self, int i) -> long"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_max_output_buffer(self, i)
def set_max_output_buffer(self, *args):
"""
set_max_output_buffer(Pointing_Errors_ff_sptr self, long max_output_buffer)
set_max_output_buffer(Pointing_Errors_ff_sptr self, int port, long max_output_buffer)
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_max_output_buffer(self, *args)
def min_output_buffer(self, i):
"""min_output_buffer(Pointing_Errors_ff_sptr self, int i) -> long"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_min_output_buffer(self, i)
def set_min_output_buffer(self, *args):
"""
set_min_output_buffer(Pointing_Errors_ff_sptr self, long min_output_buffer)
set_min_output_buffer(Pointing_Errors_ff_sptr self, int port, long min_output_buffer)
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_min_output_buffer(self, *args)
def pc_noutput_items(self):
"""pc_noutput_items(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_noutput_items(self)
def pc_noutput_items_avg(self):
"""pc_noutput_items_avg(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_noutput_items_avg(self)
def pc_noutput_items_var(self):
"""pc_noutput_items_var(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_noutput_items_var(self)
def pc_nproduced(self):
"""pc_nproduced(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_nproduced(self)
def pc_nproduced_avg(self):
"""pc_nproduced_avg(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_nproduced_avg(self)
def pc_nproduced_var(self):
"""pc_nproduced_var(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_nproduced_var(self)
def pc_input_buffers_full(self, *args):
"""
pc_input_buffers_full(Pointing_Errors_ff_sptr self, int which) -> float
pc_input_buffers_full(Pointing_Errors_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_input_buffers_full(self, *args)
def pc_input_buffers_full_avg(self, *args):
"""
pc_input_buffers_full_avg(Pointing_Errors_ff_sptr self, int which) -> float
pc_input_buffers_full_avg(Pointing_Errors_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_input_buffers_full_avg(self, *args)
def pc_input_buffers_full_var(self, *args):
"""
pc_input_buffers_full_var(Pointing_Errors_ff_sptr self, int which) -> float
pc_input_buffers_full_var(Pointing_Errors_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_input_buffers_full_var(self, *args)
def pc_output_buffers_full(self, *args):
"""
pc_output_buffers_full(Pointing_Errors_ff_sptr self, int which) -> float
pc_output_buffers_full(Pointing_Errors_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_output_buffers_full(self, *args)
def pc_output_buffers_full_avg(self, *args):
"""
pc_output_buffers_full_avg(Pointing_Errors_ff_sptr self, int which) -> float
pc_output_buffers_full_avg(Pointing_Errors_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_output_buffers_full_avg(self, *args)
def pc_output_buffers_full_var(self, *args):
"""
pc_output_buffers_full_var(Pointing_Errors_ff_sptr self, int which) -> float
pc_output_buffers_full_var(Pointing_Errors_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_output_buffers_full_var(self, *args)
def pc_work_time(self):
"""pc_work_time(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_work_time(self)
def pc_work_time_avg(self):
"""pc_work_time_avg(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_work_time_avg(self)
def pc_work_time_var(self):
"""pc_work_time_var(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_work_time_var(self)
def pc_work_time_total(self):
"""pc_work_time_total(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_work_time_total(self)
def pc_throughput_avg(self):
"""pc_throughput_avg(Pointing_Errors_ff_sptr self) -> float"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_pc_throughput_avg(self)
def set_processor_affinity(self, mask):
"""set_processor_affinity(Pointing_Errors_ff_sptr self, std::vector< int,std::allocator< int > > const & mask)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_processor_affinity(self, mask)
def unset_processor_affinity(self):
"""unset_processor_affinity(Pointing_Errors_ff_sptr self)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_unset_processor_affinity(self)
def processor_affinity(self):
"""processor_affinity(Pointing_Errors_ff_sptr self) -> std::vector< int,std::allocator< int > >"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_processor_affinity(self)
def active_thread_priority(self):
"""active_thread_priority(Pointing_Errors_ff_sptr self) -> int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_active_thread_priority(self)
def thread_priority(self):
"""thread_priority(Pointing_Errors_ff_sptr self) -> int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_thread_priority(self)
def set_thread_priority(self, priority):
"""set_thread_priority(Pointing_Errors_ff_sptr self, int priority) -> int"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_thread_priority(self, priority)
def name(self):
"""name(Pointing_Errors_ff_sptr self) -> std::string"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_name(self)
def symbol_name(self):
"""symbol_name(Pointing_Errors_ff_sptr self) -> std::string"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_symbol_name(self)
def input_signature(self):
"""input_signature(Pointing_Errors_ff_sptr self) -> io_signature_sptr"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_input_signature(self)
def output_signature(self):
"""output_signature(Pointing_Errors_ff_sptr self) -> io_signature_sptr"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_output_signature(self)
def unique_id(self):
"""unique_id(Pointing_Errors_ff_sptr self) -> long"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_unique_id(self)
def to_basic_block(self):
"""to_basic_block(Pointing_Errors_ff_sptr self) -> basic_block_sptr"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_to_basic_block(self)
def check_topology(self, ninputs, noutputs):
"""check_topology(Pointing_Errors_ff_sptr self, int ninputs, int noutputs) -> bool"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_check_topology(self, ninputs, noutputs)
def alias(self):
"""alias(Pointing_Errors_ff_sptr self) -> std::string"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_alias(self)
def set_block_alias(self, name):
"""set_block_alias(Pointing_Errors_ff_sptr self, std::string name)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_set_block_alias(self, name)
def _post(self, which_port, msg):
"""_post(Pointing_Errors_ff_sptr self, swig_int_ptr which_port, swig_int_ptr msg)"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr__post(self, which_port, msg)
def message_ports_in(self):
"""message_ports_in(Pointing_Errors_ff_sptr self) -> swig_int_ptr"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_message_ports_in(self)
def message_ports_out(self):
"""message_ports_out(Pointing_Errors_ff_sptr self) -> swig_int_ptr"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_message_ports_out(self)
def message_subscribers(self, which_port):
"""message_subscribers(Pointing_Errors_ff_sptr self, swig_int_ptr which_port) -> swig_int_ptr"""
return _FSO_Comm_swig.Pointing_Errors_ff_sptr_message_subscribers(self, which_port)
Pointing_Errors_ff_sptr_swigregister = _FSO_Comm_swig.Pointing_Errors_ff_sptr_swigregister
Pointing_Errors_ff_sptr_swigregister(Pointing_Errors_ff_sptr)
Pointing_Errors_ff_sptr.__repr__ = lambda self: "<gr_block %s (%d)>" % (self.name(), self.unique_id())
Pointing_Errors_ff = Pointing_Errors_ff.make;
class Turbulence_ff(object):
"""
FSO Turbulence Channel.
This block simulates the effect of turbulence (scintillation) in FSO channel.
Constructor Specific Documentation:
Make a turbulence block.
Args:
Cn2 : refractive index structure constant (m^(-2/3))
Wavelen : optical beam wavelength (m)
LinkLen : FSO channel link length (m)
Rx_Dia : receiver aperture diameter (m)
TempCorr : temporal correlation of turbulence channel (s)
SampRate : sampling rate of signal (samples per second)
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def make(Cn2, Wavelen, LinkLen, Rx_Dia, TempCorr, SampRate):
"""
make(float Cn2, float Wavelen, float LinkLen, float Rx_Dia, float TempCorr, float SampRate) -> Turbulence_ff_sptr
FSO Turbulence Channel.
This block simulates the effect of turbulence (scintillation) in FSO channel.
Constructor Specific Documentation:
Make a turbulence block.
Args:
Cn2 : refractive index structure constant (m^(-2/3))
Wavelen : optical beam wavelength (m)
LinkLen : FSO channel link length (m)
Rx_Dia : receiver aperture diameter (m)
TempCorr : temporal correlation of turbulence channel (s)
SampRate : sampling rate of signal (samples per second)
"""
return _FSO_Comm_swig.Turbulence_ff_make(Cn2, Wavelen, LinkLen, Rx_Dia, TempCorr, SampRate)
make = staticmethod(make)
def set_Cn2(self, Cn2):
"""
set_Cn2(Turbulence_ff self, float Cn2)
Set turbulence Cn2.
"""
return _FSO_Comm_swig.Turbulence_ff_set_Cn2(self, Cn2)
def Cn2(self):
"""
Cn2(Turbulence_ff self) -> float
Return current turbulence Cn2.
"""
return _FSO_Comm_swig.Turbulence_ff_Cn2(self)
def set_Wavelen(self, Wavelen):
"""
set_Wavelen(Turbulence_ff self, float Wavelen)
Set turbulence wavelength.
"""
return _FSO_Comm_swig.Turbulence_ff_set_Wavelen(self, Wavelen)
def Wavelen(self):
"""
Wavelen(Turbulence_ff self) -> float
Return current turbulence wavelength.
"""
return _FSO_Comm_swig.Turbulence_ff_Wavelen(self)
def set_LinkLen(self, LinkLen):
"""
set_LinkLen(Turbulence_ff self, float LinkLen)
Set turbulence link length.
"""
return _FSO_Comm_swig.Turbulence_ff_set_LinkLen(self, LinkLen)
def LinkLen(self):
"""
LinkLen(Turbulence_ff self) -> float
Return current turbulence link length.
"""
return _FSO_Comm_swig.Turbulence_ff_LinkLen(self)
def set_Rx_Dia(self, Rx_Dia):
"""
set_Rx_Dia(Turbulence_ff self, float Rx_Dia)
Set turbulence receiver aperture diameter.
"""
return _FSO_Comm_swig.Turbulence_ff_set_Rx_Dia(self, Rx_Dia)
def Rx_Dia(self):
"""
Rx_Dia(Turbulence_ff self) -> float
Return current turbulence receiver aperture diameter.
"""
return _FSO_Comm_swig.Turbulence_ff_Rx_Dia(self)
def set_TempCorr(self, TempCorr):
"""
set_TempCorr(Turbulence_ff self, float TempCorr)
Set turbulence temporal correlation.
"""
return _FSO_Comm_swig.Turbulence_ff_set_TempCorr(self, TempCorr)
def TempCorr(self):
"""
TempCorr(Turbulence_ff self) -> float
Return current turbulence temporal correlation.
"""
return _FSO_Comm_swig.Turbulence_ff_TempCorr(self)
def set_SampRate(self, SampRate):
"""
set_SampRate(Turbulence_ff self, float SampRate)
Set turbulence sampling rate.
"""
return _FSO_Comm_swig.Turbulence_ff_set_SampRate(self, SampRate)
def SampRate(self):
"""
SampRate(Turbulence_ff self) -> float
Return current turbulence sampling rate.
"""
return _FSO_Comm_swig.Turbulence_ff_SampRate(self)
__swig_destroy__ = _FSO_Comm_swig.delete_Turbulence_ff
__del__ = lambda self: None
Turbulence_ff_swigregister = _FSO_Comm_swig.Turbulence_ff_swigregister
Turbulence_ff_swigregister(Turbulence_ff)
def Turbulence_ff_make(Cn2, Wavelen, LinkLen, Rx_Dia, TempCorr, SampRate):
"""
Turbulence_ff_make(float Cn2, float Wavelen, float LinkLen, float Rx_Dia, float TempCorr, float SampRate) -> Turbulence_ff_sptr
FSO Turbulence Channel.
This block simulates the effect of turbulence (scintillation) in FSO channel.
Constructor Specific Documentation:
Make a turbulence block.
Args:
Cn2 : refractive index structure constant (m^(-2/3))
Wavelen : optical beam wavelength (m)
LinkLen : FSO channel link length (m)
Rx_Dia : receiver aperture diameter (m)
TempCorr : temporal correlation of turbulence channel (s)
SampRate : sampling rate of signal (samples per second)
"""
return _FSO_Comm_swig.Turbulence_ff_make(Cn2, Wavelen, LinkLen, Rx_Dia, TempCorr, SampRate)
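# Usage sketch (hypothetical parameter values; assumes the out-of-tree module is
# importable as `FSO_Comm` and that, as with Pointing_Errors_ff above, the class
# name is later rebound to its `make` factory): create a turbulence channel block
# to be connected between two float streams in a flowgraph.
#
#     import FSO_Comm
#     turb = FSO_Comm.Turbulence_ff(1e-14,    # Cn2 (m^(-2/3))
#                                   1550e-9,  # wavelength (m)
#                                   1000.0,   # link length (m)
#                                   0.05,     # Rx aperture diameter (m)
#                                   1e-3,     # temporal correlation (s)
#                                   1e6)      # sample rate (samples/s)
#     # tb.connect(src, turb, sink)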
class Turbulence_ff_sptr(object):
"""Proxy of C++ boost::shared_ptr<(gr::FSO_Comm::Turbulence_ff)> class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(boost::shared_ptr<(gr::FSO_Comm::Turbulence_ff)> self) -> Turbulence_ff_sptr
__init__(boost::shared_ptr<(gr::FSO_Comm::Turbulence_ff)> self, Turbulence_ff p) -> Turbulence_ff_sptr
"""
this = _FSO_Comm_swig.new_Turbulence_ff_sptr(*args)
try:
self.this.append(this)
except | |
munsell_value_Saunderson1944(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*Saunderson and Milner (1944)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value_Saunderson1944(12.23634268) # doctest: +ELLIPSIS
4.0444736...
"""
Y = to_domain_100(Y)
V = 2.357 * spow(Y, 0.343) - 1.52
return as_float(from_range_10(V))
def munsell_value_Ladd1955(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*Ladd and Pinney (1955)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value_Ladd1955(12.23634268) # doctest: +ELLIPSIS
4.0511633...
"""
Y = to_domain_100(Y)
V = 2.468 * spow(Y, 1 / 3) - 1.636
return as_float(from_range_10(V))
def munsell_value_McCamy1987(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*McCamy (1987)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`ASTMInternational1989a`
Examples
--------
>>> munsell_value_McCamy1987(12.23634268) # doctest: +ELLIPSIS
4.0814348...
"""
Y = to_domain_100(Y)
V = np.where(
Y <= 0.9,
0.87445 * spow(Y, 0.9967),
2.49268 * spow(Y, 1 / 3)
- 1.5614
- (0.985 / (((0.1073 * Y - 3.084) ** 2) + 7.54))
+ (0.0133 / spow(Y, 2.3))
+ 0.0084 * np.sin(4.1 * spow(Y, 1 / 3) + 1)
+ (0.0221 / Y) * np.sin(0.39 * (Y - 2))
- (0.0037 / (0.44 * Y)) * np.sin(1.28 * (Y - 0.53)),
)
return as_float(from_range_10(V))
def munsell_value_ASTMD1535(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
an inverse lookup table from *ASTM D1535-08e1* method.
Parameters
----------
Y
*luminance* :math:`Y`
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
- The *Munsell* value computation with *ASTM D1535-08e1* method is only
defined for domain [0, 100].
References
----------
:cite:`ASTMInternational1989a`
Examples
--------
>>> munsell_value_ASTMD1535(12.23634268) # doctest: +ELLIPSIS
4.0824437...
"""
Y = to_domain_100(Y)
V = _munsell_value_ASTMD1535_interpolator()(Y)
return as_float(from_range_10(V))
MUNSELL_VALUE_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Priest 1920": munsell_value_Priest1920,
"Munsell 1933": munsell_value_Munsell1933,
"Moon 1943": munsell_value_Moon1943,
"Saunderson 1944": munsell_value_Saunderson1944,
"Ladd 1955": munsell_value_Ladd1955,
"McCamy 1987": munsell_value_McCamy1987,
"ASTM D1535": munsell_value_ASTMD1535,
}
)
MUNSELL_VALUE_METHODS.__doc__ = """
Supported *Munsell* value computation methods.
References
----------
:cite:`ASTMInternational1989a`, :cite:`Wikipedia2007c`
Aliases:
- 'astm2008': 'ASTM D1535'
"""
MUNSELL_VALUE_METHODS["astm2008"] = MUNSELL_VALUE_METHODS["ASTM D1535"]
def munsell_value(
Y: FloatingOrArrayLike,
method: Union[
Literal[
"ASTM D1535",
"Ladd 1955",
"McCamy 1987",
"Moon 1943",
"Munsell 1933",
"Priest 1920",
"Saunderson 1944",
],
str,
] = "ASTM D1535",
) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
given method.
Parameters
----------
Y
*luminance* :math:`Y`.
method
Computation method.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`ASTMInternational1989a`, :cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value(12.23634268) # doctest: +ELLIPSIS
4.0824437...
>>> munsell_value(12.23634268, method='Priest 1920') # doctest: +ELLIPSIS
3.4980484...
>>> munsell_value(12.23634268, method='Munsell 1933') # doctest: +ELLIPSIS
4.1627702...
>>> munsell_value(12.23634268, method='Moon 1943') # doctest: +ELLIPSIS
4.0688120...
>>> munsell_value(12.23634268, method='Saunderson 1944')
... # doctest: +ELLIPSIS
4.0444736...
>>> munsell_value(12.23634268, method='Ladd 1955') # doctest: +ELLIPSIS
4.0511633...
>>> munsell_value(12.23634268, method='McCamy 1987') # doctest: +ELLIPSIS
4.0814348...
"""
method = validate_method(method, MUNSELL_VALUE_METHODS)
return MUNSELL_VALUE_METHODS[method](Y)
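# Usage sketch: selecting a computation method by name; the expected outputs are
# taken from the doctests above.
#
#     munsell_value(12.23634268)                      # ASTM D1535 (default) -> ~4.0824
#     munsell_value(12.23634268, method="Ladd 1955")  # -> ~4.0512
#     munsell_value(np.array([12.23634268, 100.0]))   # array input is also accepted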
def _munsell_scale_factor() -> NDArray:
"""
Return the domain-range scale factor for *Munsell Renotation System*.
Returns
-------
:class:`numpy.ndarray`
Domain-range scale factor for *Munsell Renotation System*.
"""
return np.array([10, 10, 50 if get_domain_range_scale() == "1" else 2, 10])
def _munsell_specification_to_xyY(specification: ArrayLike) -> NDArray:
"""
Convert given *Munsell* *Colorlab* specification to *CIE xyY* colourspace.
Parameters
----------
specification
*Munsell* *Colorlab* specification.
Returns
-------
:class:`numpy.ndarray`
*CIE xyY* colourspace array.
"""
specification = normalise_munsell_specification(specification)
if is_grey_munsell_colour(specification):
specification = as_float_array(to_domain_10(specification))
hue, value, chroma, code = specification
else:
specification = to_domain_10(specification, _munsell_scale_factor())
hue, value, chroma, code = specification
code = as_int_scalar(code)
attest(
0 <= hue <= 10,
f'"{specification}" specification hue must be normalised to '
f"domain [0, 10]!",
)
attest(
0 <= value <= 10,
f'"{specification}" specification value must be normalised to '
f"domain [0, 10]!",
)
with domain_range_scale("ignore"):
Y = luminance_ASTMD1535(value)
if is_integer(value):
value_minus = value_plus = round(value)
else:
value_minus = np.floor(value)
value_plus = value_minus + 1
specification_minus = as_float_array(
value_minus
if is_grey_munsell_colour(specification)
else [hue, value_minus, chroma, code]
)
x_minus, y_minus = tsplit(munsell_specification_to_xy(specification_minus))
specification_plus = as_float_array(
value_plus
if (is_grey_munsell_colour(specification) or value_plus == 10)
else [hue, value_plus, chroma, code]
)
x_plus, y_plus = tsplit(munsell_specification_to_xy(specification_plus))
if value_minus == value_plus:
x = x_minus
y = y_minus
else:
with domain_range_scale("ignore"):
Y_minus = as_float_array(luminance_ASTMD1535(value_minus))
Y_plus = as_float_array(luminance_ASTMD1535(value_plus))
Y_minus_plus = np.squeeze([Y_minus, Y_plus])
x_minus_plus = np.squeeze([x_minus, x_plus])
y_minus_plus = np.squeeze([y_minus, y_plus])
x = LinearInterpolator(Y_minus_plus, x_minus_plus)(Y)
y = LinearInterpolator(Y_minus_plus, y_minus_plus)(Y)
return tstack([x, y, from_range_1(Y / 100)])
def munsell_specification_to_xyY(specification: ArrayLike) -> NDArray:
"""
Convert given *Munsell* *Colorlab* specification to *CIE xyY* colourspace.
Parameters
----------
specification
*Munsell* *Colorlab* specification.
Returns
-------
:class:`numpy.ndarray`
*CIE xyY* colourspace array.
Notes
-----
+-------------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+===================+=======================+===============+
| ``specification`` | ``hue`` : [0, 10] | [0, 1] |
| | | |
| | ``value`` : [0, 10] | [0, 1] |
| | | |
| | ``chroma`` : [0, 50] | [0, 1] |
| | | |
| | ``code`` : [0, 10] | [0, 1] |
+-------------------+-----------------------+---------------+
+-------------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+===================+=======================+===============+
| ``xyY`` | [0, 1] | [0, 1] |
+-------------------+-----------------------+---------------+
References
----------
:cite:`Centore2014m`
Examples
--------
>>> munsell_specification_to_xyY(np.array([2.1, 8.0, 17.9, 4]))
... # doctest: +ELLIPSIS
array([ 0.4400632..., 0.5522428..., 0.5761962...])
>>> munsell_specification_to_xyY(np.array([np.nan, 8.9, np.nan, np.nan]))
... # doctest: +ELLIPSIS
array([ 0.31006 , 0.31616 , 0.7461345...])
"""
specification = as_float_array(specification)
shape = list(specification.shape)
xyY = [
_munsell_specification_to_xyY(a)
for a in specification.reshape([-1, 4])
]
shape[-1] = 3
return np.reshape(as_float_array(xyY), shape)
def munsell_colour_to_xyY(munsell_colour: StrOrArrayLike) -> NDArray:
"""
Convert given *Munsell* colour to *CIE xyY* colourspace.
Parameters
----------
munsell_colour
*Munsell* colour.
Returns
-------
:class:`numpy.ndarray`
*CIE xyY* colourspace array.
Notes
-----
+-----------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+===========+=======================+===============+
| ``xyY`` | [0, 1] | [0, 1] |
+-----------+-----------------------+---------------+
References
----------
:cite:`Centorea`, :cite:`Centore2012a`
Examples
--------
>>> munsell_colour_to_xyY('4.2YR 8.1/5.3') # doctest: +ELLIPSIS
array([ 0.3873694..., 0.3575165..., 0.59362 ])
>>> munsell_colour_to_xyY('N8.9') # doctest: +ELLIPSIS
array([ 0.31006 , 0.31616 , 0.7461345...])
"""
munsell_colour = np.array(munsell_colour)
shape = list(munsell_colour.shape)
specification = np.array(
[
munsell_colour_to_munsell_specification(a)
for a in np.ravel(munsell_colour)
]
)
return munsell_specification_to_xyY(
from_range_10(
specification.reshape(shape + [4]), _munsell_scale_factor()
)
)
def _xyY_to_munsell_specification(xyY: ArrayLike) -> NDArray:
"""
Convert from *CIE xyY* colourspace to *Munsell* *Colorlab* specification.
Parameters
----------
xyY
*CIE xyY* colourspace array.
Returns
-------
:class:`numpy.ndarray`
*Munsell* *Colorlab* specification.
Raises
------
ValueError
If the given *CIE xyY* colourspace array is not within MacAdam
limits.
RuntimeError
If the maximum iterations count has been reached without converging to
a result.
"""
x, y, Y | |
content,
requires,
attachments,
name,
version_name,
document_type,
document_format,
target_type,
tags,
):
ssm_document = Document(
name=name,
version_name=version_name,
content=content,
document_type=document_type,
document_format=document_format,
requires=requires,
attachments=attachments,
target_type=target_type,
tags=tags,
)
_validate_document_info(
content=content,
name=name,
document_type=document_type,
document_format=document_format,
)
if self._documents.get(ssm_document.name):
raise DocumentAlreadyExists("The specified document already exists.")
self._documents[ssm_document.name] = {
"documents": {ssm_document.document_version: ssm_document},
"default_version": ssm_document.document_version,
"latest_version": ssm_document.document_version,
}
return self._generate_document_description(ssm_document)
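# Usage sketch (illustrative only): this backend is normally exercised through
# boto3 with moto's SSM mock; the decorator may be named `mock_aws` in newer moto
# releases, and the document content below is a minimal hypothetical example.
#
#     import boto3
#     from moto import mock_ssm
#
#     @mock_ssm
#     def test_create_and_describe_document():
#         client = boto3.client("ssm", region_name="us-east-1")
#         client.create_document(
#             Content='{"schemaVersion": "2.2", "description": "demo", "mainSteps": []}',
#             Name="demo-doc",
#             DocumentType="Command",
#             DocumentFormat="JSON",
#         )
#         assert client.describe_document(Name="demo-doc")["Document"]["Name"] == "demo-doc"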
def delete_document(self, name, document_version, version_name, force):
documents = self._documents.get(name, {}).get("documents", {})
keys_to_delete = set()
if documents:
default_version = self._documents[name]["default_version"]
if (
documents[default_version].document_type
== "ApplicationConfigurationSchema"
and not force
):
raise InvalidDocumentOperation(
"You attempted to delete a document while it is still shared. "
"You must stop sharing the document before you can delete it."
)
if document_version and document_version == default_version:
raise InvalidDocumentOperation(
"Default version of the document can't be deleted."
)
if document_version or version_name:
# We delete only a specific version
delete_doc = self._find_document(name, document_version, version_name)
# we can't delete only the default version
if (
delete_doc
and delete_doc.document_version == default_version
and len(documents) != 1
):
raise InvalidDocumentOperation(
"Default version of the document can't be deleted."
)
if delete_doc:
keys_to_delete.add(delete_doc.document_version)
else:
raise InvalidDocument("The specified document does not exist.")
else:
# We are deleting all versions
keys_to_delete = set(documents.keys())
for key in keys_to_delete:
del self._documents[name]["documents"][key]
if len(self._documents[name]["documents"].keys()) == 0:
del self._documents[name]
else:
old_latest = self._documents[name]["latest_version"]
if old_latest not in self._documents[name]["documents"].keys():
leftover_keys = self._documents[name]["documents"].keys()
int_keys = []
for key in leftover_keys:
int_keys.append(int(key))
self._documents[name]["latest_version"] = str(sorted(int_keys)[-1])
else:
raise InvalidDocument("The specified document does not exist.")
def _find_document(
self, name, document_version=None, version_name=None, strict=True
):
if not self._documents.get(name):
raise InvalidDocument("The specified document does not exist.")
documents = self._documents[name]["documents"]
ssm_document = None
if not version_name and not document_version:
# Retrieve default version
default_version = self._documents[name]["default_version"]
ssm_document = documents.get(default_version)
elif version_name and document_version:
for doc_version, document in documents.items():
if (
doc_version == document_version
and document.version_name == version_name
):
ssm_document = document
break
else:
for doc_version, document in documents.items():
if document_version and doc_version == document_version:
ssm_document = document
break
if version_name and document.version_name == version_name:
ssm_document = document
break
if strict and not ssm_document:
raise InvalidDocument("The specified document does not exist.")
return ssm_document
def get_document(self, name, document_version, version_name, document_format):
ssm_document = self._find_document(name, document_version, version_name)
if not document_format:
document_format = ssm_document.document_format
else:
_validate_document_format(document_format=document_format)
return self._generate_document_information(ssm_document, document_format)
def update_document_default_version(self, name, document_version):
ssm_document = self._find_document(name, document_version=document_version)
self._documents[name]["default_version"] = document_version
base = {
"Name": ssm_document.name,
"DefaultVersion": document_version,
}
if ssm_document.version_name:
base["DefaultVersionName"] = ssm_document.version_name
return base
def update_document(
self,
content,
attachments,
name,
version_name,
document_version,
document_format,
target_type,
):
_validate_document_info(
content=content,
name=name,
document_type=None,
document_format=document_format,
strict=False,
)
if not self._documents.get(name):
raise InvalidDocument("The specified document does not exist.")
if (
self._documents[name]["latest_version"] != document_version
and document_version != "$LATEST"
):
raise InvalidDocumentVersion(
"The document version is not valid or does not exist."
)
if version_name and self._find_document(
name, version_name=version_name, strict=False
):
raise DuplicateDocumentVersionName(
"The specified version name is a duplicate."
)
old_ssm_document = self._find_document(name)
new_ssm_document = Document(
name=name,
version_name=version_name,
content=content,
document_type=old_ssm_document.document_type,
document_format=document_format,
requires=old_ssm_document.requires,
attachments=attachments,
target_type=target_type,
tags=old_ssm_document.tags,
document_version=str(int(self._documents[name]["latest_version"]) + 1),
)
for doc_version, document in self._documents[name]["documents"].items():
if document.content == new_ssm_document.content:
raise DuplicateDocumentContent(
"The content of the association document matches another document. "
"Change the content of the document and try again."
)
self._documents[name]["latest_version"] = str(
int(self._documents[name]["latest_version"]) + 1
)
self._documents[name]["documents"][
new_ssm_document.document_version
] = new_ssm_document
return self._generate_document_description(new_ssm_document)
def describe_document(self, name, document_version, version_name):
ssm_document = self._find_document(name, document_version, version_name)
return self._generate_document_description(ssm_document)
def list_documents(
self, document_filter_list, filters, max_results=10, next_token="0"
):
if document_filter_list:
raise ValidationException(
"DocumentFilterList is deprecated. Instead use Filters."
)
next_token = int(next_token)
results = []
dummy_token_tracker = 0
# Sort to maintain next token adjacency
for document_name, document_bundle in sorted(self._documents.items()):
if len(results) == max_results:
# There's still more to go so we need a next token
return results, str(next_token + len(results))
if dummy_token_tracker < next_token:
dummy_token_tracker = dummy_token_tracker + 1
continue
default_version = document_bundle["default_version"]
ssm_doc = self._documents[document_name]["documents"][default_version]
if filters and not _document_filter_match(filters, ssm_doc):
                # If filters are enabled and this document does not match them, skip it
continue
else:
results.append(self._generate_document_list_information(ssm_doc))
        # If we've fallen out of the loop, there are no more documents. No next token.
return results, ""
def delete_parameter(self, name):
return self._parameters.pop(name, None)
def delete_parameters(self, names):
result = []
for name in names:
try:
del self._parameters[name]
result.append(name)
except KeyError:
pass
return result
def describe_parameters(self, filters, parameter_filters):
if filters and parameter_filters:
raise ValidationException(
"You can use either Filters or ParameterFilters in a single request."
)
self._validate_parameter_filters(parameter_filters, by_path=False)
result = []
for param_name in self._parameters:
ssm_parameter = self.get_parameter(param_name, False)
if not self._match_filters(ssm_parameter, parameter_filters):
continue
if filters:
for filter in filters:
if filter["Key"] == "Name":
k = ssm_parameter.name
for v in filter["Values"]:
if k.startswith(v):
result.append(ssm_parameter)
break
elif filter["Key"] == "Type":
k = ssm_parameter.type
for v in filter["Values"]:
if k == v:
result.append(ssm_parameter)
break
elif filter["Key"] == "KeyId":
k = ssm_parameter.keyid
if k:
for v in filter["Values"]:
if k == v:
result.append(ssm_parameter)
break
continue
result.append(ssm_parameter)
return result
def _validate_parameter_filters(self, parameter_filters, by_path):
for index, filter_obj in enumerate(parameter_filters or []):
key = filter_obj["Key"]
values = filter_obj.get("Values", [])
if key == "Path":
option = filter_obj.get("Option", "OneLevel")
else:
option = filter_obj.get("Option", "Equals")
if not re.match(r"^tag:.+|Name|Type|KeyId|Path|Label|Tier$", key):
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.key".format(
index=(index + 1)
),
value=key,
constraint="Member must satisfy regular expression pattern: tag:.+|Name|Type|KeyId|Path|Label|Tier",
)
)
if len(key) > 132:
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.key".format(
index=(index + 1)
),
value=key,
constraint="Member must have length less than or equal to 132",
)
)
if len(option) > 10:
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.option".format(
index=(index + 1)
),
value="over 10 chars",
constraint="Member must have length less than or equal to 10",
)
)
if len(values) > 50:
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.values".format(
index=(index + 1)
),
value=values,
constraint="Member must have length less than or equal to 50",
)
)
if any(len(value) > 1024 for value in values):
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.values".format(
index=(index + 1)
),
value=values,
constraint="[Member must have length less than or equal to 1024, Member must have length greater than or equal to 1]",
)
)
self._raise_errors()
filter_keys = []
for filter_obj in parameter_filters or []:
key = filter_obj["Key"]
values = filter_obj.get("Values")
if key == "Path":
option = filter_obj.get("Option", "OneLevel")
else:
option = filter_obj.get("Option", "Equals")
if not by_path and key == "Label":
raise InvalidFilterKey(
"The following filter key is not valid: Label. Valid filter keys include: [Path, Name, Type, KeyId, Tier]."
)
if by_path and key in ["Name", "Path", "Tier"]:
raise InvalidFilterKey(
"The following filter key is not valid: {key}. Valid filter keys include: [Type, KeyId].".format(
key=key
)
)
if not values:
raise InvalidFilterValue(
"The following filter values are missing : null for filter key Name."
)
if key in filter_keys:
raise InvalidFilterKey(
"The following filter is duplicated in the request: Name. A request can contain only one occurrence of a specific filter."
)
if key == "Path":
if option not in ["Recursive", "OneLevel"]:
raise InvalidFilterOption(
"The following filter option is not valid: {option}. Valid options include: [Recursive, OneLevel].".format(
option=option
)
)
if any(value.lower().startswith(("/aws", "/ssm")) for value in values):
raise ValidationException(
'Filters for common parameters can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
"When using global parameters, please specify within a global namespace."
)
for value in values:
if value.lower().startswith(("/aws", "/ssm")):
raise ValidationException(
'Filters for common parameters can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
"When using global parameters, please specify within a global namespace."
)
if (
"//" in value
or not value.startswith("/")
                        or not re.match(r"^[a-zA-Z0-9_.\-/]*$", value)
):
raise ValidationException(
'The parameter doesn\'t meet the parameter name requirements. The parameter name must begin with a forward slash "/". '
'It can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
"It must use only letters, numbers, or the following symbols: . (period), - (hyphen), _ (underscore). "
'Special characters are not allowed. All sub-paths, if specified, must use the forward slash symbol "/". '
"Valid example: /get/parameters2-/by1./path0_."
)
if key == "Tier":
for value in values:
if value not in ["Standard", "Advanced", "Intelligent-Tiering"]:
raise InvalidFilterOption(
"The following filter value is not valid: {value}. Valid values include: [Standard, Advanced, Intelligent-Tiering].".format(
value=value
)
)
if key == "Type":
for value in values:
if | |
#!/usr/bin/env python
# acstools/acs_destripe_plus.py
"""
Fully calibrate post-SM4 ACS/WFC exposures using the standalone
:ref:`acsdestripe` tool to remove stripes between ACSCCD and ACSCTE
steps in CALACS.
This script runs CALACS (8.3.1 or higher only) and ``acs_destripe``
on ACS/WFC images. Input files must be RAW full-frame or supported subarray
ACS/WFC exposures taken after SM4. Resultant outputs are science-ready
FLT and FLC (if applicable) files.
This script is useful for when built-in CALACS destriping algorithm
using overscans is insufficient or unavailable.
For more information, see
`Removal of Bias Striping Noise from Post-SM4 ACS WFC Images <http://www.stsci.edu/hst/acs/software/destripe/>`_.
Examples
--------
In Python:
>>> from acstools import acs_destripe_plus
>>> acs_destripe_plus.destripe_plus(
... 'j12345678_raw.fits', suffix='strp', maxiter=15, sigrej=2.0,
... scimask1='mymask_sci1.fits', scimask2='mymask_sci2.fits',
... clobber=False, cte_correct=True)
From command line::
% acs_destripe_plus [-h] [--suffix SUFFIX] [--stat STAT]
[--maxiter MAXITER] [--sigrej SIGREJ]
[--lower [LOWER]] [--upper [UPPER]]
[--binwidth BINWIDTH] [--sci1_mask SCI1_MASK]
[--sci2_mask SCI2_MASK] [--dqbits [DQBITS]]
[--rpt_clean RPT_CLEAN] [--atol [ATOL]] [--nocte]
[--clobber] [-q] [--version]
input
"""
#
# HISTORY:
# 16APR2014 Leonardo version 1.0
# Based on instructions from <NAME>
# 11SEP2014 Ogaz added capabilities for full frame processing
# and stripe masking
# 29OCT2014 Ogaz clean up for posting final script for users
# 18NOV2014 Ogaz changed options/switches
# 12DEC2014 Lim incorporated script into ACSTOOLS
# 11MAR2015 Lim added parameters to be passed into acs_destripe
# 12MAR2015 (v0.3.0) Cara added capability to use DQ mask;
# added support for multiple input files and wildcards in the file
# names. Cara added weighted (by NPix) background computations
# (especially important for vigneted filters).
# See Tickets #1178 & #1180.
# 31MAR2015 (v0.4.0) Cara added repeated de-stripe iterations (to improve
# corrections in the "RAW" space) and support for various
# statistics modes. See Ticket #1183.
# 12JAN2016 (v0.4.1) Lim added new subarray modes that are allowed CTE corr.
# STDLIB
import logging
import os
import subprocess # nosec
# ASTROPY
from astropy.time import Time
# THIRD-PARTY
import numpy as np
try:
# This supports PIXVALUE
from stsci.tools import stpyfits as fits
except ImportError:
# Falls back to Astropy
from astropy.io import fits
# LOCAL
from . import acs_destripe
from . import acs2d
from . import acsccd
from . import acscte
__taskname__ = 'acs_destripe_plus'
__version__ = '0.4.2'
__vdate__ = '31-May-2018'
__author__ = '<NAME>, <NAME> (ACS Team), STScI'
__all__ = ['destripe_plus']
SM4_DATE = Time('2008-01-01')
SUBARRAY_LIST = [
'WFC1-2K', 'WFC1-POL0UV', 'WFC1-POL0V', 'WFC1-POL60V',
'WFC1-POL60UV', 'WFC1-POL120V', 'WFC1-POL120UV', 'WFC1-SMFL',
'WFC1-IRAMPQ', 'WFC1-MRAMPQ', 'WFC2-2K', 'WFC2-ORAMPQ',
'WFC2-SMFL', 'WFC2-POL0UV', 'WFC2-POL0V', 'WFC2-MRAMPQ',
'WFC1A-512', 'WFC1A-1K', 'WFC1A-2K', 'WFC1B-512', 'WFC1B-1K', 'WFC1B-2K',
'WFC2C-512', 'WFC2C-1K', 'WFC2C-2K', 'WFC2D-512', 'WFC2D-1K', 'WFC2D-2K']
logging.basicConfig()
LOG = logging.getLogger(__taskname__)
LOG.setLevel(logging.INFO)
def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15,
sigrej=2.0, lower=None, upper=None, binwidth=0.3,
scimask1=None, scimask2=None,
dqbits=None, rpt_clean=0, atol=0.01,
cte_correct=True, clobber=False, verbose=True):
r"""Calibrate post-SM4 ACS/WFC exposure(s) and use
standalone :ref:`acsdestripe`.
This takes a RAW image and generates a FLT file containing
its calibrated and destriped counterpart.
If CTE correction is performed, FLC will also be present.
Parameters
----------
inputfile : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*raw.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product of ``acs_destripe``.
This only affects the intermediate output file that will
be automatically renamed to ``*blv_tmp.fits`` during the processing.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <https://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <https://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's `imagestats` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameters sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
    binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
        background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
scimask1 : str or list of str
Mask images for *calibrated* ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
scimask2 : str or list of str
Mask images for *calibrated* ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `scimask1` and `scimask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
cte_correct : bool
        Perform CTE correction.
given netcdf files
considering the units and the calendar type used. This is a wrapper to the
netCDF4.date2num function to account for calendar strangeness in ROMS
Parameters
----------
dates : array of datetime.datetime
Values to convert
nc : netCDF4.Dataset,
netcdf input file
tvar : string, optional
time variable to load. If not specified, it will find the
time variable from predefined
Returns
-------
ndarray,
Array of values in the correct units/calendar of the netCDF file
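    Examples
    --------
    A minimal usage sketch; the file name ``ocean_his.nc`` and its
    ``ocean_time`` variable are illustrative assumptions:
    >>> import datetime
    >>> import netCDF4
    >>> nc = netCDF4.Dataset('ocean_his.nc')
    >>> vals = date2num([datetime.datetime(2005, 1, 1)], nc, tvar='ocean_time')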
"""
tvar = tvar if tvar else get_timevar(nc)
calendar, _ = _get_calendar(nc.variables[tvar])
# Convert the times
return netCDF4.date2num(dates,
nc.variables[tvar].units,
calendar=calendar)
def num2date(nc, tvar=None, records=None, as_datetime=True, epoch=None):
"""
Load the time vector from a netCDF file as a datetime array, accounting
for units and the calendar type used. This is a wrapper to the
netCDF4.num2date function to account for calendar strangeness in ROMS
Parameters
----------
nc : netCDF4.Dataset,
netcdf input file
tvar : string, optional
time variable to load. If not specified, it will find the
time variable from predefined
records : array or slice, optional
the indices of records to load
as_datetime : boolean, optional
convert the result to an array of datetimes [default]
epoch : datetime.datetime, optional
if you would like the values relative to an epoch, then
specify the epoch to remove.
Returns
-------
ndarray,
Array of datetimes if no epoch is supplied. If epoch, array
is in days since epoch
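    Examples
    --------
    A minimal usage sketch; the file name ``ocean_his.nc`` is an
    illustrative assumption:
    >>> import numpy as np
    >>> import netCDF4
    >>> nc = netCDF4.Dataset('ocean_his.nc')
    >>> times = num2date(nc)                     # all records as datetimes
    >>> last = num2date(nc, records=np.s_[-1:])  # only the last record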
"""
import datetime
records = records if records is not None else np.s_[:]
tvar = tvar if tvar else get_timevar(nc)
if tvar not in nc.variables:
warn(f"{nc.filepath()} does not have a recognizable time dimension.")
return list()
calendar, convert = _get_calendar(nc.variables[tvar])
# Load the times
times = np.atleast_1d(netCDF4.num2date(nc.variables[tvar][records],
nc.variables[tvar].units,
calendar=calendar))
# If we don't have datetime instances, convert to datetime if we can
if (as_datetime or convert) and \
(not isinstance(times[0], datetime.datetime)
and times[0].datetime_compatible):
times = np.array([datetime.datetime.strptime(
t.strftime('%Y-%m-%d %H:%M:%S'),
'%Y-%m-%d %H:%M:%S') for t in
times])
if not epoch:
return times
else:
return np.asarray([(t - epoch).total_seconds() * secs2day for t in times])
def get_timevar(nc):
"""
Find the appropriate time variable (bry_time, ocean_time, etc.) from a
given netcdf file
Parameters
----------
    nc : netCDF4.Dataset,
        netcdf input file
    Returns
    -------
    time : string,
        name of the time variable, or None if no known time variable is found
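    Examples
    --------
    Illustrative file name:
    >>> import netCDF4
    >>> nc = netCDF4.Dataset('ocean_his.nc')
    >>> tname = get_timevar(nc)   # e.g. 'ocean_time' for a history file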
"""
for time in ("ocean_time", "time", "bry_time", "wind_time",
"clim_time", "frc_time", "zeta_time"):
if time in nc.variables:
return time
return None
def get_reftime(nc, epoch=default_epoch):
"""
Given a ROMS netCDF4 file, return the reference time for the file. This
is the timebase of the record dimension in the format:
"<units> since <reftime>"
Parameters
----------
nc : netCDF4 dataset
Input ROMS file
epoch_str : string, optional
If lacking units, use this string as the units
Returns
-------
timebase : datetime
datetime of the origin for the file
time : string
name of variable used to generate the base (None if default)
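    Examples
    --------
    A usage sketch with an illustrative file name:
    >>> import netCDF4
    >>> nc = netCDF4.Dataset('ocean_his.nc')
    >>> origin, tvar = get_reftime(nc)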
"""
try:
tvar=get_timevar(nc)
calendar, _=_get_calendar(nc.variables[tvar])
return netCDF4.num2date(0, nc.variables[tvar].units,
calendar=calendar), tvar
except AttributeError:
return epoch, None
def omega(grid, u, v, zeta=0, scale=True, work=False):
"""
Compute the vertical velocity on s-grid.
Parameters
----------
grid : seapy.model.grid,
The grid to use for the calculations
u : ndarray,
The u-field in time
v : ndarray,
The v-field in time
zeta : ndarray, optional,
The zeta-field in time
scale : bool, optional,
If [True], return omega in [m s**-1];
If False, return omega in [m**3 s**-1]
work : bool, optional,
If True, return the work arrays:
z_r : ndarray,
Depth on rho-grid (time-varying if zeta != 0)
z_w : ndarray,
Depth on w-grid (time-varying if zeta != 0)
thick_u : ndarray
Thickness of the u-grid
thick_v : ndarray
Thickness of the v-grid
If False, return only omega
Returns
-------
omega : ndarray,
Vertical Velocity on s-grid
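    Examples
    --------
    A usage sketch; the grid and history file names, and the variable
    names read from them, are illustrative assumptions (``seapy.model.asgrid``
    is assumed to accept a grid file name):
    >>> import netCDF4
    >>> import seapy
    >>> grid = seapy.model.asgrid('ocean_grd.nc')
    >>> nc = netCDF4.Dataset('ocean_his.nc')
    >>> W = omega(grid, nc.variables['u'][:], nc.variables['v'][:],
    ...           zeta=nc.variables['zeta'][:])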
"""
grid=seapy.model.asgrid(grid)
u=np.ma.array(u)
v=np.ma.array(v)
zeta=np.ma.array(zeta)
# Check the sizes
while u.ndim < 4:
u=u[np.newaxis, ...]
while v.ndim < 4:
v=v[np.newaxis, ...]
while zeta.ndim < 3:
zeta=zeta[np.newaxis, ...]
# Get the model grid parameters for the given thickness
thick_u=u * 0
thick_v=v * 0
z_r=np.ma.zeros((u.shape[0], u.shape[1], zeta.shape[1], zeta.shape[2]))
z_w=np.ma.zeros((u.shape[0], u.shape[1] + 1,
zeta.shape[1], zeta.shape[2]))
for i in range(zeta.shape[0]):
s_w, cs_w=seapy.roms.stretching(
grid.vstretching, grid.theta_s, grid.theta_b, grid.hc,
grid.n, w_grid=True)
z_r[i, ...]=seapy.roms.depth(grid.vtransform, grid.h, grid.hc,
s_w, cs_w, zeta=zeta[i, ...],
w_grid=False)
z_w[i, ...]=seapy.roms.depth(grid.vtransform, grid.h, grid.hc,
s_w, cs_w, zeta=zeta[i, ...],
w_grid=True)
thick_rho=np.squeeze(z_w[i, 1:, :, :] - z_w[i, :-1, :, :])
thick_u[i, ...]=seapy.model.rho2u(thick_rho)
thick_v[i, ...]=seapy.model.rho2v(thick_rho)
z_r[z_r > 50000]=np.ma.masked
z_w[z_w > 50000]=np.ma.masked
# Compute W (omega)
Huon=u * thick_u * seapy.model.rho2u(grid.dn)
Hvom=v * thick_v * seapy.model.rho2v(grid.dm)
W=z_w * 0
for k in range(grid.n):
W[:, k + 1, :-2, :-2]=W[:, k, :-2, :-2] - \
(Huon[:, k, 1:-1, 1:] - Huon[:, k, 1:-1, :-1]
+ Hvom[:, k, 1:, 1:-1] - Hvom[:, k, :-1, 1:-1])
wrk=W[:, -1:, :, :] / (z_w[:, -1:, :, :] - z_w[:, 0:1, :, :])
W[:, :-1, :, :]=W[:, :-1, :, :] - wrk * \
(z_w[:, :-1, :, :] - z_w[:, 0:1, :, :])
W[:, -1, :, :]=0
if scale:
W *= grid.pn * grid.pm
if work:
return W, z_r, z_w, thick_u, thick_v
else:
return W
def wvelocity(grid, u, v, zeta=0):
"""
Compute "true" vertical velocity
Parameters
----------
grid : seapy.model.grid,
The grid to use for the calculations
u : ndarray,
The u-field in time
v : ndarray,
The v-field in time
zeta : ndarray, optional,
The zeta-field in time
Returns
-------
w : ndarray,
Vertical Velocity
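    Examples
    --------
    Same calling pattern as :func:`omega`; the file and variable names are
    illustrative assumptions:
    >>> import netCDF4
    >>> import seapy
    >>> grid = seapy.model.asgrid('ocean_grd.nc')
    >>> nc = netCDF4.Dataset('ocean_his.nc')
    >>> w = wvelocity(grid, nc.variables['u'][:], nc.variables['v'][:],
    ...               zeta=nc.variables['zeta'][:])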
"""
grid=seapy.model.asgrid(grid)
u=np.ma.array(u)
v=np.ma.array(v)
zeta=np.ma.array(zeta)
# Check the sizes
while u.ndim < 4:
u=u[np.newaxis, ...]
while v.ndim < 4:
v=v[np.newaxis, ...]
while zeta.ndim < 3:
zeta=zeta[np.newaxis, ...]
# Get omega
W, z_r, z_w, thick_u, thick_v=omega(grid, u, v, zeta, scale=True,
work=True)
# Compute quasi-horizontal motions (Ui + Vj)*GRAD s(z)
vert=z_r * 0
# U-contribution
wrk=u * (z_r[:, :, :, 1:] - z_r[:, :, :, :-1]) * \
(grid.pm[:, 1:] - grid.pm[:, :-1])
vert[:, :, :, 1:-1]=0.25 * (wrk[:, :, :, :-1] + wrk[:, :, :, 1:])
# V-contribution
wrk = v * (z_r[:, :, 1:, :] - z_r[:, :, :-1, :]) * \
(grid.pn[1:, :] - grid.pn[:-1, :])
vert[:, :, 1:-1, :] += 0.25 * (wrk[:, :, :-1, :] + wrk[:, :, 1:, :])
# Compute barotropic velocity [ERROR IN FORMULATION RIGHT NOW]
wrk = np.zeros((vert.shape[0], vert.shape[2], vert.shape[3]))
ubar = np.sum(u * thick_u, axis=1) / np.sum(thick_u, axis=1)
vbar = np.sum(v * thick_v, axis=1) / np.sum(thick_v, axis=1)
# wrk[:, 1:-1, 1:-1] = (ubar[:, 1:-1, :-1] - ubar[:, 1:-1, 1:] +
# vbar[:, :-1, 1:-1] - vbar[:, 1:, 1:-1])
# Shift vert from rho to w
wvel = z_w * 0
# First two layers
slope = (z_r[:, 0, :, :] - z_w[:, 0, :, :]) / \
(z_r[:, 1, :, :] - z_r[:, 0, :, :])
wvel[:, 0, :, :] = 0.375 * (vert[:, 0, :, :] - slope *
(vert[:, 1, :, :] - vert[:, 0, :, :])) + \
0.75 * vert[:, 0, :, :] - \
0.125 * vert[:, 1, :, :]
wvel[:, 1, :, :] = W[:, 1, :, :] + wrk + \
0.375 * vert[:, 0, :, :] + \
0.75 * vert[:, 1, :, :] - 0.125 * vert[:, 2, :, :]
# Middle of the grid
wvel[:, 2:-2, :, :] = W[:, 2:-2, :, :] + \
wrk[:, np.newaxis, :, :] + \
0.5625 * (vert[:, 1:-2, :, :] + vert[:, 2:-1, :, :]) - \
0.0625 * (vert[:, :-3, :, :] + vert[:, 3:, :, :])
# Upper two layers
slope = (z_w[:, -1, :, :] - z_r[:, -1, :, :]) / \
(z_r[:, -1, :, :] - z_r[:, -2, :, :])
wvel[:, -1, :, :] = wrk + 0.375 * (vert[:, -1, :, :] + slope *
(vert[:, -1, :, :] - vert[:, -2, :, :])) + \
0.75 * vert[:, -1, :, :] - \
0.0625 * vert[:, -2, :, :]
wvel[:, -2, :, :] = W[:, -2, :, :] + 0.375 * vert[:, -1, :, :] + \
wrk + 0.75 * vert[:, -2, :, :] - \
0.125 * vert[:, -3, | |
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.findHost(wwn='123456789012345'),
mock.call.findHost(wwn='123456789054321'),
mock.call.createHost(
self.FAKE_HOST,
FCWwns=['123456789012345', '123456789054321'],
optional={'domain': None, 'persona': 1}),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
def test_create_invalid_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'), {
'name': 'fakehost.foo',
'FCPaths': [{'wwn': '123456789012345'}, {
'wwn': '123456789054321'}]}]
mock_client.findHost.return_value = 'fakehost.foo'
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.findHost(wwn='123456789012345'),
mock.call.getHost('fakehost.foo')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
def test_create_modify_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [{
'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'}, {
'wwn': '123456789054321'}]}]
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345', '123456789054321'],
'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 2)
def test_modify_host_with_new_wwn(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
getHost_ret1 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789054321'}]}
getHost_ret2 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'}]}
mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 2)
def test_modify_host_with_unknown_wwn_and_new_wwn(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
getHost_ret1 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789054321'},
{'wwn': 'xxxxxxxxxxxxxxx'}]}
getHost_ret2 = {
'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'},
{'wwn': 'xxxxxxxxxxxxxxx'}]}
mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 3)
class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d'
TARGET_LUN = 186
properties = {
'driver_volume_type': 'iscsi',
'data':
{'target_discovered': True,
'target_iqn': TARGET_IQN,
'target_lun': TARGET_LUN,
'target_portal': '1.1.1.2:1234'}}
def setup_driver(self, config=None, mock_conf=None):
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(
conf=config,
m_conf=mock_conf,
driver=hpdriver.HP3PARISCSIDriver)
expected = [
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.getCPG(HP3PAR_CPG),
mock.call.logout(),
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.getPorts(),
mock.call.logout()]
mock_client.assert_has_calls(expected)
mock_client.reset_mock()
return mock_client
def test_initialize_connection(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.findHost.return_value = self.FAKE_HOST
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': self.TARGET_LUN, 'type': 0}]
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': self.TARGET_LUN,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
result = self.driver.initialize_connection(self.volume, self.connector)
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.findHost(iqn='iqn.1993-08.org.debian:01:222'),
mock.call.getHost(self.FAKE_HOST),
mock.call.createVLUN(
self.VOLUME_3PAR_NAME,
auto=True,
hostname='fakehost',
portPos={'node': 8, 'slot': 1, 'cardPort': 1}),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.logout()]
mock_client.assert_has_calls(expected)
self.assertDictMatch(result, self.properties)
def test_get_volume_stats(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getCPG.return_value = self.cpgs[0]
mock_client.getStorageSystemInfo.return_value = {'serialNumber':
'1234'}
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['storage_protocol'], 'iSCSI')
self.assertEqual(stats['total_capacity_gb'], 'infinite')
self.assertEqual(stats['free_capacity_gb'], 'infinite')
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getStorageSystemInfo(),
mock.call.logout()]
mock_client.assert_has_calls(expected)
self.assertEqual(stats['storage_protocol'], 'iSCSI')
self.assertEqual(stats['total_capacity_gb'], 'infinite')
self.assertEqual(stats['free_capacity_gb'], 'infinite')
cpg2 = self.cpgs[0].copy()
cpg2.update({'SDGrowth': {'limitMiB': 8192}})
mock_client.getCPG.return_value = cpg2
const = 0.0009765625
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['storage_protocol'], 'iSCSI')
total_capacity_gb = 8192 * const
self.assertEqual(stats['total_capacity_gb'], total_capacity_gb)
free_capacity_gb = int(
(8192 - self.cpgs[0]['UsrUsage']['usedMiB']) * const)
self.assertEqual(stats['free_capacity_gb'], free_capacity_gb)
def test_create_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.findHost.return_value = None
mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.findHost(iqn='iqn.1993-08.org.debian:01:222'),
mock.call.createHost(
self.FAKE_HOST,
optional={'domain': None, 'persona': 1},
iscsiNames=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
def test_create_invalid_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'),
{'name': 'fakehost.foo'}]
mock_client.findHost.return_value = 'fakehost.foo'
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.findHost(iqn='iqn.1993-08.org.debian:01:222'),
mock.call.getHost('fakehost.foo')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
def test_create_modify_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
{'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'}]}]
host = self.driver._create_host(self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.modifyHost(
self.FAKE_HOST,
{'pathOperation': 1,
'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 2)
def test_get_least_used_nsp_for_host_single(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
#Setup a single ISCSI IP
iscsi_ips = ["10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports()
nsp = self.driver._get_least_used_nsp_for_host('newhost')
self.assertEqual(nsp, "1:8:1")
def test_get_least_used_nsp_for_host_new(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
#Setup two ISCSI IPs
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports()
# Host 'newhost' does not yet have any iscsi paths,
# so the 'least used' is returned
nsp = self.driver._get_least_used_nsp_for_host('newhost')
self.assertEqual(nsp, "1:8:2")
def test_get_least_used_nsp_for_host_reuse(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
#Setup two ISCSI IPs
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports()
# hosts 'foo' and 'bar' already have active iscsi paths
# the same one should be used
nsp = self.driver._get_least_used_nsp_for_host('foo')
self.assertEqual(nsp, "1:8:2")
nsp = self.driver._get_least_used_nsp_for_host('bar')
self.assertEqual(nsp, "1:8:1")
def test_get_least_used_nps_for_host_fc(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS1_RET
mock_client.getVLUNs.return_value = VLUNS5_RET
#Setup two ISCSI IPs
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports()
nsp = self.driver._get_least_used_nsp_for_host('newhost')
self.assertNotEqual(nsp, "0:6:3")
self.assertEqual(nsp, "1:8:1")
def test_invalid_iscsi_ip(self):
config = self.setup_configuration()
config.hp3par_iscsi_ips = ['10.10.220.250', '10.10.220.251']
config.iscsi_ip_address = '10.10.10.10'
mock_conf = {
'getPorts.return_value': {
'members': [
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
'protocol': 2,
'IPAddr': '10.10.220.252',
'linkState': 4,
'device': [],
'iSCSIName': self.TARGET_IQN,
'mode': 2,
'HWAddr': '2C27D75375D2',
'type': 8},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'protocol': 2,
'IPAddr': '10.10.220.253',
'linkState': 4,
'device': [],
'iSCSIName': self.TARGET_IQN,
'mode': 2,
'HWAddr': '2C27D75375D6',
'type': 8}]}}
# no valid ip addr should be configured.
self.assertRaises(exception.InvalidInput,
self.setup_driver,
config=config,
mock_conf=mock_conf)
def test_get_least_used_nsp(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
ports = [
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
# in use count
vluns = self.driver.common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(vluns['members'],
['0:2:1', '1:8:1'])
self.assertEqual(nsp, '1:8:1')
ports = [
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
# in use count
vluns = self.driver.common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(vluns['members'],
['0:2:1', '1:2:1'])
self.assertEqual(nsp, '1:2:1')
ports = [
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}]
| |
import numpy as np
import math
import time
from scipy.spatial.transform import Rotation
from ..common import Vec, equal_angle
from .realtime.constants import *
class Joints(Vec):
'''
A vector of 6 elements representing joint properties. The order is: base, shoulder, elbow, wrist1, wrist2, wrist3.
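    Example (angles in radians; the values below are illustrative only):
    >>> import math
    >>> home = Joints(0, -math.pi / 2, math.pi / 2, -math.pi / 2, -math.pi / 2, 0)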
'''
BASE = 0
SHOULDER = 1
ELBOW = 2
WRIST1 = 3
WRIST2 = 4
WRIST3 = 5
def __init__(self, base=0, shoulder=0, elbow=0, wrist1=0, wrist2=0, wrist3=0):
super().__init__(6)
self.array[:] = [base, shoulder, elbow, wrist1, wrist2, wrist3]
def allclose(self, array, tolerance = UR_JOINTS_POSITION_TOLERANCE):
if isinstance(array, Tool):
raise TypeError("Cannot compare joint positions with tool pose.")
if np.allclose(self.array, array, rtol=0, atol=tolerance):
return True
for i in range(len(array)):
if not equal_angle(self.array[i], array[i], tolerance):
return False
return True
class JointSpeeds(Joints):
pass
class Tool(Vec):
'''
A vector of 6 elements representing tool position and orientation. Orientation is stored as axis angles. The order is: x, y, z, rx, ry, rz.
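    Example (a hypothetical pose; meters and axis-angle radians):
    >>> p = Tool(0.4, -0.2, 0.3, 0.0, 3.14, 0.0)
    >>> xyz = p.position()
    >>> q = Tool.from_xyzrpy([0.4, -0.2, 0.3, 0.0, 1.57, 0.0])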
'''
X = 0
Y = 1
Z = 2
RX = 3
RY = 4
RZ = 5
def __init__(self, x=0, y=0, z=0, rx=0, ry=0, rz=0):
super().__init__(6)
self.array[:] = [x, y, z, rx, ry, rz]
def allclose(self, array, position_tolerance = UR_TOOL_POSITION_TOLERANCE, rotation_tolerance = UR_TOOL_ROTATION_TOLERANCE):
if isinstance(array, Joints):
raise TypeError("Cannot compare tool pose with joint positions.")
if not np.allclose(self.array[:3], array[:3], rtol=0, atol=position_tolerance):
return False
# normalize the two rotation vectors first
a = Rotation.from_rotvec(self.array[3:6]).as_euler("xyz")
b = Rotation.from_rotvec(array[3:6]).as_euler("xyz")
return equal_angle(a[0], b[0], rotation_tolerance) \
and equal_angle(a[1], b[1], rotation_tolerance) \
and equal_angle(a[2], b[2], rotation_tolerance)
@staticmethod
def from_xyzrpy(xyzrpy):
r = Rotation.from_euler("xyz", xyzrpy[3:]).as_rotvec()
return Tool.fromarray(np.concatenate((xyzrpy[:3], r)))
def to_xyzrpy(self):
r = Rotation.from_rotvec(self.array[3:]).as_euler("xyz")
return np.concatenate((self.array[:3], r))
def position(self):
return self.array[:3]
def orientation(self):
return Rotation.from_rotvec(self.array[3:]).as_quat()
class ToolSpeed(Tool):
pass
################################################################
## Arm state.
################################################################
class State(Vec):
'''
The state of a 6-dof arm, encoded as a 72-valued vector.
'''
_BUFFER_SIZE = UR_STATE_ENTRIES_COUNT
_TIME = UR_STATE_TIME
_CMD_ID = UR_STATE_CMD_ID
_STATUS = UR_STATE_STATUS
_JOINT_POSITIONS = slice(*UR_STATE_JOINT_POSITIONS)
_JOINT_SPEEDS = slice(*UR_STATE_JOINT_SPEEDS)
_TOOL_POSE = slice(*UR_STATE_TOOL_POSE)
_TOOL_SPEED = slice(*UR_STATE_TOOL_SPEED)
_TARGET_JOINT_POSITIONS = slice(*UR_STATE_TARGET_JOINT_POSITIONS)
_TARGET_JOINT_SPEEDS = slice(*UR_STATE_TARGET_JOINT_SPEEDS)
_TARGET_TOOL_POSE = slice(*UR_STATE_TARGET_TOOL_POSE)
_TARGET_TOOL_SPEED = slice(*UR_STATE_TARGET_TOOL_SPEED)
_TOOL_FORCE = slice(*UR_STATE_TOOL_FORCE)
_JOINT_TORQUES = slice(*UR_STATE_JOINT_TORQUES)
_TOOL_ACCELERATION = slice(*UR_STATE_TOOL_ACCELERATION)
_SENSOR_FORCE = slice(*UR_STATE_SENSOR_FORCE)
_STATUS_FLAG_MOVING = UR_STATUS_FLAG_MOVING
_STATUS_FLAG_CONTACT = UR_STATUS_FLAG_CONTACT
_STATUS_FLAG_DEADMAN = UR_STATUS_FLAG_DEADMAN
_STATUS_FLAG_DONE = UR_STATUS_FLAG_DONE
_STATUS_FLAG_GOAL_REACHED = UR_STATUS_FLAG_GOAL_REACHED
def __init__(self):
super().__init__(State._BUFFER_SIZE)
def time(self):
'''The time when the response was generated, in robot time'''
return self[State._TIME]
def cmd_id(self):
'''The id of the last drive command, set to client time when command was submitted.'''
return self[State._CMD_ID]
def is_moving(self):
        '''True if the arm is moving, False if the arm is stopped.'''
return int(self[State._STATUS]) & State._STATUS_FLAG_MOVING != 0
def is_contact(self):
''' True if the arm is experiencing a force greater than threshold specified with the last move command.'''
return int(self[State._STATUS]) & State._STATUS_FLAG_CONTACT != 0
def is_deadman_switch_triggered(self):
'''True if the arm stopped because of a deadman switch, false otherwise.'''
return int(self[State._STATUS]) & State._STATUS_FLAG_DEADMAN != 0
def is_goal_reached(self):
''' True if the arm reached the goal established by the last move command, False otherwise. The flag is meaningful only when is_done() is also True'''
return int(self[State._STATUS]) & State._STATUS_FLAG_GOAL_REACHED != 0
def is_done(self):
''' True if the arm completed the last move command.'''
return int(self[State._STATUS]) & State._STATUS_FLAG_DONE != 0
def joint_positions(self):
'''The current actual joint angular position vector in rad : [Base, Shoulder, Elbow, Wrist1, Wrist2, Wrist3]'''
return Joints.fromarray(self[State._JOINT_POSITIONS], False)
def joint_speeds(self):
'''The current actual joint angular velocity vector in rad/s: [Base, Shoulder, Elbow, Wrist1, Wrist2, Wrist3]'''
return JointSpeeds.fromarray(self[State._JOINT_SPEEDS], False)
def tool_pose(self):
'''The current actual TCP vector : ([X, Y, Z, Rx, Ry, Rz])'''
return Tool.fromarray(self[State._TOOL_POSE], False)
def tool_speed(self):
'''The current actual TCP velocity vector; ([X, Y, Z, Rx, Ry, Rz])'''
return ToolSpeed.fromarray(self[State._TOOL_SPEED], False)
def target_joint_positions(self):
'''The current target joint angular position vector in rad: [Base, Shoulder, Elbow, Wrist1, Wrist2, Wrist3]'''
return Joints.fromarray(self[State._TARGET_JOINT_POSITIONS], False)
def target_joint_speeds(self):
'''The current target joint angular velocity vector in rad/s: [Base, Shoulder, Elbow, Wrist1, Wrist2, Wrist3]'''
return JointSpeeds.fromarray(self[State._TARGET_JOINT_SPEEDS], False)
def target_tool_pose(self):
'''The current target TCP vector; ([X, Y, Z, Rx, Ry, Rz])'''
return Tool.fromarray(self[State._TARGET_TOOL_POSE], False)
def target_tool_speed(self):
        '''The TCP speed. The first three values are the cartesian speeds along x, y, z, and the last three define the current rotation axis, rx, ry, rz, where the length |rx, ry, rz| defines the angular velocity in radians/s'''
return ToolSpeed.fromarray(self[State._TARGET_TOOL_SPEED], False)
def tool_force(self):
'''Returns the wrench (Force/Torque vector) at the TCP.
The external wrench is computed based on the error between the joint torques required to stay on the trajectory and the expected joint torques.
        The function returns "p[Fx (N), Fy (N), Fz (N), TRx (Nm), TRy (Nm), TRz (Nm)]",
        where Fx, Fy, and Fz are the forces in the axes of the robot base coordinate system measured in Newtons, and TRx, TRy, and TRz are the torques around these axes measured in Newton-meters. '''
return Tool.fromarray(self[State._TOOL_FORCE], False)
def joint_torques(self):
'''The torque on the joints, corrected by the torque needed to move the robot itself (gravity, friction, etc.), returned as a vector of length 6.'''
return Joints.fromarray(self[State._JOINT_TORQUES], False)
def tool_acceleration(self):
'''The accelerometer reading, in the robot base coordinate system'''
return self[State._TOOL_ACCELERATION]
def sensor_force(self):
'''The forces and moments reported by the Force/Torque sensor mounted on the wrist, in the axes of the robot base coordinate system'''
return Tool.fromarray(self[State._SENSOR_FORCE], False)
def _set_state_flag(self, flag, value):
if value:
self[State._STATUS] = int(self[State._STATUS]) | flag
else:
self[State._STATUS] = int(self[State._STATUS]) & (0xFFFFFFFF ^ flag)
################################################################
## Arm commands
################################################################
class Command(Vec):
'''
Represents a command that can be sent to the arm.
The move target can be joint speeds, or an absolute position, specified either as a tool pose or as joint angles.
In either case, the arm stops when contact is detected, that is, if fabs(actual_force - expected_force) > max_force.
The type of reaction to contact is determined by the contact_handling parameter.
An optional motion controller choice can be specified using the controller parameter.
Other commands (read and config) can be initialized using the as_xxx methods.
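    Example (a sketch with illustrative values; the UR_CMD_KIND_* constants come
    from .realtime.constants):
    >>> cmd = Command().make(kind=UR_CMD_KIND_MOVE_TOOL_POSE,
    ...                      target=Tool(0.4, -0.2, 0.3, 0.0, 3.14, 0.0),
    ...                      max_speed=0.25)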
'''
_BUFFER_SIZE = UR_CMD_ENTRIES_COUNT
_ID = UR_CMD_ID
_KIND = UR_CMD_KIND
_CONFIG_MASS = UR_CMD_CONFIG_MASS
_CONFIG_TOOL_COG = slice(*UR_CMD_CONFIG_TOOL_COG)
_CONFIG_TOOL_TIP = slice(*UR_CMD_CONFIG_TOOL_TIP)
_MOVE_TARGET = slice(*UR_CMD_MOVE_TARGET)
_MOVE_MAX_SPEED = UR_CMD_MOVE_MAX_SPEED
_MOVE_MAX_ACCELERATION = UR_CMD_MOVE_MAX_ACCELERATION
_MOVE_FORCE_LOW_BOUND = slice(*UR_CMD_MOVE_FORCE_LOW_BOUND)
_MOVE_FORCE_HIGH_BOUND = slice(*UR_CMD_MOVE_FORCE_HIGH_BOUND)
_MOVE_CONTACT_HANDLING = UR_CMD_MOVE_CONTACT_HANDLING
_MOVE_CONTROLLER = UR_CMD_MOVE_CONTROLLER
def __init__(self):
super().__init__(Command._BUFFER_SIZE)
self[Command._KIND] = UR_CMD_KIND_ESTOP
def make(self,
kind = UR_CMD_KIND_READ,
target=UR_ZERO,
max_speed=UR_DEFAULT_MAX_SPEED,
max_acc=UR_DEFAULT_ACCELERATION,
force_low_bound:Tool=UR_DEFAULT_FORCE_LOW_BOUND,
force_high_bound:Tool=UR_DEFAULT_FORCE_HIGH_BOUND,
contact_handling=0,
controller_flags=0):
self[Command._KIND] = kind
self[Command._MOVE_TARGET] = target
self[Command._MOVE_MAX_SPEED] = max_speed
        self[Command._MOVE_MAX_ACCELERATION] = max_acc
self[Command._MOVE_FORCE_LOW_BOUND] = force_low_bound if force_low_bound is not None else UR_FORCE_IGNORE_LOW
self[Command._MOVE_FORCE_HIGH_BOUND] = force_high_bound if force_high_bound is not None else UR_FORCE_IGNORE_HIGH
self[Command._MOVE_CONTACT_HANDLING] = contact_handling
self[Command._MOVE_CONTROLLER] = controller_flags
return self
def id(self): return self[Command._ID]
def kind(self): return self[Command._KIND]
def target(self):
if self[Command._KIND] == UR_CMD_KIND_MOVE_TOOL_POSE or self[Command._KIND] == UR_CMD_KIND_MOVE_TOOL_LINEAR:
return Tool.fromarray(self[Command._MOVE_TARGET], False)
elif self[Command._KIND] == UR_CMD_KIND_MOVE_JOINT_POSITIONS:
return Joints.fromarray(self[Command._MOVE_TARGET], False)
return JointSpeeds.fromarray(self[Command._MOVE_TARGET], False)
def max_speed(self): return self[Command._MOVE_MAX_SPEED]
def max_acceleration(self): return self[Command._MOVE_MAX_ACCELERATION]
def force_low_bound(self): return Tool.fromarray(self[Command._MOVE_FORCE_LOW_BOUND], False)
def force_high_bound(self): return Tool.fromarray(self[Command._MOVE_FORCE_HIGH_BOUND], False)
def contact_handling(self): return self[Command._MOVE_CONTACT_HANDLING]
def controller_flags(self): return self[Command._MOVE_CONTROLLER]
def is_move_command(self): return self[Command._KIND] > UR_CMD_KIND_ESTOP and self[Command._KIND] < UR_CMD_KIND_READ
def _goal_reached(self, state):
if self[Command._KIND] == UR_CMD_KIND_MOVE_JOINT_SPEEDS:
return self.target().allclose(state.joint_speeds(), UR_SPEED_TOLERANCE)
elif self[Command._KIND] == UR_CMD_KIND_MOVE_JOINT_POSITIONS:
return self.target().allclose(state.joint_positions()) and not state.is_moving()
elif self[Command._KIND] == UR_CMD_KIND_MOVE_TOOL_POSE or self[Command._KIND] == UR_CMD_KIND_MOVE_TOOL_LINEAR:
return self.target().allclose(state.tool_pose()) and not state.is_moving()
else:
raise Exception("Invalid command type")
class Arm:
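    '''
    Thin client that submits Command instances to a controller and keeps the
    latest State. A minimal usage sketch (how the controller is obtained is an
    assumption; any object exposing execute(command, state) works):
    >>> arm = Arm(controller)
    >>> arm.move(Tool(0.4, -0.2, 0.3, 0.0, 3.14, 0.0), max_speed=0.25)
    >>> arm.read()
    >>> pose = arm.state.tool_pose()
    '''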
_READ_CMD = Command()
def __init__(self, controller):
self.controller = controller
self.command = Command()
self.state = State()
def __execute(self, blocking):
self.command[Command._ID] = time.time()
self.controller.execute(self.command, self.state)
while blocking and not self.state.is_done():
self.controller.execute(self.command, self.state)
def execute(self, command, blocking):
self.command[:] = command
self.__execute(blocking)
def step(self):
self.controller.execute(self.command, self.state)
def read(self):
self.execute(Arm._READ_CMD, False)
def get_inverse_kinematics(self, target_position):
if type(target_position) is not Tool:
raise TypeError('Argument target_position must be of type Tool.')
self.command.make(
kind = UR_CMD_KIND_IK_QUERY,
target = target_position)
self.__execute(blocking=False)
return self.state.target_joint_positions().clone()
def move(self,
target_position,
max_speed=UR_DEFAULT_MAX_SPEED,
max_acc=UR_DEFAULT_ACCELERATION,
force_low_bound=UR_DEFAULT_FORCE_LOW_BOUND,
force_high_bound=UR_DEFAULT_FORCE_HIGH_BOUND,
contact_handling=0,
controller_flags=0,
blocking=True):
if type(target_position) is Joints:
cmd_type = UR_CMD_KIND_MOVE_JOINT_POSITIONS
elif type(target_position) is Tool:
cmd_type = UR_CMD_KIND_MOVE_TOOL_POSE
elif type(target_position) is JointSpeeds:
cmd_type = UR_CMD_KIND_MOVE_JOINT_SPEEDS
else:
raise TypeError('Argument target_position must be of type Tool, Joints or JointSpeeds. '
'Use [Tool|Joints|JointSpeeds].fromarray() to wrap an existing array.')
self.command.make(
kind = cmd_type,
target = target_position,
max_speed=max_speed,
max_acc=max_acc,
force_low_bound=force_low_bound,
force_high_bound=force_high_bound,
contact_handling=contact_handling,
controller_flags=controller_flags)
self.__execute(blocking)
def speed(self,
target_speed,
acc=UR_DEFAULT_ACCELERATION,
force_low_bound=UR_DEFAULT_FORCE_LOW_BOUND,
force_high_bound=UR_DEFAULT_FORCE_HIGH_BOUND,
contact_handling=0,
controller_flags=0,
blocking=True):
if type(target_speed) is not JointSpeeds:
raise TypeError("Speed can only be specified | |
= self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'Nic'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/lans/{lanId}/nics/{nicId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_lans_nics_get(self, datacenter_id, lan_id, **kwargs): # noqa: E501
"""List Lan Members # noqa: E501
You can retrieve a list of nics attached to a lan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_lans_nics_get(datacenter_id, lan_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the datacenter (required)
:type datacenter_id: str
:param lan_id: The unique ID of the LAN (required)
:type lan_id: str
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param offset: the first element (of the total list of elements) to include in the response (use together with <code>limit</code> for pagination)
:type offset: int
:param limit: the maximum number of elements to return (use together with <code>offset</code> for pagination)
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: LanNics
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_lans_nics_get_with_http_info(datacenter_id, lan_id, **kwargs) # noqa: E501
def datacenters_lans_nics_get_with_http_info(self, datacenter_id, lan_id, **kwargs): # noqa: E501
"""List Lan Members # noqa: E501
You can retrieve a list of nics attached to a lan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_lans_nics_get_with_http_info(datacenter_id, lan_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the datacenter (required)
:type datacenter_id: str
:param lan_id: The unique ID of the LAN (required)
:type lan_id: str
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param offset: the first element (of the total list of elements) to include in the response (use together with <code>limit</code> for pagination)
:type offset: int
:param limit: the maximum number of elements to return (use together with <code>offset</code> for pagination)
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(LanNics, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'lan_id',
'pretty',
'depth',
'x_contract_number',
'offset',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_lans_nics_get" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_lans_nics_get`") # noqa: E501
# verify the required parameter 'lan_id' is set
if self.api_client.client_side_validation and ('lan_id' not in local_var_params or # noqa: E501
local_var_params['lan_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `lan_id` when calling `datacenters_lans_nics_get`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_lans_nics_get`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_lans_nics_get`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'offset' in local_var_params and local_var_params['offset'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `offset` when calling `datacenters_lans_nics_get`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 10000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `datacenters_lans_nics_get`, must be a value less than or equal to `10000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `datacenters_lans_nics_get`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'lan_id' in local_var_params:
path_params['lanId'] = local_var_params['lan_id'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'LanNics'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/lans/{lanId}/nics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
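    # Example (hedged sketch, not part of the generated client): assuming this API
    # class has been instantiated as `api` on a configured ApiClient, a paginated
    # listing of the NICs attached to a LAN might look like:
    #
    #   lan_nics = api.datacenters_lans_nics_get(datacenter_id, lan_id,
    #                                            depth=1, offset=0, limit=100)
    #
    # The attribute names on the returned LanNics model (e.g. an `items` list)
    # depend on the generated models and are assumptions here.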
def datacenters_lans_nics_post(self, datacenter_id, lan_id, nic, **kwargs): # noqa: E501
"""Attach a nic # noqa: E501
        This will attach a pre-existing NIC to a LAN.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_lans_nics_post(datacenter_id, lan_id, nic, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the datacenter (required)
:type datacenter_id: str
:param lan_id: The unique ID of the LAN (required)
:type lan_id: str
:param nic: Nic to be | |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import service
from rally_openstack.common.services.network import net_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _args_adapter(arguments_map):
def wrapper(func):
def decorator(*args, **kwargs):
for source, dest in arguments_map.items():
if source in kwargs:
if dest in kwargs:
raise TypeError(
f"{func.__name__}() accepts either {dest} keyword "
f"argument or {source} but both were specified.")
kwargs[dest] = kwargs.pop(source)
return func(*args, **kwargs)
return decorator
return wrapper
_NETWORK_ARGS_MAP = {
"provider:network_type": "provider_network_type",
"provider:physical_network": "provider_physical_network",
"provider:segmentation_id": "provider_segmentation_id",
"router:external": "router_external"
}
def _create_network_arg_adapter():
"""A decorator for converting neutron's create kwargs to look pythonic."""
return _args_adapter(_NETWORK_ARGS_MAP)
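# Example (hedged sketch): a function decorated with _create_network_arg_adapter()
# keeps its pythonic signature while still accepting neutron-native keys, which
# are renamed before the call, e.g.
#
#   @_create_network_arg_adapter()
#   def _echo(**kwargs):
#       return kwargs
#
#   _echo(**{"provider:network_type": "vlan"})  # -> {"provider_network_type": "vlan"}
#
# `_echo` is a hypothetical helper used only for illustration.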
class _NoneObj(object):
def __len__(self):
return 0
_NONE = _NoneObj()
def _clean_dict(**kwargs):
"""Builds a dict object from keyword arguments ignoring nullable values."""
return dict((k, v) for k, v in kwargs.items() if v != _NONE)
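# Example (hedged sketch): _clean_dict drops every keyword left at the _NONE
# sentinel, so only explicitly supplied values end up in the request body, e.g.
#
#   _clean_dict(name="net-1", mtu=_NONE, shared=False)  # -> {"name": "net-1", "shared": False}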
@service.service(service_name="neutron", service_type="network", version="2.0")
class NeutronService(service.Service):
"""A helper class for Neutron API"""
def __init__(self, *args, **kwargs):
super(NeutronService, self).__init__(*args, **kwargs)
self._cached_supported_extensions = None
self._client = None
@property
def client(self):
if self._client is None:
self._client = self._clients.neutron()
return self._client
def create_network_topology(
self, network_create_args=None,
router_create_args=None, router_per_subnet=False,
subnet_create_args=None, subnets_count=1, subnets_dualstack=False
):
"""Create net infrastructure(network, router, subnets).
:param network_create_args: A dict with creation arguments for a
network. The format is equal to the create_network method
:param router_create_args: A dict with creation arguments for an
external router that will add an interface to each created subnet.
            The format is equal to the create_router method.
            In case of a None value (the default behaviour), no router is created.
:param router_per_subnet: whether or not to create router per subnet
or use one router for all subnets.
:param subnet_create_args: A dict with creation arguments for
subnets. The format is equal to the create_subnet method.
:param subnets_count: Number of subnets to create per network.
Defaults to 1
        :param subnets_dualstack: Whether subnets should alternate between IPv4
            and IPv6 (i.e. the first subnet will be created for IPv4, the second
            for IPv6, the third for IPv4, ...). If subnet_create_args includes one of
('cidr', 'start_cidr', 'ip_version') keys, subnets_dualstack
parameter will be ignored.
"""
subnet_create_args = dict(subnet_create_args or {})
network = self.create_network(**(network_create_args or {}))
subnet_create_args["network_id"] = network["id"]
routers = []
if router_create_args is not None:
for i in range(subnets_count if router_per_subnet else 1):
routers.append(self.create_router(**router_create_args))
subnets = []
ip_versions = itertools.cycle([4, 6] if subnets_dualstack else [4])
use_subnets_dualstack = (
"cidr" not in subnet_create_args
and "start_cidr" not in subnet_create_args
and "ip_version" not in subnet_create_args
)
for i in range(subnets_count):
if use_subnets_dualstack:
subnet_create_args["ip_version"] = next(ip_versions)
if routers:
if router_per_subnet:
router = routers[i]
else:
router = routers[0]
subnet_create_args["router_id"] = router["id"]
subnets.append(self.create_subnet(**subnet_create_args))
network["subnets"] = [s["id"] for s in subnets]
return {
"network": network,
"subnets": subnets,
"routers": routers
}
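    # Example (hedged sketch): assuming `neutron` is a NeutronService instance
    # built by the Rally framework, a dual-stack topology with one router per
    # subnet could be requested as:
    #
    #   topo = neutron.create_network_topology(router_create_args={},
    #                                          router_per_subnet=True,
    #                                          subnets_count=2,
    #                                          subnets_dualstack=True)
    #   subnet_ids = topo["network"]["subnets"]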
def delete_network_topology(self, topo):
"""Delete network topology
        This method was developed to provide backward compatibility with the old
        neutron helpers. It is not the recommended way; we suggest using the
        cleanup manager instead.
        :param topo: Network topology as returned by create_network_topology
"""
for router in topo["routers"]:
self.remove_gateway_from_router(router["id"])
network_id = topo["network"]["id"]
for port in self.list_ports(network_id=network_id):
self.delete_port(port)
for subnet in self.list_subnets(network_id=network_id):
self.delete_subnet(subnet["id"])
self.delete_network(network_id)
for router in topo["routers"]:
self.delete_router(router["id"])
@atomic.action_timer("neutron.create_network")
@_create_network_arg_adapter()
def create_network(self,
project_id=_NONE,
admin_state_up=_NONE,
dns_domain=_NONE,
mtu=_NONE,
port_security_enabled=_NONE,
provider_network_type=_NONE,
provider_physical_network=_NONE,
provider_segmentation_id=_NONE,
qos_policy_id=_NONE,
router_external=_NONE,
segments=_NONE,
shared=_NONE,
vlan_transparent=_NONE,
description=_NONE,
availability_zone_hints=_NONE):
"""Create neutron network.
        :param project_id: The ID of the project that owns the resource. Only
            administrative users and users with the advsvc role can specify a
            project ID other than their own. You cannot change this value through
authorization policies.
:param admin_state_up: The administrative state of the network,
which is up (true) or down (false).
:param dns_domain: A valid DNS domain.
:param mtu: The maximum transmission unit (MTU) value to address
fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.
:param port_security_enabled: The port security status of the network.
Valid values are enabled (true) and disabled (false). This value is
used as the default value of port_security_enabled field of a
newly created port.
:param provider_network_type: The type of physical network that this
network should be mapped to. For example, flat, vlan, vxlan,
or gre. Valid values depend on a networking back-end.
:param provider_physical_network: The physical network where this
network should be implemented. The Networking API v2.0 does not
provide a way to list available physical networks.
For example, the Open vSwitch plug-in configuration file defines
a symbolic name that maps to specific bridges on each compute host.
:param provider_segmentation_id: The ID of the isolated segment on the
physical network. The network_type attribute defines the
segmentation model. For example, if the network_type value is vlan,
this ID is a vlan identifier. If the network_type value is gre,
this ID is a gre key.
:param qos_policy_id: The ID of the QoS policy associated with the
network.
:param router_external: Indicates whether the network has an external
routing facility that’s not managed by the networking service.
:param segments: A list of provider segment objects.
:param shared: Indicates whether this resource is shared across all
projects. By default, only administrative users can change
this value.
:param vlan_transparent: Indicates the VLAN transparency mode of the
network, which is VLAN transparent (true) or not VLAN
transparent (false).
:param description: A human-readable description for the resource.
Default is an empty string.
:param availability_zone_hints: The availability zone candidate for
the network.
:returns: neutron network dict
"""
body = _clean_dict(
name=self.generate_random_name(),
tenant_id=project_id,
admin_state_up=admin_state_up,
dns_domain=dns_domain,
mtu=mtu,
port_security_enabled=port_security_enabled,
qos_policy_id=qos_policy_id,
segments=segments,
shared=shared,
vlan_transparent=vlan_transparent,
description=description,
availability_zone_hints=availability_zone_hints,
**{
"provider:network_type": provider_network_type,
"provider:physical_network": provider_physical_network,
"provider:segmentation_id": provider_segmentation_id,
"router:external": router_external
}
)
resp = self.client.create_network({"network": body})
return resp["network"]
@atomic.action_timer("neutron.show_network")
def get_network(self, network_id, fields=_NONE):
"""Get network by ID
:param network_id: Network ID to fetch data for
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_network(network_id, **body)
return resp["network"]
def find_network(self, network_id_or_name):
"""Find network by identifier (id or name)
:param network_id_or_name: Network ID or name
"""
for net in self.list_networks():
if network_id_or_name in (net["name"], net["id"]):
return net
raise exceptions.GetResourceFailure(
resource="network",
err=f"no name or id matches {network_id_or_name}")
@atomic.action_timer("neutron.update_network")
@_create_network_arg_adapter()
def update_network(self,
network_id,
name=_NONE,
admin_state_up=_NONE,
dns_domain=_NONE,
mtu=_NONE,
port_security_enabled=_NONE,
provider_network_type=_NONE,
provider_physical_network=_NONE,
provider_segmentation_id=_NONE,
qos_policy_id=_NONE,
router_external=_NONE,
segments=_NONE,
shared=_NONE,
description=_NONE,
is_default=_NONE):
"""Update neutron network.
:param network_id: ID of the network to update
:param name: Human-readable name of the network.
:param admin_state_up: The administrative state of the network,
which is up (true) or down (false).
:param dns_domain: A valid DNS domain.
:param mtu: The maximum transmission unit (MTU) value to address
fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.
:param port_security_enabled: The port security status of the network.
Valid values are enabled (true) and disabled (false). This value is
used as the default value of port_security_enabled field of a
newly created port.
:param provider_network_type: The type of physical network that this
network should be mapped to. For example, flat, vlan, vxlan,
or gre. Valid values depend on a networking back-end.
:param provider_physical_network: The physical network where this
network should be implemented. The Networking API v2.0 does not
provide a way to list available physical networks.
For example, the Open vSwitch plug-in configuration file defines
a symbolic name that maps to specific bridges on each compute host.
:param provider_segmentation_id: The ID of the isolated segment on the
physical network. The network_type attribute defines the
segmentation model. For example, if the network_type value is vlan,
this ID is a vlan identifier. If the network_type value is gre,
this ID is | |
import logging
import json
import os
import re
import time
from subprocess import CalledProcessError
from .system import exec_shell
class CloudCliError(Exception):
pass
class AWSCli:
def __init__(self, bin_path=None):
# aws CLI supported versions interval
self.min_version = (0, 0, 0)
self.max_version = (1, 19, 18)
# Path to look up for executable
self.bin_path = None
        # Force the aws CLI binary path if bin_path exists and contains the
        # aws executable.
        if bin_path is not None and os.path.exists(bin_path):
            if os.path.exists(os.path.join(bin_path, 'aws')):
                self.bin_path = bin_path
def check_version(self):
"""
Verify aws CLI version, based on the interval formed by min_version and
max_version.
aws CLI version is fetched using the command: aws --version
"""
try:
output = exec_shell([self.bin("aws"), "--version"])
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise Exception(
"aws CLI executable seems to be missing. Please install it or "
"check your PATH variable"
)
version = None
# Parse command output and extract the version number
pattern = re.compile(r"^aws-cli\/([0-9]+)\.([0-9]+)\.([0-9]+) ")
for line in output.decode("utf-8").split("\n"):
m = pattern.search(line)
if m:
version = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
break
if version is None:
raise Exception("Unable to parse aws CLI version")
logging.info("aws CLI version: %s", '.'.join(map(str, version)))
# Verify if the version fetched is supported
for i in range(0, 3):
min = self.min_version[i]
max = self.max_version[i]
if version[i] < max:
# If current digit is below the maximum value, no need to
                # check the other digits, we are good
break
if version[i] not in list(range(min, max + 1)):
raise Exception(
("aws CLI version %s not supported, must be between %s and"
" %s") % (
'.'.join(map(str, version)),
'.'.join(map(str, self.min_version)),
'.'.join(map(str, self.max_version)),
)
)
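    # Worked example of the interval check above (hedged sketch): with
    # max_version = (1, 19, 18), a parsed version (1, 19, 18) is accepted because
    # every digit falls inside range(min, max + 1); (1, 19, 19) is rejected on the
    # patch digit; (0, 25, 0) is accepted early because its major digit is below
    # the maximum.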
def bin(self, binary):
"""
Return binary's path
"""
if self.bin_path is not None:
return os.path.join(self.bin_path, binary)
else:
return binary
def check_instance_type_availability(self, instance_type, region):
try:
output = exec_shell([
self.bin("aws"),
"ec2",
"describe-instance-type-offerings",
"--location-type availability-zone",
"--filters Name=instance-type,Values=%s" % instance_type,
"--region %s" % region,
"--output json"
])
result = json.loads(output.decode("utf-8"))
logging.debug("Command output: %s", result)
if len(result["InstanceTypeOfferings"]) == 0:
raise CloudCliError(
"Instance type %s not available in region %s"
% (instance_type, region)
)
except ValueError:
# JSON decoding error
logging.error("Failed to decode JSON data")
logging.error("Output: %s", output.decode("utf-8"))
raise CloudCliError(
"Failed to decode JSON data, please check the logs for details"
)
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
def get_image_id(self, image, region):
try:
output = exec_shell([
self.bin("aws"),
"ec2",
"describe-images",
"--filters Name=name,Values=\"%s\"" % image,
"--query 'sort_by(Images, &Name)[-1]'",
"--region %s" % region,
"--output json"
])
result = json.loads(output.decode("utf-8"))
logging.debug("Command output: %s", result)
if result.get('State') == 'available':
return result.get('ImageId')
except ValueError:
# JSON decoding error
logging.error("Failed to decode JSON data")
logging.error("Output: %s", output.decode("utf-8"))
raise CloudCliError(
"Failed to decode JSON data, please check the logs for details"
)
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
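    # Example (hedged sketch; the image name pattern and region are placeholders):
    #
    #   cli = AWSCli()
    #   ami_id = cli.get_image_id("RHEL-8.*-x86_64-*", "us-east-1")
    #   # ami_id is the ImageId of the latest matching image, or None if the
    #   # newest match is not in the 'available' state.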
def check_instances_availability(self, region):
try:
output = exec_shell([
self.bin("aws"),
"ec2",
"wait",
"instance-status-ok",
"--region %s" % region
])
logging.debug("Command output: %s", output.decode("utf-8"))
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
class AWSRDSCli(AWSCli):
def check_instance_type_availability(self, instance_type, region):
try:
output = exec_shell([
self.bin("aws"),
"rds",
"describe-reserved-db-instances-offerings",
"--product-description postgresql",
"--region %s" % region,
"--db-instance-class %s" % instance_type,
"--output json"
])
result = json.loads(output.decode("utf-8"))
logging.debug("Command output: %s", result)
if len(result["ReservedDBInstancesOfferings"]) == 0:
raise CloudCliError(
"Instance type %s not available in region %s"
% (instance_type, region)
)
except ValueError:
# JSON decoding error
logging.error("Failed to decode JSON data")
logging.error("Output: %s", output.decode("utf-8"))
raise CloudCliError(
"Failed to decode JSON data, please check the logs for details"
)
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
class AWSRDSAuroraCli(AWSRDSCli):
pass
class AzureCli:
def __init__(self, bin_path=None):
# azure CLI supported versions interval
self.min_version = (0, 0, 0)
self.max_version = (2, 20, 0)
# Path to look up for executable
self.bin_path = None
        # Force the azure CLI binary path if bin_path exists and contains the
        # az executable.
if bin_path is not None and os.path.exists(bin_path):
if os.path.exists(os.path.join(bin_path, 'az')):
self.bin_path = bin_path
def check_version(self):
"""
Verify azure CLI version, based on the interval formed by min_version and
max_version.
azure CLI version is fetched using the command: az --version
"""
try:
output = exec_shell([self.bin("az"), "--version"])
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise Exception(
"azure CLI executable seems to be missing. Please install it or "
"check your PATH variable"
)
version = None
# Parse command output and extract the version number
pattern = re.compile(r"^azure-cli\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for line in output.decode("utf-8").split("\n"):
m = pattern.search(line)
if m:
version = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
break
if version is None:
raise Exception("Unable to parse azure CLI version")
logging.info("azure CLI version: %s", '.'.join(map(str, version)))
# Verify if the version fetched is supported
for i in range(0, 3):
min = self.min_version[i]
max = self.max_version[i]
if version[i] < max:
# If current digit is below the maximum value, no need to
                # check the other digits, we are good
break
if version[i] not in list(range(min, max + 1)):
raise Exception(
("azure CLI version %s not supported, must be between %s and"
" %s") % (
'.'.join(map(str, version)),
'.'.join(map(str, self.min_version)),
'.'.join(map(str, self.max_version)),
)
)
def bin(self, binary):
"""
Return binary's path
"""
if self.bin_path is not None:
return os.path.join(self.bin_path, binary)
else:
return binary
def check_instance_type_availability(self, instance_type, region):
try:
output = exec_shell([
self.bin("az"),
"vm",
"list-sizes",
"--location %s" % region,
"--query \"[?name == '%s']\"" % instance_type,
"--output json"
])
result = json.loads(output.decode("utf-8"))
logging.debug("Command output: %s", result)
if len(result) == 0:
raise CloudCliError(
"Instance type %s not available in region %s"
% (instance_type, region)
)
except ValueError:
# JSON decoding error
logging.error("Failed to decode JSON data")
logging.error("Output: %s", output.decode("utf-8"))
raise CloudCliError(
"Failed to decode JSON data, please check the logs for details"
)
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
def check_image_availability(self, publisher, offer, sku, region):
try:
output = exec_shell([
self.bin("az"),
"vm",
"image",
"list",
"--all",
"-p \"%s\"" % publisher,
"-f \"%s\"" % offer,
"-s \"%s\"" % sku,
"-l %s" % region,
"--query",
"\"[?offer == '%s' && sku =='%s']\"" % (offer, sku),
"--output json"
])
result = json.loads(output.decode("utf-8"))
logging.debug("Command output: %s", result)
if len(result) == 0:
raise CloudCliError(
"Image %s:%s:%s not available in region %s"
% (publisher, offer, sku, region)
)
except ValueError:
# JSON decoding error
logging.error("Failed to decode JSON data")
logging.error("Output: %s", output.decode("utf-8"))
raise CloudCliError(
"Failed to decode JSON data, please check the logs for details"
)
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
def check_instances_availability(self, project_name):
try:
output = exec_shell([
self.bin("az"),
"vm",
"wait",
"--ids",
"$(%s vm list -g \"%s_edb_resource_group\" --query \"[].id\" -o tsv)"
% (self.bin("az"), project_name),
"--created"
])
logging.debug("Command output: %s", output.decode("utf-8"))
except CalledProcessError as e:
logging.error("Failed to execute the command: %s", e.cmd)
logging.error("Return code is: %s", e.returncode)
logging.error("Output: %s", e.output)
raise CloudCliError(
"Failed to execute the following command, please check the "
"logs for details: %s" % e.cmd
)
class GCloudCli:
def __init__(self, bin_path=None):
# gcloud CLI supported versions interval
self.min_version = (0, | |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
pyomo-based dispatch strategy
"""
import os
import sys
import time as time_mod
from functools import partial
import platform
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverStatus, TerminationCondition
# allows pyomo to solve on threaded processes
import pyutilib.subprocess.GlobalData
pyutilib.subprocess.GlobalData.DEFINE_SIGNAL_HANDLERS_DEFAULT = False
from .Dispatcher import Dispatcher
from .DispatchState import DispatchState, NumpyState, PyomoState
try:
import _utils as hutils
except (ModuleNotFoundError, ImportError):
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import _utils as hutils
# Choose solver; CBC is a great choice unless we're on Windows
if platform.system() == 'Windows':
SOLVER = 'glpk'
else:
SOLVER = 'cbc'
class Pyomo(Dispatcher):
"""
Dispatches using rolling windows in Pyomo
"""
naming_template = {'comp prod': '{comp}|{res}|prod',
'comp transfer': '{comp}|{res}|trans',
'comp max': '{comp}|{res}|max',
'comp ramp up': '{comp}|{res}|rampup',
'comp ramp down': '{comp}|{res}|rampdown',
'conservation': '{res}|consv',
}
### INITIALIZATION
@classmethod
def get_input_specs(cls):
"""
Set acceptable input specifications.
@ In, None
@ Out, specs, InputData, specs
"""
    # NOTE: InputData is provided by the RAVEN framework utilities; it is assumed
    # to be importable in the full HERON/RAVEN environment.
    specs = InputData.parameterInputFactory('Dispatcher', ordered=False, baseNode=None)
# TODO specific for pyomo dispatcher
return specs
def __init__(self):
"""
Constructor.
@ In, None
@ Out, None
"""
self.name = 'PyomoDispatcher' # identifying name
self._window_len = 24 # time window length to dispatch at a time # FIXME user input
def read_input(self, specs):
"""
Read in input specifications.
@ In, specs, RAVEN InputData, specifications
@ Out, None
"""
print('DEBUGG specs:', specs)
### API
def dispatch(self, case, components, sources, meta):
"""
Performs dispatch.
@ In, case, HERON Case, Case that this dispatch is part of
@ In, components, list, HERON components available to the dispatch
@ In, sources, list, HERON source (placeholders) for signals
@ In, meta, dict, additional variables passed through
@ Out, disp, DispatchScenario, resulting dispatch
"""
t_start, t_end, t_num = self.get_time_discr()
time = np.linspace(t_start, t_end, t_num) # Note we don't care about segment/cluster here
resources = sorted(list(hutils.get_all_resources(components))) # list of all active resources
# pre-build results structure
## we can use NumpyState here so we don't need to worry about a Pyomo model object
dispatch = NumpyState()# dict((comp.name, dict((res, np.zeros(len(time))) for res in comp.get_resources())) for comp in components)
dispatch.initialize(components, meta['HERON']['resource_indexer'], time)
# rolling window
start_index = 0
final_index = len(time)
# TODO window overlap! ( )[ ] -> ( [ ) ]
while start_index < final_index:
end_index = start_index + self._window_len
if end_index > final_index:
end_index = final_index # + 1?
specific_time = time[start_index:end_index]
print('DEBUGG starting window {} to {}'.format(start_index, end_index))
start = time_mod.time()
subdisp = self.dispatch_window(specific_time,
case, components, sources, resources,
meta)
end = time_mod.time()
print('DEBUGG solve time: {} s'.format(end-start))
# store result in corresponding part of dispatch
for comp in components:
for res, values in subdisp[comp.name].items():
dispatch.set_activity_vector(comp, res, start_index, end_index, values)
start_index = end_index
return dispatch
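  # Example of how the rolling window above advances (hedged sketch): with
  # self._window_len = 24 and 60 time points, dispatch_window is called for the
  # index slices [0:24], [24:48] and [48:60]; the final window is simply shorter.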
### INTERNAL
def dispatch_window(self, time,
case, components, sources, resources,
meta):
"""
Dispatches one part of a rolling window.
@ In, time, np.array, value of time to evaluate
@ In, case, HERON Case, Case that this dispatch is part of
@ In, components, list, HERON components available to the dispatch
@ In, sources, list, HERON source (placeholders) for signals
@ In, resources, list, sorted list of all resources in problem
@ In, meta, dict, additional variables passed through
@ Out, result, dict, results of window dispatch
"""
# build the Pyomo model
# TODO abstract this model as much as possible BEFORE, then concrete initialization per window
m = pyo.ConcreteModel()
# indices
    C = np.arange(0, len(components), dtype=int) # indexes components
    R = np.arange(0, len(resources), dtype=int) # indexes resources
    # T = np.arange(start_index, end_index, dtype=int) # indexes time
    T = np.arange(0, len(time), dtype=int) # indexes time
m.C = pyo.Set(initialize=C)
m.R = pyo.Set(initialize=R)
m.T = pyo.Set(initialize=T)
m.Times = time
m.resource_index_map = meta['HERON']['resource_indexer'] # maps the resource to its index WITHIN APPLICABLE components (sparse matrix)
# e.g. component: {resource: local index}, ... etc}
# properties
m.Case = case
m.Components = components
m.Activity = PyomoState()
m.Activity.initialize(m.Components, m.resource_index_map, m.Times, m)
# constraints and variables
for comp in components:
# NOTE: "fixed" components could hypothetically be treated differently
## however, in order for the "production" variable for components to be treatable as the
## same as other production variables, we create components with limitation
## lowerbound == upperbound == capacity (just for "fixed" dispatch components)
prod_name = self._create_production(m, comp) # variables
self._create_capacity(m, comp, prod_name) # capacity constraints
self._create_transfer(m, comp, prod_name) # transfer functions (constraints)
# ramp rates TODO ## INCLUDING previous-time boundary condition TODO
self._create_conservation(m, resources) # conservation of resources (e.g. production == consumption)
self._create_objective(meta, m) # objective
# start a solution search
done_and_checked = False
attempts = 0
while not done_and_checked:
attempts += 1
print(f'DEBUGG solve attempt {attempts} ...:')
# solve
soln = pyo.SolverFactory(SOLVER).solve(m)
# check solve status
if soln.solver.status == SolverStatus.ok and soln.solver.termination_condition == TerminationCondition.optimal:
print('DEBUGG ... solve was successful!')
else:
print('DEBUGG ... solve was unsuccessful!')
print('DEBUGG ... status:', soln.solver.status)
print('DEBUGG ... termination:', soln.solver.termination_condition)
raise RuntimeError
# try validating
print('DEBUGG ... validating ...')
validation_errs = self.validate(m.Components, m.Activity, m.Times)
if validation_errs:
done_and_checked = False
print('DEBUGG ... validation concerns raised:')
for e in validation_errs:
print('DEBUGG ... ... Time {t} ({time}) Component "{c}" Resource "{r}": {m}'
.format(t=e['time_index'],
time=e['time'],
c=e['component'].name,
r=e['resource'],
m=e['msg']))
self._create_production_limit(m, e)
# go back and solve again
# raise NotImplementedError('Validation failed, but idk how to handle that yet')
else:
print('DEBUGG Solve successful and no validation concerns raised.')
done_and_checked = True
if attempts > 100:
raise RuntimeError('Exceeded validation attempt limit!')
#soln.write() # DEBUGG
self._debug_print_soln(m) # DEBUGG
# return dict of numpy arrays
result = self._retrieve_solution(m)
return result
### PYOMO Element Constructors
def _create_production_limit(self, m, validation):
"""
Creates pyomo production constraint given validation errors
@ In, m, pyo.ConcreteModel, associated model
@ In, validation, dict, information from Validator about limit violation
@ Out, None
"""
# TODO could validator write a symbolic expression on request? That'd be sweet.
comp = validation['component']
resource = validation['resource']
r = m.resource_index_map[comp][resource]
t = validation['time_index']
limit = validation['limit']
limit_type = validation['limit_type']
prod_name = f'{comp.name}_production'
rule = partial(self._prod_limit_rule, prod_name, r, limit, limit_type, t)
constr = pyo.Constraint(rule=rule)
counter = 1
name_template = '{c}_{r}_{t}_vld_limit_constr_{{i}}'.format(c=comp.name,
r=resource,
t=t)
# make sure we get a unique name for this constraint
name = name_template.format(i=counter)
while getattr(m, name, None) is not None:
counter += 1
name = name_template.format(i=counter)
setattr(m, name, constr)
print(f'DEBUGG added validation constraint "{name}"')
def _create_production(self, m, comp):
"""
Creates production pyomo variable object for a component
@ In, m, pyo.ConcreteModel, associated model
@ In, comp, HERON Component, component to make production variables for
@ Out, prod_name, str, name of production variable
"""
name = comp.name
# create pyomo indexer for this component's resources
res_indexer = pyo.Set(initialize=range(len(m.resource_index_map[comp])))
setattr(m, f'{name}_res_index_map', res_indexer)
# production variable depends on resource, time
# # TODO if transfer function is linear, isn't this technically redundant? Maybe only need one resource ...
## Method 1: set variable bounds directly --> not working! why??
#lower, upper, domain = self._get_prod_bounds(comp)
#prod = pyo.Var(res_indexer, m.T, bounds=(lower, upper)) #within=domain,
    ## Method 2: set capacity as a separate constraint
prod = pyo.Var(res_indexer, m.T, initialize=0)
prod_name = '{c}_production'.format(c=name)
setattr(m, prod_name, prod)
return prod_name
def _create_capacity(self, m, comp, prod_name):
"""
Creates pyomo capacity constraints
@ In, m, pyo.ConcreteModel, associated model
@ In, comp, HERON Component, component to make variables for
@ In, prod_name, str, name of production variable
@ Out, None
"""
name = comp.name
cap_res = comp.get_capacity_var() # name of resource that defines capacity
r = m.resource_index_map[comp][cap_res] # production index of the governing resource
# production is always lower than capacity
## NOTE get_capacity returns (data, meta) and data is dict
## TODO does this work with, e.g., ARMA-based capacities?
### -> "time" is stored on "m" and could be used to correctly evaluate the capacity
cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)
rule = partial(self._capacity_rule, prod_name, r, cap)
constr = pyo.Constraint(m.T, rule=rule)
setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)
# minimum production
print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())
if comp.is_dispatchable() == 'fixed':
minimum = cap
var = getattr(m, prod_name)
values = var.get_values()
for k in values:
values[k] = cap
var.set_values(values)
else:
minimum = | |
# coding=utf-8
"""
Module responsible for the definition of functions that convert raw units (available in the
acquisition files returned by OpenSignals) and sample units to physical units like mV, A, ºC,
s, ..., according to the sensor under analysis.
Available Functions
-------------------
[Public]
raw_to_phy
Function that converts each sample value in raw units to a physical unit taking into account
the respective transfer function for the sensor and device specified as an input.
generate_time
Considering the acquisition sampling rate and the number of samples that compose
the signal, this function will return a time axis in seconds.
Observations/Comments
---------------------
None
/\
"""
import numpy
from .aux_functions import _is_a_url, _generate_download_google_link
from .load import load
def raw_to_phy(sensor, device, raw_signal, resolution, option):
"""
-----
Brief
-----
Function for converting raw units to physical units.
-----------
Description
-----------
Each sensor and device has a specific transfer function that models the inputs to outputs. This transfer function
is, thus, used in order to convert the raw units that are measured to physical units that originated the data.
    This function makes the conversion of raw units to physical units, using the information of sensor and device.
----------
Parameters
----------
sensor : str
Sensor label:
- "ECG"
- "EMG"
- "TEMP"
- "BVP"
- "SpO2.HEAD"
- "SpO2.FING"
- "SpO2.ARM"
- "EEG"
- "EDA"
device : str
PLUX device label:
- "bioplux"
- "bioplux_exp"
- "biosignalsplux"
- "rachimeter"
- "channeller"
- "swifter"
- "ddme_openbanplux"
raw_signal : list
Raw signal samples.
resolution : int
Resolution selected during acquisition.
option : str (optional)
Output units (only available in certain sensors):
- "mV"
- "V"
- "C" (Celsius)
- "K" (Kelvin)
- "Ohm"
- "A"
- "uA"
        (When a unit is not applicable to the selected sensor/device, a RuntimeError is raised).
Returns
-------
out : list
Signal in the new scale.
"""
raw_signal = numpy.array(raw_signal)
# Check if resolution has the correct data format.
if not isinstance(resolution, int) and not isinstance(resolution, numpy.int32):
raise RuntimeError("The specified resolution needs to be an integer.")
out = None
if sensor == "TEMP":
vcc = 3.0
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller",
"swifter", "ddme_openbanplux"]
available_dev_2 = ["bitalino", "bitalino_rev", "bitalino_riot"]
if option == "Ohm":
if device in available_dev_1:
out = (1e4 * raw_signal) / (2**resolution - raw_signal)
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
elif option == "K":
a_0 = 1.12764514e-3
a_1 = 2.34282709e-4
a_2 = 8.77303013e-8
out = 1 / (a_0 + a_1 * numpy.log(raw_to_phy(sensor, device, list(raw_signal),
resolution, option="Ohm")) + a_2 *
((numpy.log(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="Ohm"))) ** 3))
elif option == "C":
if device in available_dev_1:
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="K")) - 273.15
elif device in available_dev_2:
out = ((raw_signal / (2 ** resolution)) * vcc - 0.5) * 100
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor == "EMG":
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller",
"swifter", "ddme_openbanplux"]
available_dev_2 = ["bitalino"]
available_dev_3 = ["bitalino_rev", "bitalino_riot"]
if option == "mV":
if device in available_dev_1:
vcc = 3.0
offset = 0.5
gain = 1
elif device in available_dev_2:
vcc = 3.3
offset = 0.5
gain = 1.008
elif device in available_dev_3:
vcc = 3.3
offset = 0.5
gain = 1.009
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
out = (raw_signal * vcc / (2 ** resolution) - vcc * offset) / gain
elif option == "V":
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="mV")) / 1000
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor == "ECG":
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller",
"swifter", "ddme_openbanplux"]
available_dev_2 = ["bitalino", "bitalino_rev", "bitalino_riot"]
if option == "mV":
if device in available_dev_1:
vcc = 3.0
offset = 0.5
gain = 1.019
elif device in available_dev_2:
vcc = 3.3
offset = 0.5
gain = 1.1
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
out = (raw_signal * vcc / (2 ** resolution) - vcc * offset) / gain
elif option == "V":
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="mV")) / 1000
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor == "BVP":
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller",
"swifter", "ddme_openbanplux"]
if option == "uA":
vcc = 3.0
if device in available_dev_1:
offset = 0
gain = 0.190060606
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
out = (raw_signal * vcc / (2 ** resolution) - vcc * offset) / gain
elif option == "A":
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="uA")) * 1e-6
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor in ["SpO2.ARM", "SpO2.HEAD", "SpO2.FING"]:
available_dev_1 = ["channeller", "biosignalsplux", "swifter"]
scale_factor = None
if "ARM" in sensor or "FING" in sensor:
scale_factor = 1.2
elif "HEAD" in sensor:
scale_factor = 0.15
if option == "uA":
if device in available_dev_1:
out = scale_factor * (raw_signal / (2 ** resolution))
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
elif option == "A":
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="uA")) * 1e-6
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor == "ACC":
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller",
"swifter", "ddme_openbanplux"]
if option == "g":
if device in available_dev_1:
Cm = 28000.0
CM = 38000.0
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
out = 2.0*((2**(16.0 - resolution) * raw_signal - Cm) / (CM - Cm)) - 1.0
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor == "EEG":
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller", "swifter",
"ddme_openbanplux"]
available_dev_2 = ["bitalino_rev", "bitalino_riot"]
if option == "uV":
if device in available_dev_1:
vcc = 3.0
offset = 0.5
gain = 0.041990
elif device in available_dev_2:
vcc = 3.3
offset = 0.5
gain = 0.040000
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
out = ((raw_signal * vcc / (2 ** resolution)) - vcc * offset) / gain
elif option == "V":
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="uV")) * 1e6
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
elif sensor == "EDA":
available_dev_1 = ["bioplux", "bioplux_exp", "biosignalsplux", "rachimeter", "channeller", "swifter",
"biosignalspluxsolo"]
available_dev_2 = ["bitalino"]
available_dev_3 = ["bitalino_rev", "bitalino_riot"]
if option == "uS":
if device in available_dev_1:
vcc = 3.0
offset = 0
gain = 0.12
elif device in available_dev_2:
return 1.0 / (1.0 - (raw_signal / (2 ** resolution)))
elif device in available_dev_3:
vcc = 3.3
offset = 0
gain = 0.132
else:
raise RuntimeError("The output specified unit does not have a defined transfer "
"function for the used device.")
out = ((raw_signal * vcc / (2 ** resolution)) - vcc * offset) / gain
elif option == "S":
out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,
option="uS")) * 1e6
else:
raise RuntimeError("The selected output unit is invalid for the sensor under analysis.")
else:
raise RuntimeError("The specified sensor is not valid or for now is not available for unit "
"conversion.")
return out
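# Example (hedged sketch): converting 16-bit ECG samples acquired with a
# biosignalsplux device. With vcc = 3.0 V, offset = 0.5 and gain = 1.019, the
# mid-scale raw value 32768 maps to (32768 * 3.0 / 65536 - 1.5) / 1.019 = 0.0 mV.
#
#   ecg_mv = raw_to_phy("ECG", "biosignalsplux", [30000, 32768, 35000],
#                       resolution=16, option="mV")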
def generate_time(signal, sample_rate=1000):
"""
-----
Brief
-----
Function intended to generate a time axis of the input signal.
-----------
Description
-----------
The time axis generated by the acquisition process originates a set of consecutive values that represents the
advancement of time, but does not have specific units.
    Once the acquisitions are made with a specific sampling frequency, it is possible to calculate the time instant
    of each sample by dividing the sample index by the sampling frequency.
The current function maps the values |