import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import random, copy
import matplotlib.cm as cm
import itertools
import scipy.stats
import math
import statistics
import pysan.multisequence as pysan_ms
from itertools import combinations
random.seed('1<PASSWORD>')
def generate_sequence(length, alphabet):
"""
Generates a random sequence of a given length, given an alphabet of elements.
This is useful for benchmarking function performance, and creating examples in the docs.
Example
--------
>>> ps.generate_sequence(12, [1,2,3])
[2, 3, 3, 3, 2, 2, 2, 1, 3, 3, 2, 2]
"""
return [random.choice(alphabet) for x in range(length)]
def full_analysis(sequence):
"""
Computes and displays a collection of descriptive information and plots for a given sequence.
"""
details = describe(sequence)
sequence_plot = plot_sequence(sequence)
tm = plot_transition_matrix(sequence)
element_counts = get_element_counts(sequence)
element_prevalence = plot_element_counts(sequence)
bigrams = plot_ngram_counts(sequence, 2)
trigrams = plot_ngram_counts(sequence, 3)
print(details)
print(element_counts, element_prevalence)
sequence_plot.show()
tm.show()
bigrams.show()
trigrams.show()
return None
def describe(sequence):
"""
Computes descriptive properties of a given sequence, returning a dictionary containing the keys:
{'length', 'alphabet', 'sequence_universe', 'unique_bigrams', 'bigram_universe', 'entropy'}.
Example
---------
>>> sequence = [1,1,2,1,2,2,3,4,2]
>>> ps.describe(sequence) #doctest: +NORMALIZE_WHITESPACE
{'length': 9,
'alphabet': {1, 2, 3, 4},
'is_recurrent': True,
'entropy': 0.8763576394898526,
'complexity': 0.6885628567541515,
'turbulence': 7.868228975239414,
'element_counts': {1: 3, 2: 4, 3: 1, 4: 1},
'first_positions': {1: 0, 2: 2, 3: 6, 4: 7},
'ntransitions': 6,
'sequence_universe': 262144,
'distinct_subsequences': 175,
'unique_bigrams': 7,
'bigram_universe': 16,
'longest_spell': {'element': 1, 'count': 2, 'start': 0}}
"""
details = {
'length': len(sequence),
'alphabet': get_alphabet(sequence),
'is_recurrent': is_recurrent(sequence),
'entropy' : get_entropy(sequence),
'complexity': get_complexity(sequence),
'turbulence': get_turbulence(sequence),
'element_counts': get_element_counts(sequence),
'first_positions': get_first_positions(sequence),
'ntransitions' : get_ntransitions(sequence),
'sequence_universe': get_ngram_universe(sequence, len(sequence)),
'distinct_subsequences': get_ndistinct_subsequences(sequence),
'unique_bigrams': len(get_unique_ngrams(sequence, 2)),
'bigram_universe' : get_ngram_universe(sequence, 2),
'longest_spell': get_longest_spell(sequence)
# spell durations here
}
return details
# ====================================================================================
# SUMMARY STATISTICS
# ====================================================================================
def is_recurrent(sequence):
"""
Returns true if the given sequence is recurrent (elements can exist more than once), otherwise returns false.
Example
---------
>>> sequence = [1,2,3,4,5]
>>> ps.is_recurrent(sequence)
False
>>> sequence = [1,1,2,2,3]
>>> ps.is_recurrent(sequence)
True
"""
element_counts = get_element_counts(sequence)
truths = [count > 1 for element, count in element_counts.items()]
if True in truths:
return True
return False
def get_entropy(sequence):
"""
Computes the normalised `Shannon entropy <https://en.wikipedia.org/wiki/Entropy_(information_theory)>`_ of a given sequence, using the `scipy.stats.entropy <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html>`_ implementation.
Note that this measure is insensitive to transition frequency or event order, so should be used in conjunction with other measures.
Example
--------
>>> low_entropy_sequence = [1,1,1,1,1,1,1,2]
>>> ps.get_entropy(low_entropy_sequence)
0.543...
>>> high_entropy_sequence = [1,2,2,3,4,3]
>>> ps.get_entropy(high_entropy_sequence)
0.959...
"""
alphabet = get_alphabet(sequence)
entropy = 0
for state in alphabet:
proportion_occurances = sequence.count(state) / len(sequence)
entropy += proportion_occurances * math.log(proportion_occurances)
maximal_occurances = 1 / len(alphabet)
alphabet_entropy = sum([maximal_occurances * math.log(maximal_occurances) for x in alphabet])
if alphabet_entropy == 0:
return 0
return -entropy / -alphabet_entropy
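# Worked check of the normalisation above, using the docstring example (illustration only,
# not part of the library): for [1,1,1,1,1,1,1,2] the proportions are {1: 7/8, 2: 1/8}, so
#   entropy          = 7/8 * ln(7/8) + 1/8 * ln(1/8) ≈ -0.3768
#   alphabet_entropy = 2 * (1/2) * ln(1/2)           ≈ -0.6931
# and -entropy / -alphabet_entropy ≈ 0.543, matching the docstring example.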
def get_turbulence(sequence):
"""
Computes turbulence for a given sequence, based on `Elzinga & Liefbroer's 2007 definition <https://www.researchgate.net/publication/225402919_De-standardization_of_Family-Life_Trajectories_of_Young_Adults_A_Cross-National_Comparison_Using_Sequence_Analysis>`_ which is also implemented in the `TraMineR <http://traminer.unige.ch/doc/seqST.html>`_ sequence analysis library.
Example
--------
>>> sequence = [1,1,2,2,3]
>>> ps.get_turbulence(sequence)
5.228...
"""
phi = get_ndistinct_subsequences(sequence)
#print('phi', phi)
state_durations = [value for key, value in get_spells(sequence)]
#print('durations', state_durations)
#print('mean duration', statistics.mean(state_durations))
variance_of_state_durations = statistics.variance(state_durations)
#print('variance', variance_of_state_durations)
tbar = statistics.mean(state_durations)
maximum_state_duration_variance = (len(sequence) - 1) * (1 - tbar) ** 2
#print('smax', maximum_state_duration_variance)
top_right = maximum_state_duration_variance + 1
bot_right = variance_of_state_durations + 1
turbulence = math.log2(phi * (top_right / bot_right))
#print('turbulence', turbulence)
return turbulence
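# The computation above corresponds to Elzinga-style turbulence
#   T(x) = log2( phi(x) * (s2_max + 1) / (s2_t + 1) )
# where phi(x) counts distinct subsequences and s2_t is the variance of the spell durations.
# Worked check with the docstring example [1,1,2,2,3] (illustration only):
#   phi = 18, spell durations = [2, 2, 1], s2_t = 1/3, tbar = 5/3,
#   s2_max = (5 - 1) * (1 - 5/3)**2 = 16/9,
#   T = log2(18 * (16/9 + 1) / (1/3 + 1)) = log2(37.5) ≈ 5.229, matching the docstring.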
def get_complexity(sequence):
"""
Computes the complexity of a given sequence, based on TraMineR's `seqici <http://traminer.unige.ch/doc/seqici.html>`_ method.
"""
alphabet = get_alphabet(sequence)
pre_log = 1 / len(alphabet)
hmax = -math.log(pre_log)
#print('hmax', hmax)
if hmax == 0:
return 0 # all identical elements, no complexity
hs = get_entropy(sequence)
#print('hs', hs)
qs = get_ntransitions(sequence)
#print('qs', qs)
qmax = len(sequence) - 1
#print('qmax', qmax)
norm_transitions = qs / qmax
norm_entropy = hs / hmax
#print('nt', norm_transitions)
#print('ne', norm_entropy)
complexity = math.sqrt(norm_transitions * norm_entropy)
#print('complexity', complexity)
return complexity
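# The value returned above is the TraMineR-style composite complexity index
#   C(x) = sqrt( (q(x) / q_max) * (h(x) / h_max) )
# i.e. the geometric mean of the normalised transition count and the normalised entropy,
# so it ranges from 0 (a constant sequence) to 1 (maximal entropy with a transition at every step).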
def get_routine(sequence, duration):
"""
Computes a normalised measure of routine within a sequence for a given duration within that sequence.
E.g. with a sequence where each element is one day, calling get_routine() with a duration of 7 would look at weekly routines.
Note that this routine measure is identical to the multisequence measure of synchrony, but applied within-sequence in duration length chunks.
Example
---------
>>> sequence = [1,1,2,2,3,1,1,2,3,2,1,1,3,2,2]
>>> ps.get_routine(sequence, 5)
0.4
"""
if len(sequence) % duration != 0:
raise Exception('sequence not divisible by interval, check data input')
num_cycles = int(len(sequence) / duration)
cycles = [sequence[n * duration:n * duration + duration] for n in range(num_cycles)]
return pysan_ms.get_synchrony(cycles)
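# Worked sketch of the docstring example (illustration only): with duration=5 the
# 15-element sequence is split into the cycles
#   [1,1,2,2,3], [1,1,2,3,2], [1,1,3,2,2]
# and the synchrony measure scores the fraction of positions at which every cycle holds
# the same element; here only the first two positions agree, giving 2/5 = 0.4.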
# ====================================================================================
# ELEMENTS
# ====================================================================================
def get_alphabet(sequence):
"""
Computes the alphabet of a given sequence (set of its unique elements).
Parameters
----------
sequence : list(int)
A sequence of elements, encoded as integers e.g. [1,3,2,1].
Example
----------
>>> sequence = [1,1,2,1,2,2,3,4,2]
>>> ps.get_alphabet(sequence)
{1, 2, 3, 4}
"""
return set(sequence)
def get_first_positions(sequence):
"""
Reports the first occurrence of each element in the sequence as a dictionary, with elements as keys and their first positions as values.
Example
---------
>>> sequence = [1,1,2,3,4]
>>> ps.get_first_positions(sequence)
{1: 0, 2: 2, 3: 3, 4: 4}
"""
unique_elements = list(set(sequence))
first_positions = {}
for element in unique_elements:
first_positions[element] = sequence.index(element)
return first_positions
def get_element_counts(sequence):
"""
Counts the number of occurrences of each element in a sequence, returning a dictionary containing the elements as keys and their counts as values.
Example
---------
>>> sequence = [1,1,2,1,2,2,3,4,2]
>>> ps.get_element_counts(sequence)
{1: 3, 2: 4, 3: 1, 4: 1}
"""
alphabet = get_alphabet(sequence)
counts = {}
for element in alphabet:
counts[element] = sequence.count(element)
return counts
def get_element_frequency(sequence):
"""
Computes the relative frequency (aka prevalence or unconditional probability) of each element in a sequence, returning a dictionary where each key is an element and each value is that element's relative frequency.
Example
---------
>>> sequence = [1,1,2,1,2,2,3,4,2,1]
>>> ps.get_element_frequency(sequence)
{1: 0.4, 2: 0.4, 3: 0.1, 4: 0.1}
"""
alphabet = get_alphabet(sequence)
prevalences = {}
for element in alphabet:
prevalences[element] = sequence.count(element) / len(sequence)
return prevalences
# ====================================================================================
# SUBSEQUENCES
# ====================================================================================
# NGRAMS
def get_subsequences(sequence):
"""
Computes all possible subsequences of a given sequence, returning them as a list of lists.
Note that the empty list is not included as a subsequence.
This method is based on a similar implementation available `here <https://www.w3resource.com/python-exercises/list/python-data-type-list-exercise-33.php>`_.
Example
--------
>>> sequence = [1,2,3]
>>> ps.get_subsequences(sequence)
[[1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
"""
subsequences = []
for i in range(0, len(sequence)+1):
temp = [list(x) for x in combinations(sequence, i)]
if len(temp)>0:
subsequences.extend(temp)
return subsequences[1:]
def get_ndistinct_subsequences(sequence):
"""
Computes the number of distinct subsequences for a given sequence, based on original implementation by
<NAME> available `here <https://www.geeksforgeeks.org/count-distinct-subsequences/>`_.
Example
--------
>>> sequence = [1,2,1,3]
>>> ps.get_ndistinct_subsequences(sequence)
14
"""
# this implementation works on strings, so parse non-strings to strings
if not isinstance(sequence, str):
sequence = [str(e) for e in sequence]
# create an array to store the index of the last occurrence of each character;
# 256 + 1 slots cover the extended ASCII code points returned by ord(), so each
# element is assumed to map to a single character after the str() conversion
last = [-1 for i in range(256 + 1)]
# length of input string
sequence_length = len(sequence)
# dp[i] stores the count of distinct subsequences of the prefix of length i
dp = [-2 for i in range(sequence_length + 1)]
# the empty prefix has exactly one subsequence (the empty one)
dp[0] = 1
# Traverse through all lengths from 1 to n
for i in range(1, sequence_length + 1):
# number of subsequences of the prefix str[0...i-1]
dp[i] = 2 * dp[i - 1]
# if the current character has appeared before, remove all subsequences ending with its previous occurrence.
if last[ord(sequence[i - 1])] != -1:
dp[i] = dp[i] - dp[last[ord(sequence[i - 1])]]
last[ord(sequence[i - 1])] = i - 1
return dp[sequence_length]
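# Worked trace of the recurrence above for the docstring example [1,2,1,3] (illustration only):
#   dp = [1, 2, 4, 2*4 - dp[0], 2*7] -> [1, 2, 4, 7, 14]
# where the subtraction at i=3 removes subsequences already counted for the first '1',
# so 14 distinct subsequences are reported (the empty subsequence is included in the count).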
def get_unique_ngrams(sequence, n):
"""
Creates a list of all unique ngrams found in a given sequence.
Example
---------
>>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
>>> ps.get_unique_ngrams(sequence, 3) #doctest: +NORMALIZE_WHITESPACE
[[2, 1, 1],
[1, 1, 4],
[1, 4, 2],
[4, 2, 2],
[2, 2, 3],
[2, 3, 4],
[3, 4, 2],
[4, 2, 1]]
"""
unique_ngrams = []
for x in range(len(sequence) - n + 1):
this_ngram = sequence[x:x + n]
if str(this_ngram) not in unique_ngrams:
unique_ngrams.append(str(this_ngram))
return [eval(x) for x in unique_ngrams]
def get_all_ngrams(sequence, n):
"""
Creates a list of all ngrams found in a given sequence.
Example
---------
>>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
>>> ps.get_all_ngrams(sequence, 3) #doctest: +NORMALIZE_WHITESPACE
[[2, 1, 1],
[1, 1, 4],
[1, 4, 2],
[4, 2, 2],
[2, 2, 3],
[2, 3, 4],
[3, 4, 2],
[4, 2, 1],
[2, 1, 1]]
"""
all_ngrams = []
for x in range(len(sequence) - n + 1):
this_ngram = sequence[x:x + n]
all_ngrams.append(this_ngram)
return all_ngrams
def get_ngram_universe(sequence, n):
"""
Computes the universe of possible ngrams given a sequence. Where n is equal to the length of the sequence, the resulting number represents the sequence universe.
Example
--------
>>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
>>> ps.get_ngram_universe(sequence, 3)
64
"""
# if recurrence is possible, the universe is given by k^t (SSA pg 68)
k = len(set(sequence))
if k > 10 and n > 10:
return 'really big'
return k**n
def get_ngram_counts(sequence, n):
"""
Computes the prevalence of ngrams in a sequence, returning a dictionary where each key is an ngram, and each value is the number of times that ngram appears in the sequence.
Parameters
-------------
sequence : list(int)
A sequence of elements, encoded as integers e.g. [1,3,2,1].
n: int
The number of elements in the ngrams to extract.
Example
---------
>>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
>>> ps.get_ngram_counts(sequence, 3) #doctest: +NORMALIZE_WHITESPACE
{'[2, 1, 1]': 2,
'[1, 1, 4]': 1,
'[1, 4, 2]': 1,
'[4, 2, 2]': 1,
'[2, 2, 3]': 1,
'[2, 3, 4]': 1,
'[3, 4, 2]': 1,
'[4, 2, 1]': 1}
"""
ngrams = get_unique_ngrams(sequence, n)
ngram_counts = {str(i):0 for i in ngrams}
for x in range(len(sequence) - n + 1):
this_ngram = sequence[x:x + n]
ngram_counts[str(this_ngram)] += 1
return ngram_counts
# TRANSITIONS
def get_transitions(sequence):
"""
Extracts a list of transitions from a
"""
Train utils
**************************************
Losses, training loops and helper functions used during training.
"""
from pennylane import numpy as np
import autograd.numpy as np
from autograd.numpy import exp
import itertools
import time
def hinge_loss(labels, predictions, type='L2'):
"""
Args:
labels:
predictions:
type: (Default value = 'L2')
Returns:
"""
loss = 0
for l, p in zip(labels, predictions):
if type == 'L1':
loss = loss + np.abs(l - p) # L1 loss
elif type == 'L2':
loss = loss + (l - p) ** 2 # L2 loss
loss = loss / labels.shape[0]
return loss
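# Worked numeric check (illustration only): for labels np.array([1., -1.]) and
# predictions np.array([0.8, -0.6]),
#   L2: ((1 - 0.8)**2 + (-1 + 0.6)**2) / 2 = (0.04 + 0.16) / 2 = 0.1
#   L1: (abs(1 - 0.8) + abs(-1 + 0.6)) / 2 = (0.2 + 0.4) / 2   = 0.3
# Note that, despite its name, this computes a mean absolute/squared error rather than a
# margin-based hinge loss.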
def ohe_accuracy(labels, predictions):
"""
Args:
labels:
predictions:
Returns:
"""
loss = 0
for l, p in zip(labels, predictions):
loss += np.argmax(l) == np.argmax(p)
return loss / labels.shape[0]
def wn_accuracy(labels, predictions):
"""
Args:
labels:
predictions:
Returns:
"""
loss = 0
#tol = 0.05
tol = 0.1
for l, p in zip(labels, predictions):
if abs(l - p) < tol:
loss = loss + 1
loss = loss / labels.shape[0]
return loss
def mse(labels, predictions):
"""
Args:
labels:
predictions:
Returns:
"""
# print(labels.shape, predictions.shape)
loss = 0
for l, p in zip(labels, predictions):
loss += np.sum((l - p) ** 2)
return loss / labels.shape[0]
def make_predictions(circuit,pre_trained_vals,X,Y,**kwargs):
"""
Args:
circuit:
pre_trained_vals:
X:
Y:
**kwargs:
Returns:
"""
if kwargs['readout_layer']=='one_hot':
var = pre_trained_vals
elif kwargs['readout_layer']=="weighted_neuron":
var = pre_trained_vals
# make final predictions
if kwargs['readout_layer']=='one_hot':
final_predictions = np.stack([circuit(var, x) for x in X])
acc=ohe_accuracy(Y,final_predictions)
elif kwargs['readout_layer']=='weighted_neuron':
from autograd.numpy import exp
n = kwargs.get('nqubits')
w = var[:,-1]
theta = var[:,:-1].numpy()
final_predictions = [int(np.round(2.*(1.0/(1.0+exp(np.dot(-w,circuit(theta, features=x)))))- 1.,1)) for x in X]
acc=wn_accuracy(Y,final_predictions)
return final_predictions,acc
def train_circuit(circuit, parameter_shape,X_train, Y_train, batch_size, learning_rate,**kwargs):
"""train a circuit classifier
Args:
circuit(qml.QNode): A circuit that you want to train
parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
the second one is the number of layers in the circuit architecture.
X_train(np.ndarray): An array of floats of size (M, n) to be used as training data.
Y_train(np.ndarray): An array of size (M,) which are the categorical labels
associated to the training data.
batch_size(int): Batch size for the circuit training.
learning_rate(float): The learning rate/step size of the optimizer.
kwargs: Hyperparameters for the training (passed as keyword arguments). There are the following hyperparameters:
nsteps (int) : Number of training steps.
optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
Pass as qml.OptimizerName.
Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
The first element is the maximum number of parameters among all architectures,
the second is the maximum inference time among all architectures in terms of computing time,
the third one is the maximum inference time among all architectures in terms of the number of CNOTS
in the circuit
rate_type (string): Determines the type of error rate in the W-coefficient.
If rate_type == 'accuracy', the inference time of the circuit
is equal to the time it takes to evaluate the accuracy of the trained circuit with
respect to a validation batch three times the size of the training batch size and
the error rate is equal to 1-accuracy (w.r.t. to a validation batch).
If rate_type == 'batch_cost', the inference time of the circuit is equal to the time
it takes to train the circuit (for nsteps training steps) and compute the cost at
each step and the error rate is equal to the cost after nsteps training steps.
**kwargs:
Returns:
W_: W-coefficient, trained weights
"""
#print('batch_size',batch_size)
# fix the seed while debugging
#np.random.seed(1337)
def ohe_cost_fcn(params, circuit, ang_array, actual):
"""use MAE to start
Args:
params:
circuit:
ang_array:
actual:
Returns:
"""
predictions = (np.stack([circuit(params, x) for x in ang_array]) + 1) * 0.5
return mse(actual, predictions)
def wn_cost_fcn(params, circuit, ang_array, actual):
"""use MAE to start
Args:
params:
circuit:
ang_array:
actual:
Returns:
"""
w = params[:,-1]
theta = params[:,:-1]
#print(w.shape,w,theta.shape,theta)
predictions = np.asarray([2.*(1.0/(1.0+exp(np.dot(-w,circuit(theta, features=x)))))- 1. for x in ang_array])
return mse(actual, predictions)
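# In the weighted-neuron readout used above, the last column of `params` is a weight
# vector w applied to the circuit's output vector (presumably one expectation value per
# qubit); the prediction
#   2 * sigmoid(w . circuit(theta, x)) - 1
# squashes the weighted sum into (-1, 1) so it can be compared against +/-1 labels with a
# squared-error cost.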
if kwargs['readout_layer']=='one_hot':
var = np.zeros(parameter_shape)
elif kwargs['readout_layer']=="weighted_neuron":
var = np.hstack((np.zeros(parameter_shape),np.random.random((kwargs['nqubits'],1))-0.5))
rate_type = kwargs['rate_type']
inf_time = kwargs['inf_time']
optim = kwargs['optim']
numcnots = kwargs['numcnots']
Tmax = kwargs['Tmax'] #Tmax[0] is maximum parameter size, Tmax[1] maximum inftime (timeit),Tmax[2] maximum number of entangling gates
num_train = len(Y_train)
validation_size = int(0.1*num_train)
opt = optim(stepsize=learning_rate) #all optimizers in autograd module take in argument stepsize, so this works for all
start = time.time()
for _ in range(kwargs['nsteps']):
batch_index = np.random.randint(0, num_train, (batch_size,))
X_train_batch = np.asarray(X_train[batch_index])
Y_train_batch = np.asarray(Y_train[batch_index])
if kwargs['readout_layer']=='one_hot':
var, cost = opt.step_and_cost(lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch), var)
elif kwargs['readout_layer']=='weighted_neuron':
var, cost = opt.step_and_cost(lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch), var)
end = time.time()
cost_time = (end - start)
if kwargs['rate_type'] == 'accuracy':
validation_batch = np.random.randint(0, num_train, (validation_size,))
X_validation_batch = np.asarray(X_train[validation_batch])
Y_validation_batch = np.asarray(Y_train[validation_batch])
start = time.time() # add in timeit function from Wbranch
if kwargs['readout_layer']=='one_hot':
predictions = np.stack([circuit(var, x) for x in X_validation_batch])
elif kwargs['readout_layer']=='weighted_neuron':
n = kwargs.get('nqubits')
w = var[:,-1]
theta = var[:,:-1]
predictions = [int(np.round(2.*(1.0/(1.0+exp(np.dot(-w,circuit(theta, features=x)))))- 1.,1)) for x in X_validation_batch]
end = time.time()
inftime = (end - start) / len(X_validation_batch)
if kwargs['readout_layer']=='one_hot':
err_rate = (1.0 - ohe_accuracy(Y_validation_batch,predictions))+10**-7 #add small epsilon to prevent divide by 0 errors
#print('error rate:',err_rate)
#print('weights: ',var)
elif kwargs['readout_layer']=='weighted_neuron':
err_rate = (1.0 - wn_accuracy(Y_validation_batch,predictions))+10**-7 #add small epsilon to prevent divide by 0 errors
#print('error rate:',err_rate)
#print('weights: ',var)
elif kwargs['rate_type'] == 'batch_cost':
err_rate = (cost) + 10**-7 #add small epsilon to prevent divide by 0 errors
#print('error rate:',err_rate)
#print('weights: ',var)
inftime = cost_time
# QHACK #
if kwargs['inf_time'] =='timeit':
W_ = ((Tmax[0] - len(var)) / Tmax[0]) * (Tmax[1] - inftime) / (Tmax[1]) * (1. / err_rate)
elif kwargs['inf_time']=='numcnots':
nc_ = numcnots
W_ = ((Tmax[0] - len(var)) / Tmax[0]) * (Tmax[2] - nc_) / (Tmax[2]) * (1. / err_rate)
return W_,var
def evaluate_w(circuit, n_params, X_train, Y_train, **kwargs):
"""together with the function train_circuit(...) this executes lines 7-8 in the Algorithm 1 pseudo code of (de Wynter 2020)
batch_sets and learning_rates are lists, if just single values needed then pass length-1 lists
Args:
circuit:
n_params:
X_train:
Y_train:
**kwargs:
Returns:
"""
Wmax = 0.0
batch_sets = kwargs.get('batch_sizes')
learning_rates=kwargs.get('learning_rates')
hyperparameter_space = list(itertools.product(batch_sets, learning_rates))
for idx, sdx in hyperparameter_space:
wtemp, weights = train_circuit(circuit, n_params,X_train, Y_train, batch_size=idx, learning_rate=sdx, **kwargs)
if wtemp >= Wmax:
Wmax = wtemp
saved_weights = weights
return Wmax, saved_weights
def train_best(circuit, pre_trained_vals,X_train, Y_train, batch_size, learning_rate,**kwargs):
"""train a circuit classifier
Args:
circuit(qml.QNode): A circuit that you want to train
pre_trained_vals: The pre-trained parameter values for the circuit (e.g. the best weights returned by evaluate_w).
X_train(np.ndarray): An array of floats of size (M, n) to be used as training data.
Y_train(np.ndarray): An array of size (M,) which are the categorical labels
associated to the training data.
batch_size(int): Batch size for the circuit training.
learning_rate(float): The learning rate/step size of the optimizer.
kwargs: Hyperparameters for the training (passed as keyword arguments). There are the following hyperparameters:
nsteps (int) : Number of training steps.
optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
Pass as qml.OptimizerName.
Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
The first element is the maximum number of parameters among all architectures,
the second is the maximum inference time among all architectures in terms of computing time,
the third one is the maximum inference time among all architectures in terms of the number of CNOTS
in the circuit
rate_type (string): Determines the type of error rate in the W-coefficient.
If rate_type == 'accuracy', the inference time of the circuit
is equal to the time it takes to evaluate the accuracy of the trained circuit with
respect to a validation batch three times the size of the training batch size and
the error rate is equal to 1-accuracy (w.r.t. to a validation batch).
If rate_type
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import copy
import functools
import hashlib
import os
import pathlib2
from .._protos.public.modeldb.versioning import Dataset_pb2 as _DatasetService
from ..external import six
from .._internal_utils import (
_file_utils,
_request_utils,
_utils,
)
from ..repository import _blob
DEFAULT_DOWNLOAD_DIR = "mdb-data-download" # to be in cwd
class _Dataset(_blob.Blob):
"""
Base class for dataset versioning. Not for human consumption.
"""
_CANNOT_DOWNLOAD_ERROR = RuntimeError(
"this dataset cannot be used for downloads;"
" consider using `commit.get()` or `dataset_version.get_content()"
" to obtain a download-capable dataset"
" if ModelDB-managed versioning was enabled"
)
def __init__(self, paths=None, enable_mdb_versioning=False):
super(_Dataset, self).__init__()
self._components_map = dict() # paths to Component objects
self._mdb_versioned = enable_mdb_versioning
# to enable download() with ModelDB-managed versioning
# using commit.get()
self._commit = None
self._blob_path = None
# using dataset_version.get_content()
self._dataset_version = None
def __repr__(self):
lines = ["{} Version".format(self.__class__.__name__)]
components = self._components_map.values()
components = sorted(components, key=lambda component: component.path)
for component in components:
lines.extend(repr(component).splitlines())
return "\n ".join(lines)
def __add__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
new = copy.deepcopy(self)
return new.__iadd__(other)
def __iadd__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
self_keys = set(self._components_map.keys())
other_keys = set(other._components_map.keys())
intersection = list(self_keys & other_keys)
if intersection:
raise ValueError("dataset already contains paths: {}".format(intersection))
if self._mdb_versioned != other._mdb_versioned:
raise ValueError("datasets must have same value for `enable_mdb_versioning`")
self._add_components(other._components_map.values())
return self
@classmethod
def _create_empty(cls):
return cls([])
def _add_components(self, components):
self._components_map.update({
component.path: component
for component
in components
})
@abc.abstractmethod
def _prepare_components_to_upload(self):
pass
@abc.abstractmethod
def _clean_up_uploaded_components(self):
pass
def _set_commit_and_blob_path(self, commit, blob_path):
"""
Associate this blob with a commit and path to enable downloads.
Parameters
----------
commit : :class:`verta.repository.Commit`
Commit this blob was gotten from.
blob_path : str
Location of this blob within its Repository.
"""
# TODO: raise error if _dataset_version already set
self._commit = commit
self._blob_path = blob_path
def _set_dataset_version(self, dataset_version):
"""
Associate this blob with a dataset version to enable downloads.
Parameters
----------
dataset_version : :class:`~verta.dataset.entities.DatasetVersion`
Dataset version this blob was gotten from.
"""
# TODO: raise error if _commit already set
self._dataset_version = dataset_version
@property
def _is_downloadable(self):
"""
Whether this has a linked commit or dataset version to download from.
"""
if self._commit and self._blob_path:
return True
elif self._dataset_version:
return True
else:
return False
@property
def _conn(self):
"""
Co-opts the ``_conn`` from associated commit or dataset version.
"""
if self._commit:
return self._commit._conn
elif self._dataset_version:
return self._dataset_version._conn
else:
raise self._CANNOT_DOWNLOAD_ERROR
def _get_url_for_artifact(self, path, method):
if self._commit and self._blob_path:
return self._commit._get_url_for_artifact(self._blob_path, path, method)
elif self._dataset_version:
return self._dataset_version._get_url_for_artifact(path, method)
else:
raise self._CANNOT_DOWNLOAD_ERROR
# TODO: there is too much happening in this method's body
def _get_components_to_download(self, component_path=None, download_to_path=None):
"""
Identify components to be downloaded, along with their local destination paths.
Parameters
----------
component_path : str, optional
Path to directory or file within blob.
download_to_path : str, optional
Local path to download to.
Returns
-------
components_to_download : dict
Map of component paths to local destination paths.
downloaded_to_path : str
Absolute path where file(s) were downloaded to. Matches `download_to_path` if it was
provided as an argument.
"""
implicit_download_to_path = download_to_path is None
if component_path is not None:
# look for an exact match with `component_path` as a file
for path in self.list_paths():
if path == component_path:
if implicit_download_to_path:
# default to filename from `component_path`, in cwd
local_path = os.path.basename(component_path)
# avoid collision with existing file
local_path = _file_utils.without_collision(local_path)
else:
# exactly where the user requests
local_path = download_to_path
return ({path: local_path}, os.path.abspath(local_path))
# no exact match, so it's a folder download (or nonexistent path)
# figure out where files are going to be downloaded to
if implicit_download_to_path:
if component_path is None:
downloaded_to_path = DEFAULT_DOWNLOAD_DIR
# avoid collision with existing directory
downloaded_to_path = _file_utils.without_collision(downloaded_to_path)
else: # need to automatically determine directory
# NOTE: if `component_path` == "s3://" with any trailing slashes, it becomes "s3:"
downloaded_to_path = pathlib2.Path(component_path).name # final path component
if downloaded_to_path in {".", "..", "/", "s3:"}:
# rather than dump everything into cwd, use new child dir
downloaded_to_path = DEFAULT_DOWNLOAD_DIR
# avoid collision with existing directory
downloaded_to_path = _file_utils.without_collision(downloaded_to_path)
else:
# exactly where the user requests
downloaded_to_path = download_to_path
# collect paths in blob and map them to download locations
components_to_download = dict()
if component_path is None:
# download all
for path in self.list_paths():
local_path = os.path.join(
downloaded_to_path,
_file_utils.remove_prefix_dir(path, "s3:"),
)
components_to_download[path] = local_path
else:
# look for files contained in `component_path` as a directory
component_path_as_dir = component_path if component_path.endswith('/') else component_path+'/'
for path in self.list_paths():
if path.startswith(component_path_as_dir):
# rebase from `component_path` onto `downloaded_to_path`
# Implicit `download_to_path` example:
# component_blob.path = "coworker/downloads/data/info.csv"
# component_path = "coworker/downloads"
# downloaded_to_path = "downloads" or "downloads 1", etc.
# local_path = "downloads/data/info.csv"
# Explicit `download_to_path` example:
# component_blob.path = "coworker/downloads/data/info.csv"
# component_path = "coworker/downloads"
# downloaded_to_path = "my-data"
# local_path = "my-data/data/info.csv"
local_path = os.path.join(
downloaded_to_path,
_file_utils.remove_prefix_dir(path, prefix_dir=component_path),
)
components_to_download[path] = local_path
if not components_to_download:
raise KeyError("no components found for path {}".format(component_path))
return (components_to_download, os.path.abspath(downloaded_to_path))
@staticmethod
def _is_hidden_to_spark(path):
# PySpark ignores certain files and raises a "does not exist" error
# https://stackoverflow.com/a/38479545
return os.path.basename(path).startswith(('_', '.'))
@classmethod
def with_spark(cls, sc, paths):
"""
Creates a dataset blob with a SparkContext instance.
Parameters
----------
sc : pyspark.SparkContext
SparkContext instance.
paths : list of strs
List of paths to binary input data file(s).
Returns
-------
dataset :ref:`blob <blobs>`
Dataset blob capturing the metadata of the binary files.
"""
if isinstance(paths, six.string_types):
paths = [paths]
rdds = list(map(sc.binaryFiles, paths))
rdd = functools.reduce(lambda a,b: a.union(b), rdds)
def get_component(entry):
filepath, content = entry
return Component(
path=filepath,
size=len(content),
# last_modified=metadata['modificationTime'], # handle timezone?
md5=hashlib.md5(content).hexdigest(),
)
result = rdd.map(get_component)
result = result.collect()
obj = cls._create_empty()
obj._add_components(result)
return obj
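# Hedged usage sketch (hypothetical SparkContext and paths, and assuming a concrete
# subclass of _Dataset such as verta.dataset.Path; not part of this module):
#
#   from pyspark import SparkContext
#   sc = SparkContext.getOrCreate()
#   dataset = Path.with_spark(sc, ["hdfs:///datasets/train/"])
#   print(dataset.list_paths())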
@abc.abstractmethod
def add(self, paths):
pass
def download(self, component_path=None, download_to_path=None):
"""
Downloads `component_path` from this dataset if ModelDB-managed versioning was enabled.
Parameters
----------
component_path : str, optional
Original path of the file or directory in this dataset to download. If not provided,
all files will be downloaded.
download_to_path : str, optional
Path to download to. If not provided, the file(s) will be downloaded into a new path in
the current directory. If provided and the path already exists, it will be overwritten.
Returns
-------
downloaded_to_path : str
Absolute path where file(s) were downloaded to. Matches `download_to_path` if it was
provided as an argument.
"""
if not self._is_downloadable:
raise self._CANNOT_DOWNLOAD_ERROR
implicit_download_to_path = download_to_path is None
components_to_download, downloaded_to_path = self._get_components_to_download(
component_path,
download_to_path,
)
for path in components_to_download: # component paths
local_path = components_to_download[path] # dict will be updated near end of iteration
# create parent dirs
pathlib2.Path(local_path).parent.mkdir(parents=True, exist_ok=True)
# TODO: clean up empty parent dirs if something later fails
url = self._get_url_for_artifact(path, "GET").url
# stream download to avoid overwhelming memory
with _utils.make_request("GET", url, self._conn, stream=True) as response:
_utils.raise_for_http_error(response)
print("downloading {} from ModelDB".format(path))
if (implicit_download_to_path
and len(components_to_download) == 1): # single file download
# update `downloaded_to_path` in case changed to avoid overwrite
downloaded_to_path = _request_utils.download_file(response, local_path, overwrite_ok=False)
else:
# don't update `downloaded_to_path` here because we are either downloading:
# - single file with an explicit destination, so `local_path` won't change
# - directory, so individual path's `local_path` isn't important
_request_utils.download_file(response, local_path, overwrite_ok=True)
return os.path.abspath(downloaded_to_path)
def list_paths(self):
"""
Returns the paths of all components in this dataset.
Returns
-------
component_paths : list of str
Paths of all components.
"""
return list(sorted(
component.path
for component
in self._components_map.values()
))
def list_components(self):
"""
Returns the components in this dataset.
Returns
-------
components : list of :class:`~verta.dataset._dataset.Component`
Components.
"""
components = self._components_map.values()
return list(sorted(components, key=lambda component: component.path))
class Component(object):
"""
A dataset component returned by ``dataset.list_components()``.
Attributes
----------
path : str
File path.
base_path : str
Prefix of `path`.
size : int
File size.
last_modified : int
Unix time when this file was last modified.
sha256 : str
SHA-256 checksum.
md5 : str
MD5 checksum.
"""
def __init__(
self,
path, size=None, last_modified=None,
sha256=None, md5=None,
base_path=None,
internal_versioned_path=None, local_path=None):
# metadata
self.path = path
self.size = size
self.last_modified = last_modified
# checksums
self.sha256 = sha256
self.md5 = md5
# base path
self.base_path = base_path
# ModelDB versioning
self._internal_versioned_path = internal_versioned_path
self._local_path = local_path
def
the replay-memory.
# These have HIGH estimation errors for the Q-values.
idx_hi = np.random.choice(self.idx_err_hi,
size=self.num_samples_err_hi,
replace=False)
# Combine the indices.
idx = np.concatenate((idx_lo, idx_hi))
# Get the batches of states and Q-values.
states_batch = self.states[idx]
q_values_batch = self.q_values[idx]
return states_batch, q_values_batch
def all_batches(self, batch_size=128):
"""
Iterator for all the states and Q-values in the replay-memory.
It returns the indices for the beginning and end, as well as
a progress-counter between 0.0 and 1.0.
This function is not currently being used except by the function
estimate_all_q_values() below. These two functions are merely
included to make it easier for you to experiment with the code
by showing you an easy and efficient way to loop over all the
data in the replay-memory.
"""
# Start index for the current batch.
begin = 0
# Repeat until all batches have been processed.
while begin < self.num_used:
# End index for the current batch.
end = begin + batch_size
# Ensure the batch does not exceed the used replay-memory.
if end > self.num_used:
end = self.num_used
# Progress counter.
progress = end / self.num_used
# Yield the batch indices and completion-counter.
yield begin, end, progress
# Set the start-index for the next batch to the end of this batch.
begin = end
def estimate_all_q_values(self, model):
"""
Estimate all Q-values for the states in the replay-memory
using the model / Neural Network.
Note that this function is not currently being used. It is provided
to make it easier for you to experiment with this code, by showing
you an efficient way to iterate over all the states and Q-values.
:param model:
Instance of the NeuralNetwork-class.
"""
print("Re-calculating all Q-values in replay memory ...")
# Process the entire replay-memory in batches.
for begin, end, progress in self.all_batches():
# Print progress.
msg = "\tProgress: {0:.0%}"
msg = msg.format(progress)
print_progress(msg)
# Get the states for the current batch.
states = self.states[begin:end]
# Calculate the Q-values using the Neural Network
# and update the replay-memory.
self.q_values[begin:end] = model.get_q_values(states=states)
# Newline.
print()
def print_statistics(self):
"""Print statistics for the contents of the replay-memory."""
print("Replay-memory statistics:")
# Print statistics for the Q-values before they were updated
# in update_all_q_values().
msg = "\tQ-values Before, Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}"
print(msg.format(np.min(self.q_values_old),
np.mean(self.q_values_old),
np.max(self.q_values_old)))
# Print statistics for the Q-values after they were updated
# in update_all_q_values().
msg = "\tQ-values After, Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}"
print(msg.format(np.min(self.q_values),
np.mean(self.q_values),
np.max(self.q_values)))
# Print statistics for the difference in Q-values before and
# after the update in update_all_q_values().
q_dif = self.q_values - self.q_values_old
msg = "\tQ-values Diff., Min: {0:5.2f}, Mean: {1:5.2f}, Max: {2:5.2f}"
print(msg.format(np.min(q_dif),
np.mean(q_dif),
np.max(q_dif)))
# Print statistics for the number of large estimation errors.
# Don't use the estimation error for the last state in the memory,
# because its Q-values have not been updated.
err = self.estimation_errors[:-1]
err_count = np.count_nonzero(err > self.error_threshold)
msg = "\tNumber of large errors > {0}: {1} / {2} ({3:.1%})"
print(msg.format(self.error_threshold, err_count,
self.num_used, err_count / self.num_used))
# How much of the replay-memory is used by states with end_life.
end_life_pct = np.count_nonzero(self.end_life) / self.num_used
# How much of the replay-memory is used by states with end_episode.
end_episode_pct = np.count_nonzero(self.end_episode) / self.num_used
# How much of the replay-memory is used by states with non-zero reward.
reward_nonzero_pct = np.count_nonzero(self.rewards) / self.num_used
# Print those statistics.
msg = "\tend_life: {0:.1%}, end_episode: {1:.1%}, reward non-zero: {2:.1%}"
print(msg.format(end_life_pct, end_episode_pct, reward_nonzero_pct))
########################################################################
class LinearControlSignal:
"""
A control signal that changes linearly over time.
This is used to change e.g. the learning-rate for the optimizer
of the Neural Network, as well as other parameters.
TensorFlow has functionality for doing this, but it uses the
global_step counter inside the TensorFlow graph, while we
want the control signals to use a state-counter for the
game-environment. So it is easier to make this in Python.
"""
def __init__(self, start_value, end_value, num_iterations, repeat=False):
"""
Create a new object.
:param start_value:
Start-value for the control signal.
:param end_value:
End-value for the control signal.
:param num_iterations:
Number of iterations it takes to reach the end_value
from the start_value.
:param repeat:
Boolean whether to reset the control signal back to the start_value
after the end_value has been reached.
"""
# Store arguments in this object.
self.start_value = start_value
self.end_value = end_value
self.num_iterations = num_iterations
self.repeat = repeat
# Calculate the linear coefficient.
self._coefficient = (end_value - start_value) / num_iterations
def get_value(self, iteration):
"""Get the value of the control signal for the given iteration."""
if self.repeat:
iteration %= self.num_iterations
if iteration < self.num_iterations:
value = iteration * self._coefficient + self.start_value
else:
value = self.end_value
return value
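# Hedged usage sketch (illustrative values only): a learning-rate that decays linearly
# from 1e-3 to 1e-5 over the first five million state-counter steps and then stays at 1e-5.
#
#   learning_rate_control = LinearControlSignal(start_value=1e-3, end_value=1e-5,
#                                               num_iterations=5e6)
#   learning_rate_control.get_value(iteration=0)    # -> 0.001
#   learning_rate_control.get_value(iteration=1e7)  # -> 1e-05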
########################################################################
class EpsilonGreedy:
"""
The epsilon-greedy policy either takes a random action with
probability epsilon, or it takes the action for the highest
Q-value.
If epsilon is 1.0 then the actions are always random.
If epsilon is 0.0 then the actions are always argmax for the Q-values.
Epsilon is typically decreased linearly from 1.0 to 0.1
and this is also implemented in this class.
During testing, epsilon is usually chosen lower, e.g. 0.05 or 0.01
"""
def __init__(self, num_actions,
epsilon_testing=0.05,
num_iterations=1e6,
start_value=1.0, end_value=0.1,
repeat=False):
"""
:param num_actions:
Number of possible actions in the game-environment.
:param epsilon_testing:
Epsilon-value when testing.
:param num_iterations:
Number of training iterations required to linearly
decrease epsilon from start_value to end_value.
:param start_value:
Starting value for linearly decreasing epsilon.
:param end_value:
Ending value for linearly decreasing epsilon.
:param repeat:
Boolean whether to repeat and restart the linear decrease
when the end_value is reached, or only do it once and then
output the end_value forever after.
"""
# Store parameters.
self.num_actions = num_actions
self.epsilon_testing = epsilon_testing
# Create a control signal for linearly decreasing epsilon.
self.epsilon_linear = LinearControlSignal(num_iterations=num_iterations,
start_value=start_value,
end_value=end_value,
repeat=repeat)
def get_epsilon(self, iteration, training):
"""
Return the epsilon for the given iteration.
If training==True then epsilon is linearly decreased,
otherwise epsilon is a fixed number.
"""
if training:
epsilon = self.epsilon_linear.get_value(iteration=iteration)
else:
epsilon = self.epsilon_testing
return epsilon
def get_action(self, q_values, iteration, training):
"""
Use the epsilon-greedy policy to select an action.
:param q_values:
These are the Q-values that are estimated by the Neural Network
for the current state of the game-environment.
:param iteration:
This is an iteration counter. Here we use the number of states
that has been processed in the game-environment.
:param training:
Boolean whether we are training or testing the
Reinforcement Learning agent.
:return:
action (integer), epsilon (float)
"""
epsilon = self.get_epsilon(iteration=iteration, training=training)
# With probability epsilon.
if np.random.random() < epsilon:
# Select a random action.
action = np.random.randint(low=0, high=self.num_actions)
else:
# Otherwise select the action that has the highest Q-value.
action = np.argmax(q_values)
return action, epsilon
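# Hedged usage sketch (illustrative values only; `q_values` and `state_counter` are
# placeholders for values produced elsewhere in the agent):
#
#   policy = EpsilonGreedy(num_actions=4, num_iterations=1e6,
#                          start_value=1.0, end_value=0.1)
#   action, epsilon = policy.get_action(q_values=q_values,
#                                       iteration=state_counter,
#                                       training=True)
# With probability epsilon the action is drawn uniformly at random; otherwise it is
# argmax(q_values).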
########################################################################
class NeuralNetwork:
"""
Creates a Neural Network for Reinforcement Learning (Q-Learning).
Functions are provided for estimating Q-values from states of the
game-environment, and for optimizing the Neural Network so it becomes
better at estimating the Q-values.
"""
def __init__(self, num_actions, replay_memory):
"""
:param num_actions:
Number of discrete actions for the game-environment.
:param replay_memory:
Object-instance of the ReplayMemory-class.
"""
# Replay-memory used for sampling random batches.
self.replay_memory = replay_memory
# Path for saving/restoring checkpoints.
self.checkpoint_path = os.path.join(checkpoint_dir, "checkpoint")
# Placeholder variable for inputting states into the Neural Network.
# A state is a multi-dimensional array holding image-frames from
# the game-environment.
self.x = tf.placeholder(dtype=tf.float32, shape=[None] + state_shape, name='x')
# Placeholder variable for inputting the learning-rate to the optimizer.
self.learning_rate = tf.placeholder(dtype=tf.float32, shape=[])
# Placeholder variable for inputting the target Q-values
# that we want the Neural Network to be able to estimate.
self.q_values_new = tf.placeholder(tf.float32,
shape=[None, num_actions],
name='q_values_new')
# This is a hack that allows us to save/load the counter for
# the number of states processed in the game-environment.
# We will keep it as a variable in the TensorFlow-graph
# even though it will not actually be used by TensorFlow.
self.count_states = tf.Variable(initial_value=0,
| |
<filename>sdk/python/pulumi_exoscale/instance_pool.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['InstancePoolArgs', 'InstancePool']
@pulumi.input_type
class InstancePoolArgs:
def __init__(__self__, *,
size: pulumi.Input[int],
template_id: pulumi.Input[str],
zone: pulumi.Input[str],
affinity_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
deploy_target_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
elastic_ip_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_prefix: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
ipv6: Optional[pulumi.Input[bool]] = None,
key_pair: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_offering: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None,
virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a InstancePool resource.
:param pulumi.Input[int] size: The number of Compute instance members the Instance Pool manages.
:param pulumi.Input[str] template_id: The ID of the instance [template][template] to use when creating Compute instances. Usage of the [`compute_template`][d-compute_template] data source is recommended.
:param pulumi.Input[str] zone: The name of the [zone][zone] to deploy the Instance Pool into.
:param pulumi.Input[Sequence[pulumi.Input[str]]] affinity_group_ids: A list of [Anti-Affinity Group][r-affinity] IDs (at creation time only).
:param pulumi.Input[str] deploy_target_id: A Deploy Target ID.
:param pulumi.Input[str] description: The description of the Instance Pool.
:param pulumi.Input[int] disk_size: The managed Compute instances disk size.
:param pulumi.Input[Sequence[pulumi.Input[str]]] elastic_ip_ids: A list of [Elastic IP][eip-doc] IDs.
:param pulumi.Input[str] instance_prefix: The string to add as prefix to managed Compute instances name (default: `pool`).
:param pulumi.Input[str] instance_type: The managed Compute instances [type][type] (format: `FAMILY.SIZE`, e.g. `standard.medium`, `memory.huge`).
:param pulumi.Input[bool] ipv6: Enable IPv6 on managed Compute instances (default: `false`).
:param pulumi.Input[str] key_pair: The name of the [SSH key pair][sshkeypair] to install when creating Compute instances.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: A map of key/value labels.
:param pulumi.Input[str] name: The name of the Instance Pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] network_ids: A list of [Private Network][privnet-doc] IDs.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: A list of [Security Group][r-security_group] IDs (at creation time only).
:param pulumi.Input[str] service_offering: **Deprecated** The managed Compute instances size. Replaced by `instance_type`.
:param pulumi.Input[str] user_data: A [cloud-init][cloudinit] configuration to apply when creating Compute instances. Whenever possible don't base64-encode or gzip it yourself, as this will be automatically taken care of on your behalf by the provider.
:param pulumi.Input[Sequence[pulumi.Input[str]]] virtual_machines: The list of Instance Pool members (Compute instance IDs).
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "template_id", template_id)
pulumi.set(__self__, "zone", zone)
if affinity_group_ids is not None:
pulumi.set(__self__, "affinity_group_ids", affinity_group_ids)
if deploy_target_id is not None:
pulumi.set(__self__, "deploy_target_id", deploy_target_id)
if description is not None:
pulumi.set(__self__, "description", description)
if disk_size is not None:
pulumi.set(__self__, "disk_size", disk_size)
if elastic_ip_ids is not None:
pulumi.set(__self__, "elastic_ip_ids", elastic_ip_ids)
if instance_prefix is not None:
pulumi.set(__self__, "instance_prefix", instance_prefix)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if ipv6 is not None:
pulumi.set(__self__, "ipv6", ipv6)
if key_pair is not None:
pulumi.set(__self__, "key_pair", key_pair)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if network_ids is not None:
pulumi.set(__self__, "network_ids", network_ids)
if security_group_ids is not None:
pulumi.set(__self__, "security_group_ids", security_group_ids)
if service_offering is not None:
warnings.warn("""This attribute has been replaced by \"instance_type\".""", DeprecationWarning)
pulumi.log.warn("""service_offering is deprecated: This attribute has been replaced by \"instance_type\".""")
if service_offering is not None:
pulumi.set(__self__, "service_offering", service_offering)
if state is not None:
pulumi.set(__self__, "state", state)
if user_data is not None:
pulumi.set(__self__, "user_data", user_data)
if virtual_machines is not None:
pulumi.set(__self__, "virtual_machines", virtual_machines)
@property
@pulumi.getter
def size(self) -> pulumi.Input[int]:
"""
The number of Compute instance members the Instance Pool manages.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: pulumi.Input[int]):
pulumi.set(self, "size", value)
@property
@pulumi.getter(name="templateId")
def template_id(self) -> pulumi.Input[str]:
"""
The ID of the instance [template][template] to use when creating Compute instances. Usage of the [`compute_template`][d-compute_template] data source is recommended.
"""
return pulumi.get(self, "template_id")
@template_id.setter
def template_id(self, value: pulumi.Input[str]):
pulumi.set(self, "template_id", value)
@property
@pulumi.getter
def zone(self) -> pulumi.Input[str]:
"""
The name of the [zone][zone] to deploy the Instance Pool into.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: pulumi.Input[str]):
pulumi.set(self, "zone", value)
@property
@pulumi.getter(name="affinityGroupIds")
def affinity_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of [Anti-Affinity Group][r-affinity] IDs (at creation time only).
"""
return pulumi.get(self, "affinity_group_ids")
@affinity_group_ids.setter
def affinity_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "affinity_group_ids", value)
@property
@pulumi.getter(name="deployTargetId")
def deploy_target_id(self) -> Optional[pulumi.Input[str]]:
"""
A Deploy Target ID.
"""
return pulumi.get(self, "deploy_target_id")
@deploy_target_id.setter
def deploy_target_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deploy_target_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Instance Pool.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="diskSize")
def disk_size(self) -> Optional[pulumi.Input[int]]:
"""
The managed Compute instances disk size.
"""
return pulumi.get(self, "disk_size")
@disk_size.setter
def disk_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk_size", value)
@property
@pulumi.getter(name="elasticIpIds")
def elastic_ip_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of [Elastic IP][eip-doc] IDs.
"""
return pulumi.get(self, "elastic_ip_ids")
@elastic_ip_ids.setter
def elastic_ip_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "elastic_ip_ids", value)
@property
@pulumi.getter(name="instancePrefix")
def instance_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The string to add as prefix to managed Compute instances name (default: `pool`).
"""
return pulumi.get(self, "instance_prefix")
@instance_prefix.setter
def instance_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_prefix", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
The managed Compute instances [type][type] (format: `FAMILY.SIZE`, e.g. `standard.medium`, `memory.huge`).
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter
def ipv6(self) -> Optional[pulumi.Input[bool]]:
"""
Enable IPv6 on managed Compute instances (default: `false`).
"""
return pulumi.get(self, "ipv6")
@ipv6.setter
def ipv6(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ipv6", value)
@property
@pulumi.getter(name="keyPair")
def key_pair(self) -> Optional[pulumi.Input[str]]:
"""
The name of the [SSH key pair][sshkeypair] to install when creating Compute instances.
"""
return pulumi.get(self, "key_pair")
@key_pair.setter
def key_pair(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_pair", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of key/value labels.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Instance Pool.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkIds")
def network_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of [Private Network][privnet-doc] IDs.
"""
return pulumi.get(self, "network_ids")
@network_ids.setter
def network_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "network_ids", value)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of [Security Group][r-security_group] IDs (at creation time only).
"""
return pulumi.get(self, "security_group_ids")
@security_group_ids.setter
def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter(name="serviceOffering")
def service_offering(self) -> Optional[pulumi.Input[str]]:
"""
**Deprecated** The managed Compute instances size. Replaced by `instance_type`.
"""
return pulumi.get(self, "service_offering")
@service_offering.setter
def service_offering(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_offering", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="userData")
def user_data(self) -> Optional[pulumi.Input[str]]:
"""
A [cloud-init][cloudinit] configuration to apply when creating Compute instances. Whenever possible don't base64-encode or gzip it yourself, as this will be automatically taken care of on your behalf by the provider.
"""
return pulumi.get(self, "user_data")
@user_data.setter
def user_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_data", value)
@property
@pulumi.getter(name="virtualMachines")
def virtual_machines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of Instance Pool members (Compute instance IDs).
"""
return pulumi.get(self, "virtual_machines")
@virtual_machines.setter
def virtual_machines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "virtual_machines", value)
@pulumi.input_type
class _InstancePoolState:
def __init__(__self__, *,
affinity_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
deploy_target_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
elastic_ip_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_prefix: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
ipv6: Optional[pulumi.Input[bool]] = None,
key_pair: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_offering: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
state: Optional[pulumi.Input[str]] = None,
template_id: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None,
virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering InstancePool resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] affinity_group_ids: A list of [Anti-Affinity Group][r-affinity] IDs (at creation time only).
:param pulumi.Input[str] deploy_target_id: A Deploy Target ID.
:param pulumi.Input[str] description: The description of the Instance Pool.
:param pulumi.Input[int] disk_size: The managed Compute instances disk size.
:param pulumi.Input[Sequence[pulumi.Input[str]]] elastic_ip_ids: A list of [Elastic IP][eip-doc] IDs.
:param pulumi.Input[str] instance_prefix: The string to | |
#
# COPYRIGHT (C) 2012-2013 TCS Ltd
#
"""
.. module:: snpdb
:platform: Unix, Windows, MacOSX
:synopsis: Class for accessing SNP information from an sqlite
database
.. moduleauthor:: <NAME> (<EMAIL>); modified by <EMAIL>
Class for accessing variant information from an sqlite database. This is
primarily intended for reference databases of variants such as dbSNP,
1000 Genomes, etc.
"""
from gcn.etc import dbconfig
from gcn.lib.io import db
import os
from collections import namedtuple
from gcn.lib.databases.ready2upload import lib_clinvar
BENIGN,UNKNOWN2,PATHOGENIC = range(-1,2)
clitag = {BENIGN:'benign',UNKNOWN2:'vus',PATHOGENIC:'pathogenic'}
def classify_clnsig(clinvar_sigs):
if '2' in clinvar_sigs:
if '5' in clinvar_sigs:
return UNKNOWN2
else:
return BENIGN
elif '5' in clinvar_sigs:
return PATHOGENIC
elif '3' in clinvar_sigs:
if '4' in clinvar_sigs:
return UNKNOWN2
else:
return BENIGN
elif '4' in clinvar_sigs:
return PATHOGENIC
else:
return UNKNOWN2
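# Illustrative examples of the mapping implemented above ('2'/'3' are treated as
# (likely) benign ClinVar CLNSIG codes, '4'/'5' as (likely) pathogenic, and mixed
# benign/pathogenic evidence is classed as a VUS):
#
# >>> classify_clnsig(['5'])
# 1    # PATHOGENIC
# >>> classify_clnsig(['2'])
# -1   # BENIGN
# >>> classify_clnsig(['2', '5'])
# 0    # UNKNOWN2 (conflicting assertions)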
class SNPDB(db.DB):
"""Class to retrieve SNP information from a SNP database
The class makes the following assumptions
1. The name of the table is snps
2. The table defines the following columns
a) chrom
b) pos
c) id
d) ref
e) alt
The class is initialized with the name of the database to connect to.
The name should be defined in `gcn.etc.dbconfig.DBCONFIG`. Alternatively
the full path qualified name of the database may be passed
"""
def __init__(self, name):
"""Class initialization
Argument
name (string): Name of the database or filename for the database
"""
self.name = name
super(SNPDB, self).__init__()
if name in dbconfig.DBCONFIG:
self.load(name=name)
elif os.path.exists(name):
self.load(db=name)
else:
raise ValueError('No such database %s' % name)
def snp_by_location(self, chromosome, position):
"""Retrieve details of a SNP associated with a given
chromosomal location.
Args:
chromosome (str): Chromosome name in the form chrN where
N=1-23,X,Y
position (integer): Location on the chromosome, using 0-based
indexing
Returns:
SNP associated with the position as an instance of a `SNP`
namedtuple
"""
stmt = 'SELECT * FROM snps WHERE chrom=(?) AND pos=(?)'
if chromosome[:3] == 'chr':
chromosome = chromosome[3:]
if hasattr(self,'SNP'):
C = self.SNP
else:
C = self.SNP = self.make_namedtuple('snps', 'SNP')
return [C._make(r) for r in self.execute(stmt, (chromosome, position))]
def has_snp(self, chromosome, position, ref=None, alt=None):
"""Check if a SNP is present in the database given postion
and location. Optionally the ref and alt allele may also be
specified.
Args:
chromosome(str): Chromosome name in the form chrN where
N=1-23,X,Y
position (integer): Location on the chromosome, using 0-based
indexing
ref (str): Reference Allele (Optional)
alt (str): Alternate Allele (Optional)
Returns:
True if the SNP is present and False otherwise
"""
if chromosome[:3] == 'chr':
chromosome = chromosome[3:]
args = (chromosome, position)
stmt = 'SELECT * FROM snps WHERE chrom=(?) AND pos=(?)'
if ref is not None:
stmt += ' AND ref=(?)'
args = args + (ref,)
if alt is not None:
stmt += ' AND alt=(?) limit 1'
args = args + (alt,)
results = self.execute(stmt, args).fetchone()
if results:
return True
else:
return False
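# Illustrative usage sketch (assumes a 'DBSNP' entry in gcn.etc.dbconfig.DBCONFIG or
# a path to a compatible sqlite file; the coordinates and outputs are placeholders):
#
# >>> db = SNPDB('DBSNP')
# >>> db.has_snp('chr17', 41244935, ref='G', alt='A')
# True
# >>> db.snp_by_location('chr17', 41244935)
# [SNP(chrom='17', pos=41244935, id='rs...', ref='G', alt='A', ...)]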
def get_meta_info(self, attrib_id):
"""Returns the meta data for the given attribute id.
Args:
attrib_id(str): VCF Attribute Identifier
Eg. AF, GMAF, OM etc
Returns:
Named Tuple comprising of the fields -
id, count, type, description
"""
TYPEMAP = {'int': 'Integer',
'float': 'Float',
'text': 'String'}
meta_info = self.make_namedtuple('info', 'metainfo')
stmt = 'SELECT * FROM info WHERE id=(?)'
c = self.conn.cursor()
r = c.execute(stmt, ('info' + '_' + attrib_id, )).fetchall()
if r:
idfr, idfr_type, idfr_count, idfr_desc = r[0]
ctype = TYPEMAP[idfr_type]
if ctype == 'Integer':
if idfr_count == '0':
ctype = 'Flag'
return meta_info._make((attrib_id, ctype, idfr_count, idfr_desc))
else:
return
def iterate_db(self, chromosome):
"""Retrieve the SNPs present in the given chromosome.
Args:
chromosome (str): Chromosome name in the form chrN where
N=1-23,X,Y
Returns:
SNP associated with the position as an instance of a `SNP`
namedtuple
"""
stmt = "SELECT * FROM snps WHERE chrom='%s'"
if chromosome[:3] == 'chr':
chromosome = chromosome[3:]
if hasattr(self,'SNP'):
C = self.SNP
else:
C = self.SNP = self.make_namedtuple('snps', 'SNP')
for r in self.execute(stmt % chromosome):
yield C._make(r)
def is_clinically_associated(self, rsid):
"""This method is valid only for querying Clinvardb.
Check if a SNP is present in the database and annotated with
disease causing annotation. For this the 'CLNDBN' tag is checked
if its value is not '.' and instead hold a disease name.
Args:
rsid(str): dbSNP ID
Returns:
True if the SNP is disease associated and False otherwise
"""
stmt = "SELECT * FROM snps WHERE id='%s' and info_CLNDBN != '.'"\
% (rsid)
results = self.execute(stmt).fetchone()
if results:
return True
else:
return False
def select_all_hgmd(self):
""" This method is valid only for querying Clinvardb.
select variant key and clnsig (benign or pathogenic only)
"""
if self.name!='HGMDDB':
raise EnvironmentError('this method is only supported for HGMDDB')
outs = []
stmt = "SELECT chrom, pos, id, ref, alt, info_VC from snps"
for chrm,pos,id,ref,alt,clisig in self.execute(stmt).fetchall():
if clisig in ['FP','R']:
sig='benign_%s'%self.name
elif clisig in ['DM','DM?']:
sig='pathogenic_%s'%self.name
else:
sig='vus_%s'%self.name
outs.append([chrm,pos,id,ref,alt,sig])
return outs
def select_all_clinvar(self):
""" This method is valid only for querying Clinvardb.
select variant key and clnsig (benign or pathogenic only)
"""
outs = []
if self.name!='CLINVARDB':
raise EnvironmentError('this method is only supported for CLINVARDB')
stmt = "SELECT chrom, pos, id, ref, alt, info_CLNSIG from snps"
for chrm,pos,id,ref,alt,clisig in self.execute(stmt).fetchall():
sigclass = classify_clnsig(clisig.split('|'))
#print '%s|%s'%(sigclass,self.name) #debug
sig='%s_%s'%(clitag[sigclass],self.name)
outs.append([chrm,pos,id,ref,alt,sig])
return outs
def select_snps(self,min_maf,max_maf,sample_rate=1.0,snp_tag='.'):
import random
sampling = False
num2 = {}
if sample_rate < 1.0:
sampling = True
stmt = "select count(*) from snps where info_AF > %g and info_AF < %g" % (min_maf,max_maf)
outs = self.execute(stmt).fetchone()
R = int(outs[0])
R1 = int(round(sample_rate*R))
print('sampling %d out of %d' % (R1, R))
nums = list(range(R))
random.shuffle(nums)
nums = nums[:R1]
num2 = {}
for j in nums:
num2[j]=True
stmt = "select chrom, pos, id, ref, alt, info_AF from snps where info_AF > %g and info_AF < %g" % (min_maf,max_maf)
r = 0
outs = []
print('selecting ...')
for chrom,pos,id,ref,alt,maf in self.execute(stmt).fetchall():
if not sampling or (sampling and r in num2):
outs.append([chrom,pos,id,ref,alt,snp_tag])
r += 1
print('done.')
return outs
def get_hgmd_genes(self, clnsig2sels=['DM','DM?']):
""" This method return (genes) for HGMD
Args:
type (str): types of clinical significance
Returns:
genes (tuple): gene1,gene2,...
"""
if self.name!='HGMDDB':
raise EnvironmentError('this method is only supported for HGMDDB')
genes = []
stmt = "SELECT info_Gene, info_VC FROM snps"
results = self.execute(stmt).fetchall()
if results:
for gene,vc in results:
if vc in clnsig2sels:
if gene not in genes:
genes.append(gene)
return tuple(genes)
def get_clinvar_genes(self, clnsig2sels=['pathogenic']):
""" This method return (genes) for Clinvar
Args:
type (str): types of clinical significance
Returns:
genes (tuple): gene1,gene2,...
"""
if self.name!='CLINVARDB':
raise EnvironmentError('this method is only supported for CLINVARDB')
genes = []
stmt = "SELECT info_CLNSIG, info_GENEINFO FROM snps"
results = self.execute(stmt).fetchall()
if results:
for clisig, geneinfo in results:
if clisig and geneinfo:
sigclass = classify_clnsig(clisig.split('|'))
if clitag[sigclass] in clnsig2sels:
gene = geneinfo.split(':')[0]
if gene not in genes:
genes.append(gene)
return tuple(genes)
def get_snp_coord(self, snpid):
"""This method return (chrom, pos, ref, alts) for the given
snpid.
Args:
snpid(str): dbSNP ID
Returns:
snp_coord(tuple): (chrom, pos, ref, [alt1, alt2, ])
"""
stmt = "SELECT chrom, pos, ref, alt FROM snps WHERE id='%s' limit 1"\
% (snpid)
results = self.execute(stmt).fetchone()
if results:
alts = results[3].split(',')
results = list(results)
results[3] = alts
return tuple(results)
else:
return None
# for now the following are implemented as functions. If in the future
# there is a need to define methods specific to these databases
# these can be converted to classes while still supporting the current
# API
def KGDB():
"""Return an instance of `SNPDB` configured to search 1000 Genomes
Variations"""
return SNPDB('KGDB')
def DBSNP():
"""Return an instance of `SNPDB` configured to search the NCBI dbSNP
database"""
return SNPDB('DBSNP')
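# Sketch of how these convenience constructors are typically used (illustrative;
# both assume the corresponding entries exist in gcn.etc.dbconfig.DBCONFIG):
#
# >>> kg = KGDB()        # 1000 Genomes variants
# >>> dbsnp = DBSNP()    # NCBI dbSNP
# >>> for snp in dbsnp.iterate_db('chr21'):
# ...     print(snp.id, snp.pos)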
class ClinvarDB(SNPDB):
def __init__(self):
#open db connection
self.name = 'CLINVARDB'
super(ClinvarDB, self).__init__(self.name)
if self.name in dbconfig.DBCONFIG:
self.load(name=self.name)
elif os.path.exists(self.name):
self.load(db=self.name)
else:
raise ValueError('No such database %s' % self.name)
# self.report = ClinvarReport()
def count_lofs(self, goi_tup=None, vartypes={}):
LOF, NSM = range(2)
region_sql = "SELECT chrom, pos, ref, alt, info_DATE, info_NSF, info_NSM, info_NSN, info_PM, info_SPLOC, info_CLNSIG, info_VLD, info_GENEINFO from snps"
fields = "chrom cpos cref calt cdate cnsf cnsm cnsn cpm csploc csig cvld gene".split()
region_tup = namedtuple('region_tup', fields)
if goi_tup:
region_cond_sql = "%s where chrom = (?) and pos between (?) and (?)" % region_sql
results = self.execute(region_cond_sql, (goi_tup.chrom, goi_tup.start_bp, goi_tup.end_bp)).fetchall()
else:
results = self.execute(region_sql).fetchall()
for res in results:
rec = region_tup._make(res)
# create variant position key
pos_key = '%s_%s_%s_%s' | |
file for later
execution.
On a Windows system, these executable machine language programs often have a
suffix of .exe or .dll, which stand for executable and dynamic link
library respectively. On Linux and macOS, there is no suffix that uniquely marks
a file as executable.
If you were to open an executable file in a text editor, it would look
completely crazy and be unreadable.
It is not easy to read or write machine language, so it is nice that we have
compilers that allow us to write in high-level
languages like Python or C.
Now at this point in our discussion of compilers and interpreters, you should
be wondering a bit about the Python interpreter itself. What language is
it written in? Is it written in a compiled language? When we type
python, what exactly is happening?
The Python interpreter is written in a high-level language called C.
You can look at the actual source code for the Python interpreter by
going to www.python.org and working your way to their source code.
So Python is a program itself and it is compiled into machine code.
When you installed Python on your computer (or the vendor installed it),
you copied a machine-code copy of the translated Python program onto your
system. In Windows, the executable machine code for Python itself is likely
in a file.
That is more than you really need to know to be a Python programmer, but
sometimes it pays to answer those little nagging questions right at
the beginning.
Writing a program
Typing commands into the Python interpreter is a great way to experiment
with Python's features, but it is not recommended for solving more complex problems.
When we want to write a program,
we use a text editor to write the Python instructions into a file,
which is called a script. By
convention, Python scripts have names that end with .py.
To execute the script, you have to tell the Python interpreter
the name of the file. In a Unix or Windows command window,
you would type python hello.py as follows:
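A minimal sketch of what that looks like (the exact file contents here are an
assumption): hello.py holds a single print statement,
print('Hello world!')
and typing python hello.py in the command window runs the script, prints
Hello world!, and returns you to the prompt.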
We call the Python interpreter and tell it to read its source code from
the file hello.py instead of prompting us for lines of Python code
interactively.
You will notice that there was no need to have quit() at the end of
the Python program in the file. When Python is reading your source code
from a file, it knows to stop when it reaches the end of the file.
What is a program?
The definition of a program at its most basic is a sequence
of Python statements that have been crafted to do something.
Even our simple hello.py script is a program. It is a one-line
program and is not particularly useful, but in the strictest definition,
it is a Python program.
It might be easiest to understand what a program is by thinking about a problem
that a program might be built to solve, and then looking at a program
that would solve that problem.
Let's say you are doing Social Computing research on Facebook posts and
you are interested in the most frequently used word in a series of posts.
You could print out the stream of Facebook posts and pore over the text
looking for the most common word, but that would take a long time and be very
mistake prone. You would be smart to write a Python program to handle the
task quickly and accurately so you can spend the weekend doing something
fun.
For example, look at the following text about a clown and a car. Look at the
text and figure out the most common word and how many times it occurs.
Then imagine that you are doing this task looking at millions of lines of
text. Frankly it would be quicker for you to learn Python and write a
Python program to count the words than it would be to manually
scan the words.
The even better news is that I already came up with a simple program to
find the most common word in a text file. I wrote it,
tested it, and now I am giving it to you to use so you can save some time.
You don't even need to know Python to use this program. You will need to get through
Chapter ten of this book to fully understand the awesome Python techniques that were
used to make the program. You are the end user: you simply use the program and marvel
at its cleverness and how it saved you so much manual effort.
You simply type the code
into a file called words.py and run it or you download the source
code from http://www.py4e.com/code3/ and run it.
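A minimal sketch of such a word-counting program (an illustration along the lines of
the words.py described above, not necessarily the exact file from the site; it assumes
the posts have been saved to a plain-text file):
name = input('Enter file: ')
handle = open(name)
counts = dict()
for line in handle:
    for word in line.split():
        counts[word] = counts.get(word, 0) + 1
bigcount = None
bigword = None
for word, count in counts.items():
    if bigcount is None or count > bigcount:
        bigword = word
        bigcount = count
print(bigword, bigcount)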
This is a good example of how the Python language acts as an intermediary
between you (the end user) and me (the programmer). Python is a way for us to exchange useful
instruction sequences (i.e., programs) in a common language that can be used by anyone who
installs Python on their computer. So neither of us is talking to Python;
instead, we are communicating with each other through Python.
The building blocks of programs
In the next few chapters, we will learn more about the vocabulary, sentence structure,
paragraph structure, and story structure of Python. We will learn about the powerful
capabilities of Python and how to compose those capabilities together to create useful
programs.
There are some low-level conceptual patterns that we use to construct programs. These
constructs are not just for Python programs, they are part of every programming language
from machine language up to the high-level languages.
Input: Get data from the outside world. This might be
reading data from a file, or even some kind of sensor like
a microphone or GPS. In our initial programs, our input will come from the user
typing data on the keyboard.
Output: Display the results of the program on a screen
or store them in a file or perhaps write them to a device like a
speaker to play music or speak text.
Sequential execution: Perform statements one after
another in the order they are encountered in the script.
Conditional execution: Check for certain conditions and
then execute or skip a sequence of statements.
Repeated execution: Perform some set of statements
repeatedly, usually with
some variation.
Reuse: Write a set of instructions once and give them a name
and then reuse those instructions as needed throughout your program.
It sounds almost too simple to be true, and of course it is never
so simple. It is like saying that walking is simply
putting one foot in front of the other. The art
of writing a program is composing and weaving these
basic elements together many times over to produce something
that is useful to its users.
The word counting program above directly uses all of
these patterns except for one.
What could possibly go wrong?
As we saw in our earliest conversations with Python, we must
communicate very precisely when we write Python code. The smallest
deviation or mistake will cause Python to give up looking at your
program.
Beginning programmers often take the fact that Python leaves no
room for errors as evidence that Python is mean, hateful, and cruel.
While Python seems to like everyone else, Python knows them
personally and holds a grudge against them. Because of this grudge,
Python takes our perfectly written programs and rejects them as
unfit just to torment us.
There is little to be gained by arguing with Python. It is just a tool.
It has no emotions and it is happy and ready to serve you whenever you
need it. Its error messages sound harsh, but they are just Python's
call for help. It has looked at what you typed, and it simply cannot
understand what you have entered.
Python is much more like a dog, loving you unconditionally, having a few
key words that it understands, looking at you with a sweet look on its
face (>>>), and waiting for you to say something it understands.
When Python says SyntaxError: invalid syntax, it is simply wagging
its tail and saying, You seemed to say something but I just don't
understand what you meant, but please keep talking to me (>>>).
As your programs become increasingly sophisticated, you will encounter three
general types of errors:
Syntax errors: These are the first errors you will make and the easiest
to fix. A syntax error means that you have violated the grammar rules of Python.
Python | |
based on managed rules.
"""
return pulumi.get(self, "tag_key_scope")
@tag_key_scope.setter
def tag_key_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_key_scope", value)
@property
@pulumi.getter(name="tagValueScope")
def tag_value_scope(self) -> Optional[pulumi.Input[str]]:
"""
The tag value to monitor, used together with the TagKeyScope option. This only applies to rules created from managed rules.
"""
return pulumi.get(self, "tag_value_scope")
@tag_value_scope.setter
def tag_value_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag_value_scope", value)
@pulumi.input_type
class _RuleState:
def __init__(__self__, *,
config_rule_trigger_types: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exclude_resource_ids_scope: Optional[pulumi.Input[str]] = None,
input_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
region_ids_scope: Optional[pulumi.Input[str]] = None,
resource_group_ids_scope: Optional[pulumi.Input[str]] = None,
resource_types_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
risk_level: Optional[pulumi.Input[int]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
scope_compliance_resource_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_detail_message_type: Optional[pulumi.Input[str]] = None,
source_identifier: Optional[pulumi.Input[str]] = None,
source_maximum_execution_frequency: Optional[pulumi.Input[str]] = None,
source_owner: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tag_key_scope: Optional[pulumi.Input[str]] = None,
tag_value_scope: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Rule resources.
:param pulumi.Input[str] config_rule_trigger_types: The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
:param pulumi.Input[str] description: The description of the Config Rule.
:param pulumi.Input[str] exclude_resource_ids_scope: The resource IDs to exclude from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
:param pulumi.Input[Mapping[str, Any]] input_parameters: Threshold value for managed rule triggering.
:param pulumi.Input[str] maximum_execution_frequency: The frequency of the compliance evaluations, it is required if the ConfigRuleTriggerTypes value is ScheduledNotification. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`.
:param pulumi.Input[str] region_ids_scope: The region IDs to monitor, separated by commas. This only applies to rules created from managed rules.
:param pulumi.Input[str] resource_group_ids_scope: The resource group IDs to monitor, separated by commas. This only applies to rules created from managed rules.
:param pulumi.Input[Sequence[pulumi.Input[str]]] resource_types_scopes: Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
:param pulumi.Input[int] risk_level: The risk level of the Config Rule. Valid values: `1`: Critical, `2`: Warning, `3`: Info.
:param pulumi.Input[str] rule_name: The name of the Config Rule.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scope_compliance_resource_types: Field `scope_compliance_resource_types` has been deprecated from provider version 1.124.1. New field `resource_types_scope` instead.
:param pulumi.Input[str] source_detail_message_type: Field `source_detail_message_type` has been deprecated from provider version 1.124.1. New field `config_rule_trigger_types` instead.
:param pulumi.Input[str] source_identifier: The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
:param pulumi.Input[str] source_maximum_execution_frequency: Field `source_maximum_execution_frequency` has been deprecated from provider version 1.124.1. New field `maximum_execution_frequency` instead.
:param pulumi.Input[str] source_owner: Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and you own the rule. `ALIYUN`: The rule is a managed rule and Alibaba Cloud owns the rule.
:param pulumi.Input[str] tag_key_scope: The tag key to monitor. This only applies to rules created from managed rules.
:param pulumi.Input[str] tag_value_scope: The tag value to monitor, used together with the TagKeyScope option. This only applies to rules created from managed rules.
"""
if config_rule_trigger_types is not None:
pulumi.set(__self__, "config_rule_trigger_types", config_rule_trigger_types)
if description is not None:
pulumi.set(__self__, "description", description)
if exclude_resource_ids_scope is not None:
pulumi.set(__self__, "exclude_resource_ids_scope", exclude_resource_ids_scope)
if input_parameters is not None:
pulumi.set(__self__, "input_parameters", input_parameters)
if maximum_execution_frequency is not None:
pulumi.set(__self__, "maximum_execution_frequency", maximum_execution_frequency)
if region_ids_scope is not None:
pulumi.set(__self__, "region_ids_scope", region_ids_scope)
if resource_group_ids_scope is not None:
pulumi.set(__self__, "resource_group_ids_scope", resource_group_ids_scope)
if resource_types_scopes is not None:
pulumi.set(__self__, "resource_types_scopes", resource_types_scopes)
if risk_level is not None:
pulumi.set(__self__, "risk_level", risk_level)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
if scope_compliance_resource_types is not None:
warnings.warn("""Field 'scope_compliance_resource_types' has been deprecated from provider version 1.124.1. New field 'resource_types_scope' instead.""", DeprecationWarning)
pulumi.log.warn("""scope_compliance_resource_types is deprecated: Field 'scope_compliance_resource_types' has been deprecated from provider version 1.124.1. New field 'resource_types_scope' instead.""")
if scope_compliance_resource_types is not None:
pulumi.set(__self__, "scope_compliance_resource_types", scope_compliance_resource_types)
if source_detail_message_type is not None:
warnings.warn("""Field 'source_detail_message_type' has been deprecated from provider version 1.124.1. New field 'config_rule_trigger_types' instead.""", DeprecationWarning)
pulumi.log.warn("""source_detail_message_type is deprecated: Field 'source_detail_message_type' has been deprecated from provider version 1.124.1. New field 'config_rule_trigger_types' instead.""")
if source_detail_message_type is not None:
pulumi.set(__self__, "source_detail_message_type", source_detail_message_type)
if source_identifier is not None:
pulumi.set(__self__, "source_identifier", source_identifier)
if source_maximum_execution_frequency is not None:
warnings.warn("""Field 'source_maximum_execution_frequency' has been deprecated from provider version 1.124.1. New field 'maximum_execution_frequency' instead.""", DeprecationWarning)
pulumi.log.warn("""source_maximum_execution_frequency is deprecated: Field 'source_maximum_execution_frequency' has been deprecated from provider version 1.124.1. New field 'maximum_execution_frequency' instead.""")
if source_maximum_execution_frequency is not None:
pulumi.set(__self__, "source_maximum_execution_frequency", source_maximum_execution_frequency)
if source_owner is not None:
pulumi.set(__self__, "source_owner", source_owner)
if status is not None:
pulumi.set(__self__, "status", status)
if tag_key_scope is not None:
pulumi.set(__self__, "tag_key_scope", tag_key_scope)
if tag_value_scope is not None:
pulumi.set(__self__, "tag_value_scope", tag_value_scope)
@property
@pulumi.getter(name="configRuleTriggerTypes")
def config_rule_trigger_types(self) -> Optional[pulumi.Input[str]]:
"""
The trigger type of the rule. Valid values: `ConfigurationItemChangeNotification`: The rule is triggered upon configuration changes. `ScheduledNotification`: The rule is triggered as scheduled.
"""
return pulumi.get(self, "config_rule_trigger_types")
@config_rule_trigger_types.setter
def config_rule_trigger_types(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_rule_trigger_types", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the Config Rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="excludeResourceIdsScope")
def exclude_resource_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The resource IDs to exclude from monitoring, separated by commas. This only applies to rules created from managed rules; for custom rules this field is empty.
"""
return pulumi.get(self, "exclude_resource_ids_scope")
@exclude_resource_ids_scope.setter
def exclude_resource_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exclude_resource_ids_scope", value)
@property
@pulumi.getter(name="inputParameters")
def input_parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Threshold value for managed rule triggering.
"""
return pulumi.get(self, "input_parameters")
@input_parameters.setter
def input_parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "input_parameters", value)
@property
@pulumi.getter(name="maximumExecutionFrequency")
def maximum_execution_frequency(self) -> Optional[pulumi.Input[str]]:
"""
The frequency of the compliance evaluations, it is required if the ConfigRuleTriggerTypes value is ScheduledNotification. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, `TwentyFour_Hours`.
"""
return pulumi.get(self, "maximum_execution_frequency")
@maximum_execution_frequency.setter
def maximum_execution_frequency(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maximum_execution_frequency", value)
@property
@pulumi.getter(name="regionIdsScope")
def region_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The region IDs to monitor, separated by commas. This only applies to rules created from managed rules.
"""
return pulumi.get(self, "region_ids_scope")
@region_ids_scope.setter
def region_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region_ids_scope", value)
@property
@pulumi.getter(name="resourceGroupIdsScope")
def resource_group_ids_scope(self) -> Optional[pulumi.Input[str]]:
"""
The resource group IDs to monitor, separated by commas. This only applies to rules created from managed rules.
"""
return pulumi.get(self, "resource_group_ids_scope")
@resource_group_ids_scope.setter
def resource_group_ids_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_ids_scope", value)
@property
@pulumi.getter(name="resourceTypesScopes")
def resource_types_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Resource types to be evaluated. [Alibaba Cloud services that support Cloud Config.](https://www.alibabacloud.com/help/en/doc-detail/127411.htm)
"""
return pulumi.get(self, "resource_types_scopes")
@resource_types_scopes.setter
def resource_types_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "resource_types_scopes", value)
@property
@pulumi.getter(name="riskLevel")
def risk_level(self) -> Optional[pulumi.Input[int]]:
"""
The risk level of the Config Rule. Valid values: `1`: Critical, `2`: Warning, `3`: Info.
"""
return pulumi.get(self, "risk_level")
@risk_level.setter
def risk_level(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "risk_level", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Config Rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter(name="scopeComplianceResourceTypes")
def scope_compliance_resource_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Field `scope_compliance_resource_types` has been deprecated from provider version 1.124.1. New field `resource_types_scope` instead.
"""
return pulumi.get(self, "scope_compliance_resource_types")
@scope_compliance_resource_types.setter
def scope_compliance_resource_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "scope_compliance_resource_types", value)
@property
@pulumi.getter(name="sourceDetailMessageType")
def source_detail_message_type(self) -> Optional[pulumi.Input[str]]:
"""
Field `source_detail_message_type` has been deprecated from provider version 1.124.1. New field `config_rule_trigger_types` instead.
"""
return pulumi.get(self, "source_detail_message_type")
@source_detail_message_type.setter
def source_detail_message_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_detail_message_type", value)
@property
@pulumi.getter(name="sourceIdentifier")
def source_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the rule. For a managed rule, the value is the name of the managed rule. For a custom rule, the value is the ARN of the custom rule. Using managed rules, refer to [List of Managed rules.](https://www.alibabacloud.com/help/en/doc-detail/127404.htm)
"""
return pulumi.get(self, "source_identifier")
@source_identifier.setter
def source_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_identifier", value)
@property
@pulumi.getter(name="sourceMaximumExecutionFrequency")
def source_maximum_execution_frequency(self) -> Optional[pulumi.Input[str]]:
"""
Field `source_maximum_execution_frequency` has been deprecated from provider version 1.124.1. New field `maximum_execution_frequency` instead.
"""
return pulumi.get(self, "source_maximum_execution_frequency")
@source_maximum_execution_frequency.setter
def source_maximum_execution_frequency(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_maximum_execution_frequency", value)
@property
@pulumi.getter(name="sourceOwner")
def source_owner(self) -> Optional[pulumi.Input[str]]:
"""
Specifies whether you or Alibaba Cloud owns and manages the rule. Valid values: `CUSTOM_FC`: The rule is a custom rule and | |
1.1155787450000001)]),
('P',
[(5.5831385290000002, -0.1283927634),
(2.174045204, 0.5852047641),
(0.95702005089999997, 0.54394420399999999)]),
('D',
[(5.5831385290000002, 0.12506621379999999),
(2.174045204, 0.66867855769999995),
(0.95702005089999997, 0.30524682450000001)]),
('S',
[(0.62358164199999999, -0.38426426070000003),
(0.3357601616, -0.1972567438),
(0.1483111678, 1.3754955120000001)]),
('P',
[(0.62358164199999999, -0.34816915259999998),
(0.3357601616, 0.62903236900000004),
(0.1483111678, 0.66628327430000001)])],
51: [('S',
[(5586.9870019999998, 0.15432896730000001),
(1017.676657, 0.53532814230000003),
(275.42301889999999, 0.44463454219999998)]),
('S',
[(492.19248879999998, -0.099967229199999993),
(114.37494940000001, 0.3995128261),
(37.198283359999998, 0.70011546889999998)]),
('P',
[(492.19248879999998, 0.15591627499999999),
(114.37494940000001, 0.60768371860000003),
(37.198283359999998, 0.3919573931)]),
('S',
[(62.521797749999998, -0.2277635023),
(19.07114112, 0.21754360440000001),
(7.3582391310000004, 0.91667696109999997)]),
('P',
[(62.521797749999998, 0.0049515111999999997),
(19.07114112, 0.57776646909999996),
(7.3582391310000004, 0.4846460366)]),
('D',
[(62.521797749999998, 0.2197679508),
(19.07114112, 0.65554736270000002),
(7.3582391310000004, 0.28657325900000002)]),
('S',
[(6.1206931490000001, -0.33061006259999998),
(2.383366187, 0.057610953399999998),
(1.0491636630000001, 1.1155787450000001)]),
('P',
[(6.1206931490000001, -0.1283927634),
(2.383366187, 0.5852047641),
(1.0491636630000001, 0.54394420399999999)]),
('D',
[(6.1206931490000001, 0.12506621379999999),
(2.383366187, 0.66867855769999995),
(1.0491636630000001, 0.30524682450000001)]),
('S',
[(0.65292269280000004, -0.38426426070000003),
(0.3515585034, -0.1972567438),
(0.1552895732, 1.3754955120000001)]),
('P',
[(0.65292269280000004, -0.34816915259999998),
(0.3515585034, 0.62903236900000004),
(0.1552895732, 0.66628327430000001)])],
52: [('S',
[(5810.0615909999997, 0.15432896730000001),
(1058.309972, 0.53532814230000003),
(286.4199797, 0.44463454219999998)]),
('S',
[(512.75419199999999, -0.099967229199999993),
(119.15304709999999, 0.3995128261),
(38.752268999999998, 0.70011546889999998)]),
('P',
[(512.75419199999999, 0.15591627499999999),
(119.15304709999999, 0.60768371860000003),
(38.752268999999998, 0.3919573931)]),
('S',
[(65.985562270000003, -0.2277635023),
(20.127699700000001, 0.21754360440000001),
(7.765892279, 0.91667696109999997)]),
('P',
[(65.985562270000003, 0.0049515111999999997),
(20.127699700000001, 0.57776646909999996),
(7.765892279, 0.4846460366)]),
('D',
[(65.985562270000003, 0.2197679508),
(20.127699700000001, 0.65554736270000002),
(7.765892279, 0.28657325900000002)]),
('S',
[(6.7079569210000001, -0.33061006259999998),
(2.6120436549999999, 0.057610953399999998),
(1.149828048, 1.1155787450000001)]),
('P',
[(6.7079569210000001, -0.1283927634),
(2.6120436549999999, 0.5852047641),
(1.149828048, 0.54394420399999999)]),
('D',
[(6.7079569210000001, 0.12506621379999999),
(2.6120436549999999, 0.66867855769999995),
(1.149828048, 0.30524682450000001)]),
('S',
[(0.70127134830000004, -0.38426426070000003),
(0.37759126529999998, -0.1972567438),
(0.16678870200000001, 1.3754955120000001)]),
('P',
[(0.70127134830000004, -0.34816915259999998),
(0.37759126529999998, 0.62903236900000004),
(0.16678870200000001, 0.66628327430000001)])],
53: [('S',
[(6035.1836229999999, 0.15432896730000001),
(1099.316231, 0.53532814230000003),
(297.5178737, 0.44463454219999998)]),
('S',
[(533.73664180000003, -0.099967229199999993),
(124.0289171, 0.3995128261),
(40.338053279999997, 0.70011546889999998)]),
('P',
[(533.73664180000003, 0.15591627499999999),
(124.0289171, 0.60768371860000003),
(40.338053279999997, 0.3919573931)]),
('S',
[(69.54270545, -0.2277635023),
(21.212741749999999, 0.21754360440000001),
(8.1845352340000002, 0.91667696109999997)]),
('P',
[(69.54270545, 0.0049515111999999997),
(21.212741749999999, 0.57776646909999996),
(8.1845352340000002, 0.4846460366)]),
('D',
[(69.54270545, 0.2197679508),
(21.212741749999999, 0.65554736270000002),
(8.1845352340000002, 0.28657325900000002)]),
('S',
[(7.2959911960000001, -0.33061006259999998),
(2.8410211539999999, 0.057610953399999998),
(1.2506245060000001, 1.1155787450000001)]),
('P',
[(7.2959911960000001, -0.1283927634),
(2.8410211539999999, 0.5852047641),
(1.2506245060000001, 0.54394420399999999)]),
('D',
[(7.2959911960000001, 0.12506621379999999),
(2.8410211539999999, 0.66867855769999995),
(1.2506245060000001, 0.30524682450000001)]),
('S',
[(0.79003645820000001, -0.38426426070000003),
(0.42538578919999998, -0.1972567438),
(0.1879003836, 1.3754955120000001)]),
('P',
[(0.79003645820000001, -0.34816915259999998),
(0.42538578919999998, 0.62903236900000004),
(0.1879003836, 0.66628327430000001)])]}
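# Data layout note: each basis-set dictionary in this file maps an atomic number to a
# list of contracted shells, and each shell is a (shell_type, [(exponent, coefficient),
# ...]) tuple. Illustrative access (using the dictionary defined just below):
#
# >>> shell_type, primitives = g631ss[6][0]   # first contracted S shell of carbon
# >>> shell_type
# 'S'
# >>> len(primitives)                         # six primitive Gaussians
# 6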
g631ss = \
{1: [('S',
[(18.731137, 0.033494599999999999),
(2.8253936999999998, 0.23472694999999999),
(0.64012170000000002, 0.81375732999999995)]),
('S', [(0.1612778, 1.0)]),
('P', [(1.1000000000000001, 1.0)])],
2: [('S',
[(38.421633999999997, 0.023765999999999999),
(5.7780300000000002, 0.15467900000000001),
(1.2417739999999999, 0.46962999999999999)]),
('S', [(0.29796400000000001, 1.0)]),
('P', [(1.1000000000000001, 1.0)])],
3: [('S',
[(642.41891999999996, 0.0021426000000000001),
(96.798514999999995, 0.016208899999999998),
(22.091121000000001, 0.077315599999999998),
(6.2010702999999996, 0.245786),
(1.9351176999999999, 0.47018900000000002),
(0.63673579999999996, 0.34547080000000002)]),
('S',
[(2.3249184000000001, -0.035091700000000003),
(0.63243059999999995, -0.19123280000000001),
(0.079053399999999996, 1.0839878000000001)]),
('P',
[(2.3249184000000001, 0.0089414999999999998),
(0.63243059999999995, 0.14100950000000001),
(0.079053399999999996, 0.94536370000000003)]),
('S', [(0.035962000000000001, 1.0)]),
('P', [(0.035962000000000001, 1.0)]),
('D', [(0.20000000000000001, 1.0)])],
4: [('S',
[(1264.5857000000001, 0.0019448),
(189.93681000000001, 0.0148351),
(43.159089000000002, 0.072090600000000005),
(12.098663, 0.23715420000000001),
(3.8063232, 0.46919870000000002),
(1.2728903, 0.35652020000000001)]),
('S',
[(3.1964630999999999, -0.1126487),
(0.74781330000000001, -0.2295064),
(0.2199663, 1.1869167)]),
('P',
[(3.1964630999999999, 0.055980200000000001),
(0.74781330000000001, 0.26155060000000002),
(0.2199663, 0.79397229999999996)]),
('S', [(0.082309900000000005, 1.0)]),
('P', [(0.082309900000000005, 1.0)]),
('D', [(0.40000000000000002, 1.0)])],
5: [('S',
[(2068.8823000000002, 0.0018663),
(310.64956999999998, 0.0142515),
(70.683032999999995, 0.069551600000000005),
(19.861080000000001, 0.2325729),
(6.2993047999999998, 0.46707870000000001),
(2.127027, 0.36343140000000002)]),
('S',
[(4.7279710000000001, -0.1303938),
(1.1903376999999999, -0.13078890000000001),
(0.3594117, 1.1309444)]),
('P',
[(4.7279710000000001, 0.0745976),
(1.1903376999999999, 0.30784669999999997),
(0.3594117, 0.74345680000000003)]),
('S', [(0.12675120000000001, 1.0)]),
('P', [(0.12675120000000001, 1.0)]),
('D', [(0.59999999999999998, 1.0)])],
6: [('S',
[(3047.5248999999999, 0.0018347000000000001),
(457.36950999999999, 0.014037300000000001),
(103.94869, 0.068842600000000004),
(29.210155, 0.23218440000000001),
(9.2866630000000008, 0.4679413),
(3.1639270000000002, 0.36231200000000002)]),
('S',
[(7.8682724000000004, -0.11933240000000001),
(1.8812884999999999, -0.1608542),
(0.54424930000000005, 1.1434564)]),
('P',
[(7.8682724000000004, 0.068999099999999994),
(1.8812884999999999, 0.31642399999999998),
(0.54424930000000005, 0.74430830000000003)]),
('S', [(0.16871439999999999, 1.0)]),
('P', [(0.16871439999999999, 1.0)]),
('D', [(0.80000000000000004, 1.0)])],
7: [('S',
[(4173.5110000000004, 0.0018347999999999999),
(627.4579, 0.013995),
(142.90209999999999, 0.068586999999999995),
(40.23433, 0.232241),
(12.820209999999999, 0.46906999999999999),
(4.3904370000000004, 0.36045500000000003)]),
('S',
[(11.626358, -0.11496099999999999),
(2.7162799999999998, -0.16911799999999999),
(0.77221799999999996, 1.1458520000000001)]),
('P',
[(11.626358, 0.067580000000000001),
(2.7162799999999998, 0.323907),
(0.77221799999999996, 0.74089499999999997)]),
('S', [(0.21203130000000001, 1.0)]),
('P', [(0.21203130000000001, 1.0)]),
('D', [(0.80000000000000004, 1.0)])],
8: [('S',
[(5484.6716999999999, 0.0018311),
(825.23495000000003, 0.0139501),
(188.04696000000001, 0.068445099999999995),
(52.964500000000001, 0.23271430000000001),
(16.897570000000002, 0.47019300000000003),
(5.7996353000000003, 0.35852089999999998)]),
('S',
[(15.539616000000001, -0.1107775),
(3.5999336, -0.1480263),
(1.0137617999999999, 1.1307670000000001)]),
('P',
[(15.539616000000001, 0.070874300000000001),
(3.5999336, 0.33975280000000002),
(1.0137617999999999, 0.72715859999999999)]),
('S', [(0.27000580000000002, 1.0)]),
('P', [(0.27000580000000002, 1.0)]),
('D', [(0.80000000000000004, 1.0)])],
9: [('S',
[(7001.7130900000002, 0.0018196168999999999),
(1051.36609, 0.013916079600000001),
(239.28568999999999, 0.068405324500000003),
(67.397445300000001, 0.23318575999999999),
(21.519957300000002, 0.47126743900000001),
(7.4031013000000003, 0.35661854599999998)]),
('S',
[(20.847952800000002, -0.10850697500000001),
(4.80830834, -0.14645165800000001),
(1.3440698600000001, 1.1286885799999999)]),
('P',
[(20.847952800000002, 0.071628724300000002),
(4.80830834, 0.34591210300000003),
(1.3440698600000001, 0.72246995700000005)]),
('S', [(0.35815139299999998, 1.0)]),
('P', [(0.35815139299999998, 1.0)]),
('D', [(0.80000000000000004, 1.0)])],
10: [('S',
[(8425.8515299999999, 0.0018843480999999999),
(1268.5193999999999, 0.0143368994),
(289.62141400000002, 0.070109623300000007),
(81.859003999999999, 0.237373266),
(26.2515079, 0.473007126),
(9.0947205100000001, 0.34840124099999997)]),
('S',
[(26.532131, -0.10711828700000001),
(6.1017550099999998, -0.146163821),
(1.69627153, 1.1277735)]),
('P',
[(26.532131, 0.071909588499999996),
(6.1017550099999998, 0.34951337199999999),
(1.69627153, 0.71994051199999998)]),
('S', [(0.44581870000000001, 1.0)]),
('P', [(0.44581870000000001, 1.0)]),
('D', [(0.80000000000000004, 1.0)])],
11: [('S',
[(9993.2000000000007, 0.0019377000000000001),
(1499.8900000000001, 0.014807000000000001),
(341.95100000000002, 0.072706000000000007),
(94.679699999999997, 0.25262899999999999),
(29.734500000000001, 0.49324200000000001),
(10.0063, 0.31316899999999998)]),
('S',
[(150.96299999999999, -0.0035420999999999998),
(35.587800000000001, -0.043958999999999998),
(11.1683, -0.10975210000000001),
(3.9020100000000002, 0.18739800000000001),
(1.3817699999999999, 0.64669900000000002),
(0.46638200000000002, 0.306058)]),
('P',
[(150.96299999999999, 0.0050017000000000004),
(35.587800000000001, 0.035511000000000001),
(11.1683, 0.14282500000000001),
(3.9020100000000002, 0.33861999999999998),
(1.3817699999999999, 0.45157900000000001),
(0.46638200000000002, 0.27327099999999999)]),
('S',
[(0.49796600000000002, -0.248503),
(0.084352999999999997, -0.13170399999999999),
(0.066635, 1.2335199999999999)]),
('P',
[(0.49796600000000002, -0.023022999999999998),
(0.084352999999999997, 0.95035899999999995),
(0.066635, 0.059858000000000001)]),
('S', [(0.025954399999999999, 1.0)]),
('P', [(0.025954399999999999, 1.0)]),
('D', [(0.17499999999999999, 1.0)])],
12: [('S',
[(11722.799999999999, 0.0019778),
(1759.9300000000001, 0.015114000000000001),
(400.846, 0.073911000000000004),
(112.807, 0.249191),
(35.999699999999997, 0.48792799999999997),
(12.1828, 0.319662)]),
('S',
[(189.18000000000001, -0.0032372),
(45.2119, -0.041008000000000003),
(14.356299999999999, -0.11260000000000001),
(5.1388600000000002, 0.14863299999999999),
(1.90652, 0.61649699999999996),
(0.70588700000000004, 0.36482900000000001)]),
('P',
[(189.18000000000001, 0.0049281000000000004),
(45.2119, 0.034988999999999999),
(14.356299999999999, 0.14072499999999999),
(5.1388600000000002, 0.33364199999999999),
(1.90652, 0.44494),
(0.70588700000000004, 0.26925399999999999)]),
('S',
[(0.92934000000000005, -0.21229000000000001),
(0.26903500000000002, -0.107985),
(0.117379, 1.17584)]),
('P',
[(0.92934000000000005, -0.022419000000000001),
(0.26903500000000002, 0.19227),
(0.117379, 0.84618099999999996)]),
('S', [(0.0421061, 1.0)]),
('P', [(0.0421061, 1.0)]),
('D', [(0.17499999999999999, 1.0)])],
13: [('S',
[(13983.1, 0.0019426700000000001),
(2098.75, 0.014859900000000001),
(477.70499999999998, 0.072849399999999995),
(134.36000000000001, 0.24682999999999999),
(42.870899999999999, 0.48725800000000002),
(14.5189, 0.32349600000000001)]),
('S',
[(239.66800000000001, -0.00292619),
(57.441899999999997, -0.037407999999999997),
(18.285900000000002, -0.11448700000000001),
(6.5991400000000002, 0.115635),
(2.4904899999999999, 0.612595),
(0.94454000000000005, 0.39379900000000001)]),
('P',
[(239.66800000000001, 0.0046028500000000003),
(57.441899999999997, 0.033198999999999999),
(18.285900000000002, 0.13628199999999999),
(6.5991400000000002, 0.33047599999999999),
(2.4904899999999999, 0.44914599999999999),
(0.94454000000000005, 0.265704)]),
('S',
[(1.2779, -0.227606),
(0.39759, 0.0014458299999999999),
(0.16009499999999999, 1.0927899999999999)]),
('P',
[(1.2779, -0.017513000000000001),
(0.39759, 0.244533),
(0.16009499999999999, 0.80493400000000004)]),
('S', [(0.055657699999999997, 1.0)]),
('P', [(0.055657699999999997, 1.0)]),
('D', [(0.32500000000000001, 1.0)])],
14: [('S',
[(16115.9, 0.00195948),
(2425.5799999999999, 0.014928800000000001),
(553.86699999999996, 0.072847800000000004),
(156.34, 0.24612999999999999),
(50.068300000000001, 0.48591400000000001),
(17.017800000000001, 0.32500200000000001)]),
('S',
[(292.71800000000002, -0.00278094),
(69.873099999999994, -0.035714599999999999),
(22.336300000000001, -0.114985),
(8.1503899999999998, 0.093563400000000005),
(3.1345800000000001, 0.60301700000000003),
(1.22543, 0.41895900000000003)]),
('P',
[(292.71800000000002, 0.0044382600000000003),
(69.873099999999994, 0.0326679),
(22.336300000000001, 0.13472100000000001),
(8.1503899999999998, 0.32867800000000003),
(3.1345800000000001, 0.44963999999999998),
(1.22543, 0.26137199999999999)]),
('S',
[(1.7273799999999999, -0.24462999999999999),
(0.57292200000000004, 0.0043157200000000003),
(0.222192, 1.0981799999999999)]),
('P',
[(1.7273799999999999, -0.017795100000000001),
(0.57292200000000004, 0.25353900000000001),
(0.222192, 0.80066899999999996)]),
('S', [(0.077836900000000001, 1.0)]),
('P', [(0.077836900000000001, 1.0)]),
('D', [(0.45000000000000001, 1.0)])],
15: [('S',
[(19413.299999999999, 0.0018515999999999999),
(2909.4200000000001, 0.0142062),
(661.36400000000003, 0.069999500000000006),
(185.75899999999999, 0.24007899999999999),
(59.194299999999998, 0.48476200000000003),
(20.030999999999999, 0.3352)]),
('S',
[(339.47800000000001, -0.00278217),
(81.010099999999994, -0.036049900000000003),
(25.878, -0.116631),
(9.4522099999999991, 0.096832799999999997),
(3.6656599999999999, 0.61441800000000002),
(1.46746, 0.40379799999999999)]),
('P',
[(339.47800000000001, 0.0045646200000000001),
(81.010099999999994, 0.033693599999999997),
(25.878, 0.13975499999999999),
(9.4522099999999991, 0.339362),
(3.6656599999999999, 0.45092100000000002),
(1.46746, 0.23858599999999999)]),
('S',
[(2.1562299999999999, -0.25292300000000001),
(0.74899700000000002, 0.032851699999999998),
(0.28314499999999998, 1.08125)]),
('P',
[(2.1562299999999999, -0.017765300000000001),
(0.74899700000000002, 0.27405800000000002),
(0.28314499999999998, 0.78542100000000004)]),
('S', [(0.099831699999999995, 1.0)]),
('P', [(0.099831699999999995, 1.0)]),
('D', [(0.55000000000000004, 1.0)])],
16: [('S',
[(21917.099999999999, 0.001869),
(3301.4899999999998, 0.01423),
(754.14599999999996, 0.069695999999999994),
(212.71100000000001, 0.238487),
(67.989599999999996, 0.48330699999999999),
(23.051500000000001, 0.33807399999999999)]),
('S',
[(423.73500000000001, -0.0023766999999999998),
(100.70999999999999, -0.031692999999999999),
(32.1599, -0.113317),
(11.8079, 0.056090000000000001),
(4.6311, 0.59225499999999998),
(1.87025, 0.45500600000000002)]),
('P',
[(423.73500000000001, 0.0040610000000000004),
(100.70999999999999, 0.030681),
(32.1599, 0.13045200000000001),
(11.8079, 0.32720500000000002),
(4.6311, 0.452851),
(1.87025, 0.25604199999999999)]),
('S',
[(2.6158399999999999, -0.25037399999999999),
(0.92216699999999996, 0.066957000000000003),
(0.34128700000000001, 1.0545100000000001)]),
('P',
[(2.6158399999999999, -0.014511),
(0.92216699999999996, 0.31026300000000001),
(0.34128700000000001, 0.75448300000000001)]),
('S', [(0.11716699999999999, 1.0)]),
('P', [(0.11716699999999999, 1.0)]),
('D', [(0.65000000000000002, 1.0)])],
17: [('S',
[(25180.099999999999, 0.001833),
(3780.3499999999999, 0.014034),
(860.47400000000005, 0.069097000000000006),
(242.14500000000001, 0.237452),
(77.334900000000005, 0.48303400000000002),
(26.247, 0.33985599999999999)]),
('S',
[(491.76499999999999, -0.0022973999999999998),
(116.98399999999999, -0.030714000000000002),
(37.415300000000002, -0.112528),
(13.7834, 0.045016),
(5.4521499999999996, 0.58935300000000002),
(2.2258800000000001, 0.46520600000000001)]),
('P',
[(491.76499999999999, 0.0039893999999999997),
(116.98399999999999, 0.030318000000000001),
(37.415300000000002, 0.12988),
(13.7834, 0.32795099999999999),
(5.4521499999999996, 0.45352700000000001),
(2.2258800000000001, 0.25215399999999999)]),
('S',
[(3.18649, -0.25183),
(1.1442699999999999, 0.061588999999999998),
(0.420377, 1.0601799999999999)]),
('P',
[(3.18649, -0.014298999999999999),
(1.1442699999999999, 0.32357200000000003),
(0.420377, 0.74350700000000003)]),
('S', [(0.14265700000000001, 1.0)]),
('P', [(0.14265700000000001, 1.0)]),
('D', [(0.75, 1.0)])],
18: [('S',
[(28348.299999999999, 0.00182526),
(4257.6199999999999, 0.013968599999999999),
(969.85699999999997, 0.068707299999999999),
(273.26299999999998, 0.236204),
(87.369500000000002, 0.48221399999999998),
(29.686699999999998, 0.34204299999999999)]),
('S',
[(575.89099999999996, -0.00215972),
(136.816, -0.029077499999999999),
(43.809800000000003, -0.11082699999999999),
(16.209399999999999, 0.0276999),
(6.4608400000000001, 0.57761300000000004),
(2.6511399999999998, 0.48868800000000001)]),
('P',
[(575.89099999999996, 0.00380665),
(136.816, 0.0292305),
(43.809800000000003, 0.126467),
(16.209399999999999, 0.32351000000000002),
(6.4608400000000001, 0.45489600000000002),
(2.6511399999999998, 0.25663000000000002)]),
('S',
[(3.8602799999999999, -0.25559199999999999),
(1.4137299999999999, 0.037806600000000003),
(0.51664600000000005, 1.08056)]),
('P',
[(3.8602799999999999, -0.015919699999999998),
(1.4137299999999999, 0.32464599999999999),
(0.51664600000000005, 0.74399000000000004)]),
('S', [(0.17388799999999999, 1.0)]),
('P', [(0.17388799999999999, 1.0)]),
('D', [(0.84999999999999998, 1.0)])],
19: [('S',
[(31594.419999999998, 0.0018280099999999999),
(4744.3299999999999, 0.013994029999999999),
(1080.4190000000001, 0.068871290000000002),
(304.23379999999997, 0.23697599999999999),
(97.245859999999993, 0.482904),
(33.024949999999997, 0.34047949999999999)]),
('S',
[(622.76250000000005, -0.0025029760000000001),
(147.88390000000001, -0.033155499999999997),
(47.327350000000003, -0.1226387),
(17.514949999999999, 0.053536430000000003),
(6.9227220000000003, 0.61938599999999999),
(2.7682769999999999, 0.43458780000000002)]),
('P',
[(622.76250000000005, 0.0040946369999999999),
(147.88390000000001, 0.031451989999999999),
(47.327350000000003, 0.13515579999999999),
(17.514949999999999, 0.33905000000000002),
(6.9227220000000003, 0.46294550000000001),
(2.7682769999999999, 0.22426380000000001)]),
('S',
[(11.84802, 0.012776890000000001),
(4.0792109999999999, 0.2098767),
(1.7634810000000001, -0.0030952739999999999),
(0.78892700000000004, -0.55938840000000001),
(0.350387, -0.51347600000000004),
(0.146344, -0.065980349999999993)]),
('P',
[(11.84802, -0.012213770000000001),
(4.0792109999999999, -0.006900537),
(1.7634810000000001, 0.2007466),
(0.78892700000000004, 0.42813319999999999),
(0.350387, 0.39701560000000002),
(0.146344, 0.1104718)]),
('S',
[(0.71680100000000002, -0.052377720000000003),
(0.233741, -0.2798503),
(0.038675000000000001, 1.1415470000000001)]),
('P',
[(0.71680100000000002, 0.031642999999999998),
(0.233741, -0.0404616),
(0.038675000000000001, 1.0120290000000001)]),
('S', [(0.016521000000000001, 1.0)]),
('P', [(0.016521000000000001, 1.0)]),
('D', [(0.20000000000000001, 1.0)])],
20: [('S',
[(35264.860000000001, 0.0018135009999999999),
(5295.5029999999997, 0.01388493),
(1206.02, 0.068361619999999998),
(339.68389999999999, 0.23561879999999999),
(108.6264, 0.48206389999999999),
(36.921030000000002, 0.34298190000000001)]),
('S',
[(706.30960000000005, 0.0024482250000000001),
(167.81870000000001, 0.032415039999999999),
(53.825580000000002, 0.12262190000000001),
(20.016380000000002, -0.043169649999999997),
(7.9702789999999997, -0.61269949999999995),
(3.212059, -0.44875399999999999)]),
('P',
[(706.30960000000005, 0.004020371),
(167.81870000000001, 0.031006010000000001),
(53.825580000000002, 0.13372790000000001),
(20.016380000000002, 0.33679829999999999),
(7.9702789999999997, 0.46312809999999999),
(3.212059, 0.22575319999999999)]),
('S',
[(14.195180000000001, 0.010845),
(4.8808280000000002, 0.2088333),
(2.16039, 0.031503379999999997),
(0.98789899999999997, -0.55265180000000003),
(0.449517, -0.5437997),
(0.187387, -0.066693420000000003)]),
('P',
[(14.195180000000001, -0.01289621),
(4.8808280000000002, -0.010251980000000001),
(2.16039, 0.19597809999999999),
(0.98789899999999997, 0.43579329999999999),
(0.449517, 0.39964519999999998),
(0.187387, 0.097136360000000005)]),
('S',
[(1.0322709999999999, -0.044397199999999998),
(0.38117099999999998, -0.32845629999999998),
(0.065130999999999994, 1.1630100000000001)]),
('P',
[(1.0322709999999999, -0.42986210000000002),
(0.38117099999999998, 0.0069358290000000001),
(0.065130999999999994, 0.97059329999999999)]),
('S', [(0.026009999999999998, 1.0)]),
('P', [(0.026009999999999998, 1.0)]),
('D', [(0.20000000000000001, 1.0)])],
21: [('S',
[(39088.980000000003, 0.001803263),
(5869.7920000000004, 0.013807689999999999),
(1336.9100000000001, 0.068003960000000002),
(376.60309999999998, 0.2347099),
| |
#-*- encoding: utf-8 -*-
#
# Copyright (c) 2013, 2014, 2017 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author : <NAME> <<EMAIL>>
#
"""
the VCCS authentication client package
Copyright (c) 2013, 2014, 2017 NORDUnet A/S
See the source file for complete license statement.
Short usage, see the README for details :
Add credential, and authenticate with correct password :
>>> import vccs_client
>>> f = vccs_client.VCCSPasswordFactor('password', credential_id=4712)
>>> client = vccs_client.VCCSClient(base_url='http://localhost:8550/')
>>> client.add_credentials('<EMAIL>', [f])
True
>>> f.salt
'$2a$12$F0TIdfp4quhVJYIOO1ojU.'
>>>
The salt and the credential_id need to be remembered in the client
application for use when validating the password later on.
Authenticate again, first with the correct and then with an incorrect password :
>>> client.authenticate('<EMAIL>', [f])
True
>>> incorrect_f = vccs_client.VCCSPasswordFactor('foobar', credential_id=4712,
... salt='$2a$12$F0TIdfp4quhVJYIOO1ojU.')
>>> client.authenticate('<EMAIL>', [incorrect_f])
False
>>>
Revoke a credential (irreversible!) :
>>> r = vccs_client.VCCSRevokeFactor(4712, 'testing revoke', reference='foobar')
>>> client.revoke_credentials('<EMAIL>', [r])
True
>>>
"""
__version__ = '0.5.0b0'
__copyright__ = 'NORDUnet A/S'
__organization__ = 'NORDUnet'
__license__ = 'BSD'
__authors__ = ['<NAME>']
__all__ = [
]
import os
import six
import bson
import bcrypt
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import HTTPError, URLError
from six import string_types
import simplejson as json
class VCCSClientException(Exception):
"""
Base exception class for VCCS client.
"""
def __init__(self, reason):
Exception.__init__(self)
self.reason = reason
class VCCSClientHTTPError(VCCSClientException):
"""
Class to convey HTTP errors to VCCS client users in a
way that does not make them have to know what HTTP
library is used by the VCCS client.
"""
def __init__(self, reason, http_code):
VCCSClientException.__init__(self, reason)
self.http_code = http_code
def __str__(self):
return '<{cl} instance at {addr}: {code!r} {reason!r}>'.format(
cl = self.__class__.__name__,
addr = hex(id(self)),
code = self.http_code,
reason = self.reason,
)
class VCCSFactor(object):
"""
Base class for authentication factors. Do not use directly.
"""
def __init__(self):
pass
def to_dict(self, _action):
"""
Return factor as a dict that can be serialized for sending to the
authentication backend.
:param _action: 'auth', 'add_creds' or 'revoke_creds'
:returns: dict
"""
raise NotImplementedError('Sub-class must implement to_dict')
class VCCSPasswordFactor(VCCSFactor):
"""
Object representing an ordinary password authentication factor.
"""
def __init__(self, password, credential_id, salt=None, strip_whitespace=True):
"""
:param password: string, password as <PASSWORD>
:param credential_id: unique id of credential in the authentication backend database
:param salt: string or None, NDNv1H1 salt to be used for pre-hashing
(if None, one will be generated. If non-default salt
parameters are requested, use generate_salt() directly)
:param strip_whitespace: boolean, Remove all whitespace from input
:type password: string_types
:type credential_id: string_types | bson.ObjectId
:type salt: None | string_types
:type strip_whitespace: bool
"""
if salt is None:
salt = self.generate_salt()
if not salt.startswith('$NDNv1H1$'):
raise ValueError('Invalid salt (not NDNv1H1)')
self.salt = salt
if isinstance(credential_id, bson.ObjectId):
# backwards compatibility
credential_id = str(credential_id)
if not isinstance(credential_id, string_types):
raise ValueError('Non-string credential id: {!r}'.format(credential_id))
self.credential_id = credential_id
salt, key_length, rounds, = self._decode_parameters(salt)
cid_str = str(self.credential_id)
if strip_whitespace:
password = ''.join(password.split())
if six.PY2:
# Allow passwords containing non-ascii characters, while
# keeping backward-compatibility by converting to a byte string.
# UTF-8 is the encoding used for POST-requests, for more info see the
# section handling-form-submissions-in-view-callables-unicode-and-character-set-issues
# at http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/views.html
if isinstance(password, unicode):
password = password.encode('UTF-8')
T1 = "{!s}{!s}{!s}{!s}".format(len(cid_str), cid_str,
len(password), password)
else:
password = bytes(password, '<PASSWORD>')
T1 = "{!s}{!s}{!s}".format(len(cid_str), cid_str, len(password))
T1 = bytes(T1, 'utf-8') + password
# password = '<PASSWORD>' (T1 as hex:'<PASSWORD>')
# should give res == '<PASSWORD>'
res = bcrypt.kdf(T1, salt, key_length, rounds)
if six.PY2:
res = res.encode('hex')
else:
res = res.hex()
self.hash = res
VCCSFactor.__init__(self)
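# Recap of what __init__ above computes (descriptive comment only, not part of
# the original module): the pre-hash sent to the backend is built as
#
#   T1 = len(credential_id) + credential_id + len(password) + password   (string/byte concatenation)
#   H1 = hex( bcrypt.kdf(T1, salt_bytes, key_length, rounds) )
#
# where salt_bytes, key_length and rounds are decoded from the NDNv1H1 salt
# string, and H1 is what to_dict() later transmits under the 'H1' key.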
def generate_salt(self, salt_length=32, desired_key_length=32, rounds=2 ** 5):
"""
Generate a NDNv1H1 salt.
Encoded into the salt will be the KDF parameter values desired_key_length
and rounds.
For number of rounds, it is recommended that a measurement is made to achieve
a cost of at least 100 ms on current hardware.
:param salt_length: Number of bytes of salt to generate (recommended min 16).
:param desired_key_length: Length of H1 hash to produce (recommended min 32).
:param rounds: bcrypt pbkdf number of rounds.
:returns: string with salt and parameters
"""
random = self._get_random_bytes(salt_length)
if six.PY2:
random_str = random.encode('hex')
else:
random_str = random.hex()
return "$NDNv1H1${!s}${!r}${!r}$".format(random_str, desired_key_length, rounds)
def _decode_parameters(self, salt):
"""
Internal function to decode a NDNv1H1 salt.
"""
_, version, salt, desired_key_length, rounds, _ = salt.split('$')
if version == 'NDNv1H1':
if six.PY2:
salt = salt.decode('hex')
else:
salt = bytes().fromhex(salt)
return salt, int(desired_key_length), int(rounds)
raise NotImplementedError('Unknown hashing scheme')
def _get_random_bytes(self, bytes):
"""
Internal function to make salt generation testable.
"""
return os.urandom(bytes)
def to_dict(self, _action):
"""
Return factor as dictionary, transmittable to authentication backends.
:param _action: 'auth', 'add_creds' or 'revoke_creds'
"""
res = {'type': 'password',
'H1': self.hash,
'credential_id': self.credential_id,
}
return res
class VCCSOathFactor(VCCSFactor):
"""
Object representing an OATH token authentication factor.
"""
def __init__(self, oath_type, credential_id, user_code=None, nonce=None,
aead=None, key_handle=None, digits=6, oath_counter=0):
"""
:param oath_type: 'oath-totp' or 'oath-hotp' (time based or event based OATH)
:param credential_id: integer, unique index of credential
for authentication :
:param user_code: integer, the user supplied token code
for initialization (add_creds) :
:param nonce: string, AEAD nonce
:param aead: string, encrypted OATH secret
:param key_handle: integer(), YubiHSM key handle used to create AEAD
:param digits: integer, OATH token number of digits per code (6/8)
:param oath_counter: initial OATH counter value of token
"""
if oath_type not in ['oath-totp', 'oath-hotp']:
raise ValueError('Invalid OATH type (not oath-totp or oath-hotp)')
self.oath_type = oath_type
self.credential_id = credential_id
self.user_code = user_code
self.nonce = nonce
self.aead = aead
self.key_handle = key_handle
self.digits = digits
self.oath_counter = oath_counter
VCCSFactor.__init__(self)
def to_dict(self, action):
"""
Return factor as dictionary, transmittable to authentication backends.
:param action: 'auth', 'add_creds' or 'revoke_creds'
:returns: Factor in dict format
:rtype: dict
"""
if action == 'auth':
if self.user_code is None:
raise ValueError('User code not provided')
res = {'type': self.oath_type,
'user_code': self.user_code,
'credential_id': self.credential_id,
}
elif action == 'add_creds':
res = {'type': self.oath_type,
'credential_id': self.credential_id,
'nonce': self.nonce,
'aead': self.aead,
'key_handle': self.key_handle,
'digits': self.digits,
'oath_counter': self.oath_counter,
}
elif action == 'revoke_creds':
# XXX implement this
raise NotImplementedError()
else:
raise ValueError('Unknown \'action\' value (not auth or add_creds)')
for (k, v) in res.items():
if v is None:
raise ValueError('{!r} property {!r} not provided'.format(action, k))
return res
class VCCSRevokeFactor(VCCSFactor):
"""
Object representing a factor to be revoked.
"""
def __init__(self, credential_id, reason, reference=''):
"""
:param credential_id: unique index of credential
:param reason: reason for revocation
:param reference: optional data to identify this event in logs on frontend
:type credential_id: string_types | bson.ObjectId
:type reason: string_types
:type reference: string_types
"""
if isinstance(credential_id, bson.ObjectId):
# backwards compatibility
credential_id = str(credential_id)
if not isinstance(credential_id, string_types):
raise TypeError('Non-string credential id: {!r}'.format(credential_id))
if not isinstance(reason, string_types):
raise TypeError('Revocation reason value type error : {!r}'.format(reason))
if not isinstance(reference, string_types):
raise TypeError('Revocation reference value type error : {!r}'.format(reference))
self.credential_id = credential_id
self.reason = reason
self.reference = reference
VCCSFactor.__init__(self)
def to_dict(self, _action):
"""
Return factor as dictionary, transmittable to authentication backends.
:param _action: string, | |
# { alg : { stpair : { path1 : %x, path2: %y, ...}}}
#or { alg : { stpair : { path1 : [%x, di,...] , path2 : [%y, dj,...], ...} ... } ... }
algstpathlen={}
for d in Zless1:#demands:
#TODO: analyse path utilization; when Z<1 we can somehow find the relationship
#between optimal link usage for each s-t pair compared with the other algs
for alg in algdic:
if alg not in stpUtilize:
algPathSet[alg]={}
stpUtilize[alg]={}
algstpathlen[alg]={}
for comp in compare:#here actually includes only one method like ["hardnop"]
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
Zless1[d][alg]=fileJSON['Z']
key = "<PASSWORD> (maximize total throuput)"
stpband={}
pathsband=fileJSON[key]
leband={}
for st in pathsband:
sttemp=str(st).split()
s=sttemp[0]
t=sttemp[1]
stpband[(s,t)]={}
sv=int(s[1:])#TODO for gscale we can do like this but for Cernet we need to read hosts
tv=int(t[1:])
plentemp=0
pntemp=0
if (s,t) not in stpUtilize[alg]:
stpUtilize[alg][(s,t)]={}
algstpathlen[alg][(s,t)]=[0,0,100,0]
#first average path length, second average path number?, min len, max len?
for path in pathsband[st]:
#TODO at least for ksp each time the path set is the same
if str(path) not in stpUtilize[alg][(s,t)]:
algPathSet[alg][str(path)]=0
stpUtilize[alg][(s,t)][str(path)]=[0]
if pathsband[st][path]>0:
pntemp+=1
stpUtilize[alg][(s,t)][str(path)][0]+=1*1.0/ndzles
stpUtilize[alg][(s,t)][str(path)].append(d)
algPathSet[alg][str(path)]+=1*1.0/ndzles
stpband[(s,t)][str(path)]=pathsband[st][path]
ptemp = str(path).split("->")
plentemp+=len(ptemp)-1
if len(ptemp)-1<algstpathlen[alg][(s,t)][2]:
algstpathlen[alg][(s,t)][2]=len(ptemp)-1
if len(ptemp)-1>algstpathlen[alg][(s,t)][3]:
algstpathlen[alg][(s,t)][3]=len(ptemp)-1
del ptemp[0]
del ptemp[-1]
ilen=len(ptemp)-1
for i in range(ilen):
if (ptemp[i],ptemp[i+1]) not in leband:
leband[(ptemp[i],ptemp[i+1])]=0
#print plentemp,pntemp,algstpathlen[alg][(s,t)][0]
algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]+plentemp*1.0/pntemp/1.0/ndzles
algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]+pntemp/1.0/ndzles
#algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]/1.0/ndzles
#algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]/1.0/ndzles
#TODO cal or get demand for corresponding topo and d (JSON file)
#leband[(ptemp[i],ptemp[i+1])]+=pathsband[st][path]*demand49gscale[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#here need to *demand(s,t)
#print 'Step 2',stpband,leband#TODO not right
#first get the result for hardnop
#print "============"
print 'Zless1',Zless1,len(Zless1)
#print 'stpUtilize',stpUtilize
print 'algstpathlen ',algstpathlen
dictx={}
dicty={}
for alg in algdic:
data=[algstpathlen[alg][key][0] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Average path length of each s-t pair','Pathlength-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][1] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'# used path of each s-t pair','Pathnum-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][3] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the longest used s-t path','Pathmaxlen-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][2] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zless1-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the shortest used s-t path','Pathminlen-'+topo+'-1-Zless1-'+comp+'-ccdf.pdf',figurepath)
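# ccdf() and drawCDFandCCDF() used above are helpers defined elsewhere in this
# module (not shown in this excerpt). For reference, a minimal sketch of what
# ccdf() is assumed to compute, the empirical complementary CDF y = P(X > x),
# is given below; the name _ccdf_sketch is hypothetical and the real
# implementation may differ.
def _ccdf_sketch(data):
    xs = sorted(data)
    n = float(len(xs))
    # for every sample value x, the fraction of samples strictly greater than x
    ys = [sum(1 for v in xs if v > x) / n for x in xs]
    return xs, ys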
def analysisPathlength1(Zrelated, lessorlargeorall, demands, pathpre, topo, algdic, compare, figurepath):
#TODO here Zrelated actually can stands for a lot of things?
#TODO TODO consider for Z<=1 only or any Z?,
#the difference is that for Z>1 the total throughput of each alg after step 2 may not be the same
#TODO each s-t first get the average path length?, then draw CCDF for each alg (for all the s-t pairs)
Zinname=''
if len(Zrelated)==0:
Zless1={}
Zlarge1={}
for d in demands:
filename=pathpre+topo+"-"+str(d)+"-optimal-mcf.json"
#Z,leband,lest,steband=readOptimal(filename)
fileOptimal=readOptimal(filename)
Z=fileOptimal['Z']
if Z<=1:#fileOptimal['Z']<=1:
Zless1[d]={}
Zless1[d]['Optimal']=Z#fileOptimal['Z']
elif Z<2.5:
Zlarge1[d]={}
Zlarge1[d]['Optimal']=Z#fileOptimal['Z']
print "============"
if lessorlargeorall==3:
Zrelated=demands
Zinname='all'
elif lessorlargeorall==2:
Zrelated=Zlarge1
Zinname='Zlarge1'
elif lessorlargeorall==1:
Zrelated=Zless1
Zinname='Zless1new'
else:
if lessorlargeorall==3:
Zinname='all'
elif lessorlargeorall==2:
Zinname='Zlarge1'
elif lessorlargeorall==1:
Zinname='Zless1new'
ndzles = len(Zrelated)
algPathSet={}
stpUtilize={}#This can be initialized only once then record the path utilization for each TM
# { alg : { stpair : { path1 : %x, path2: %y, ...}}}
#or { alg : { stpair : { path1 : [%x, di,...] , path2 : [%y, dj,...], ...} ... } ... }
algstpathlen={}
for d in Zrelated:#demands:
#TODO: analyse path utilization; when Z<1 we can somehow find the relationship
#between optimal link usage for each s-t pair compared with the other algs
for alg in algdic:
if alg not in stpUtilize:
algPathSet[alg]={}
stpUtilize[alg]={}
algstpathlen[alg]={}
for comp in compare:#here actually includes only one method like ["hardnop"]
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
#Zrelated[d][alg]=fileJSON['Z']
key = "paths after the second step (maximize total throuput)"
stpband={}
pathsband=fileJSON[key]
leband={}
for st in pathsband:
sttemp=str(st).split()
s=sttemp[0]
t=sttemp[1]
stpband[(s,t)]={}
sv=int(s[1:])#TODO for gscale we can do like this but for Cernet we need to read hosts
tv=int(t[1:])
plentemp=0
pntemp=0
if (s,t) not in stpUtilize[alg]:
stpUtilize[alg][(s,t)]={}
algstpathlen[alg][(s,t)]=[0,0,100,0]
#first average path length, second average path number?, min len, max len?
for path in pathsband[st]:
#TODO at least for ksp each time the path set is the same
if str(path) not in stpUtilize[alg][(s,t)]:
algPathSet[alg][str(path)]=0
stpUtilize[alg][(s,t)][str(path)]=[0]
if pathsband[st][path]>0:
pntemp+=1
stpUtilize[alg][(s,t)][str(path)][0]+=1*1.0/ndzles
stpUtilize[alg][(s,t)][str(path)].append(d)
algPathSet[alg][str(path)]+=1*1.0/ndzles
stpband[(s,t)][str(path)]=pathsband[st][path]
ptemp = str(path).split("->")
plentemp+=len(ptemp)-1
if len(ptemp)-1<algstpathlen[alg][(s,t)][2]:
algstpathlen[alg][(s,t)][2]=len(ptemp)-1
if len(ptemp)-1>algstpathlen[alg][(s,t)][3]:
algstpathlen[alg][(s,t)][3]=len(ptemp)-1
del ptemp[0]
del ptemp[-1]
ilen=len(ptemp)-1
for i in range(ilen):
if (ptemp[i],ptemp[i+1]) not in leband:
leband[(ptemp[i],ptemp[i+1])]=0
#print plentemp,pntemp,algstpathlen[alg][(s,t)][0]
algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]+plentemp*1.0/pntemp/1.0/ndzles
algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]+pntemp/1.0/ndzles
#algstpathlen[alg][(s,t)][0]=algstpathlen[alg][(s,t)][0]/1.0/ndzles
#algstpathlen[alg][(s,t)][1]=algstpathlen[alg][(s,t)][1]/1.0/ndzles
#TODO cal or get demand for corresponding topo and d (JSON file)
#leband[(ptemp[i],ptemp[i+1])]+=pathsband[st][path]*demand49gscale[(sv-1)*12+tv-1]*1.0/pow(2,20)/1000#here need to *demand(s,t)
#print 'Step 2',stpband,leband#TODO not right
#first get the result for hardnop
#print "============"
#print 'Zrelated',Zrelated,len(Zrelated)
#print 'stpUtilize',stpUtilize
#print 'algstpathlen ',algstpathlen
dictx={}
dicty={}
for alg in algdic:
data=[algstpathlen[alg][key][0] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Average path length of each s-t pair','Pathlength-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][1] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'# used path of each s-t pair','Pathnum-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][3] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the longest used s-t path','Pathmaxlen-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
for alg in algdic:
data=[algstpathlen[alg][key][2] for key in algstpathlen[alg]]
x,y=ccdf(data)#y=P(X>x)
#x,y=cdf(data)
dictx[alg]=x
dicty[alg]=y
#drawCDFandCCDF(dictx,dicty,2,0,'Path utilization','Path-gscale-1-Zrelated-ccdf.pdf')
drawCDFandCCDF(dictx,dicty,6,0,'Length of the shortest used s-t path','Pathminlen-'+topo+'-1-'+Zinname+'-'+comp+str(len(compare))+'-ccdf.pdf',figurepath)
#1. fairness: get the average satisfied demand ratio (%) for each s-t pair; consider whether to include Zopt<=1 (will mostly be 1) and Z>1
#2. robustness: as we have shown, each s-t pair almost always uses only 1 path (only one path has weight),
#so it is hard to reroute in the data plane using the normalized weights (TODO: or just send packets out with equal weight to each healthy tunnel)
#2.1 get the percentage of single link failures that leave some s-t pair unreachable
#2.2 get the histogram of P(T > 90%), P(T > 95%), P(T > 99%), P(T > 99.9%), P(T > 99.99%) (a small read-off sketch follows this comment block)
#2.3 get the throughput ratio CDF
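# A tiny self-contained illustration of the P(T > threshold) read-off described
# in 2.2 above. The function name is hypothetical and only mirrors the logic
# used inside fairness() below; the real analysis code is unchanged.
def _prob_above_sketch(xs, ys, threshold):
    # xs ascending, ys[i] = P(X > xs[i]); approximate P(X > threshold)
    for i, x in enumerate(xs):
        if x >= threshold:
            return ys[i] if (x == threshold or i == 0) else ys[i - 1]
    return 0.0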
def fairness(Zrelated, lessorlargeorall, pathpre, topo, algdic, comp, figurepath):
if lessorlargeorall==3:
Zinname='all'
elif lessorlargeorall==2:
Zinname='Zlarge1'
elif lessorlargeorall==1:
Zinname='Zless1'
dictx={}
dicty={}
algstdratio={}
algstdratioH={}
for alg in algdic:
#if alg not in algstdratio:
# algstdratio[alg]=[]
alldsumtemp=[]#mat([])
for d in Zrelated:
#for comp in compare:
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
key="number of path and demand ratio of each st pair"
# "h22 h17": [3.0,46.54156150185264],
#compute the y=p(X>x)
data=[]
temp=fileJSON[key]
if 'average' in temp:
del temp['average']
if len(alldsumtemp)==0:
alldsumtemp=mat([0]*len(temp))
data=[float(temp[st][1]) for st in sorted(temp)]
data=mat(data)
alldsumtemp=alldsumtemp+data
#print alldsumtemp
#print alldsumtemp
alldsumtemp=alldsumtemp*1.0/100/len(Zrelated)
algstdratio[alg]=alldsumtemp.tolist()[0]
print algstdratio[alg]
x,y=ccdf(algstdratio[alg])
dictx[alg]=x
dicty[alg]=y
#keylist=["P(T > 90%)","P(T > 95%)","P(T > 99%)","P(T > 99.9%)","P(T > 99.99%)"]
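# Note: percy[0..4] will hold the empirical values of P(T > 90%), P(T > 95%),
# P(T > 99%), P(T > 99.9%) and P(T > 99.99%), read off the ccdf curve
# (assuming x is sorted ascending; y[i-1] is taken when x first jumps past a
# threshold that is not hit exactly).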
percy=[0,0,0,0,0]
for i in range(len(x)):
percx=x[i]
if percy[0]==0 and percx>=0.9:
if percx==0.9 or i==0:
percy[0]=y[i]
else:
percy[0]=y[i-1]
elif percy[1]==0 and percx>=0.95:
if percx==0.95 or i==0:
percy[1]=y[i]
else:
percy[1]=y[i-1]
elif percy[2]==0 and percx>=0.99:
if percx==0.99 or i==0:
percy[2]=y[i]
else:
percy[2]=y[i-1]
elif percy[3]==0 and percx>=0.999:
if percx==0.999 or i==0:
percy[3]=y[i]
else:
percy[3]=y[i-1]
elif percy[4]==0 and percx>=0.9999:
if percx==0.9999 or i==0:
percy[4]=y[i]
else:
percy[4]=y[i-1]
#print percy
#print "============"
algstdratioH[alg]=percy
#print algstdratio
print algstdratioH
#drawCDFandCCDF(dictx,dicty,2,0,'# flow entries','flow-gscale-1-'+comp+'-ccdf.pdf',figurepath)
drawCDFandCCDF(dictx,dicty,6,0,'Satisfied demand ratio of each s-t pair','dratio-'+topo+'-1-'+Zinname+'-'+comp+'-ccdf.pdf',figurepath)
#TODO draw Histogram for "P(T > 95%)","P(T > 90%)","P(T > 99%)","P(T > 99.9%)","P(T > 99.99%)"
def robustness(Zrelated, lessorlargeorall, pathpre, topo, algdic, comp, figurepath):
#key="Z, total throuput, and throuput ratio at single edge failure"
if lessorlargeorall==3:
Zinname='all'
elif lessorlargeorall==2:
Zinname='Zlarge1'
elif lessorlargeorall==1:
Zinname='Zless1'
keylist=["P(T > 90%)","P(T > 95%)","P(T > 99%)","P(T > 99.9%)","P(T > 99.99%)"]
dictx={}
dicty={}
#algetratio={}
algetratioH={}
algefailunreach={}
for alg in algdic:
#if alg not in algstdratio:
# algstdratio[alg]=[]
alldsumtemp=[]#mat([])
alldsumH=[]
dunreachtemp=[]
for d in Zrelated:
#for comp in compare:
filename=pathpre+topo+"-"+str(d)+"-"+alg+"-"+comp+".json"
fileJSON=readJSON(filename)
key="Z, total throuput, and throuput ratio at single edge failure"
#"s6 s7": [3.1532651852755422, 14672.0837296913, 91.61506879097249],
#compute the y=p(X>x)
data=[]
dataH=[]
unreachN=0
temp=fileJSON[key]
if 'average' in temp:
del temp['average']
if len(alldsumH)==0:
alldsumH=mat([0]*len(keylist))
for prob in keylist:
dataH.append(temp[prob])
del temp[prob]
dataH=mat(dataH)
alldsumH=alldsumH+dataH
# TODO this can not use mat to add all , as some link down may lead to "some pairs have no path"
for k in temp:
if temp[k]=="some pairs have no path":
unreachN=unreachN+1
dunreachtemp.append(unreachN)
if len(alldsumtemp)==0:
alldsumtemp=mat([0]*len(temp))
#data=[float(temp[e][2]) for e in sorted(temp)]
#data=mat(data)
#alldsumtemp=alldsumtemp+data
#print alldsumtemp
#print alldsumtemp
alldsumH=alldsumH*1.0/len(Zrelated)#remember it is % is OK
algetratioH[alg]=alldsumH.tolist()[0]
#alldsumtemp=alldsumtemp*1.0/100/len(Zrelated)
#algetratio[alg]=alldsumtemp.tolist()[0]
#print algetratio[alg]
algefailunreach[alg]=dunreachtemp
#x,y=ccdf(algetratio[alg])
x,y=ccdf(algefailunreach[alg])
dictx[alg]=x
dicty[alg]=y
print "============"
print algetratioH
print algefailunreach
#print algstdratio
#drawCDFandCCDF(dictx,dicty,2,0,'# flow entries','flow-gscale-1-'+comp+'-ccdf.pdf',figurepath)
#drawCDFandCCDF(dictx,dicty,6,0,'Satisfied whole throughput ratio','tratio-'+topo+'-1-'+Zinname+'-'+comp+'-ccdf.pdf',figurepath)
#drawCDFandCCDF(dictx,dicty,6,0,'Percent of unreachable s-t | |
# coherence/backends/gstreamer_renderer.py
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006,2007,2008,2009 <NAME> <<EMAIL>>
from sets import Set
from twisted.internet import reactor, defer
from twisted.internet.task import LoopingCall
from twisted.python import failure
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import DIDLLite
import string
import os, platform
from StringIO import StringIO
import tokenize
import pygst
pygst.require('0.10')
import gst
import coherence.extern.louie as louie
from coherence.extern.simple_plugin import Plugin
from coherence import log
class Player(log.Loggable):
logCategory = 'gstreamer_player'
max_playbin_volume = 1.
def __init__(self, default_mimetype='audio/mpeg', audio_sink_name=None,
video_sink_name=None, audio_sink_options=None,
video_sink_options=None):
self.audio_sink_name = audio_sink_name or "autoaudiosink"
self.video_sink_name = video_sink_name or "autovideosink"
self.audio_sink_options = audio_sink_options or {}
self.video_sink_options = video_sink_options or {}
self.player = None
self.source = None
self.sink = None
self.bus = None
self.views = []
self.playing = False
self.duration = None
self.mimetype = default_mimetype
self.create_pipeline(self.mimetype)
def add_view(self,view):
self.views.append(view)
def remove_view(self,view):
self.views.remove(view)
def update(self,message=None):
for v in self.views:
v(message=message)
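# Note: views registered via add_view() are plain callables invoked as
# view(message=...) on every update; further down in this file,
# GStreamerPlayer registers its own update() method with
# self.player.add_view(self.update).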
def _is_not_playbin2_friendly(self):
uname = platform.uname()[1]
result = False
if uname.startswith('Nokia'):
try:
device = uname.split("-")[1]
except:
device = "unknown"
result = device != "N900"
return result
def create_pipeline(self, mimetype):
self.debug("creating pipeline")
if self._is_not_playbin2_friendly():
self.bus = None
self.player = None
self.source = None
self.sink = None
if mimetype == 'application/ogg':
self.player = gst.parse_launch('gnomevfssrc name=source ! oggdemux ! ivorbisdec ! audioconvert ! dsppcmsink name=sink')
self.player.set_name('oggplayer')
self.set_volume = self.set_volume_dsp_pcm_sink
self.get_volume = self.get_volume_dsp_pcm_sink
else:
self.player = gst.parse_launch('gnomevfssrc name=source ! id3lib ! dspmp3sink name=sink')
self.player.set_name('mp3player')
self.set_volume = self.set_volume_dsp_mp3_sink
self.get_volume = self.get_volume_dsp_mp3_sink
self.source = self.player.get_by_name('source')
self.sink = self.player.get_by_name('sink')
self.player_uri = 'location'
self.mute = self.mute_hack
self.unmute = self.unmute_hack
self.get_mute = self.get_mute_hack
else:
self.player = gst.element_factory_make('playbin2', 'player')
self.player_uri = 'uri'
self.source = self.sink = self.player
self.set_volume = self.set_volume_playbin
self.get_volume = self.get_volume_playbin
self.mute = self.mute_playbin
self.unmute = self.unmute_playbin
self.get_mute = self.get_mute_playbin
audio_sink = gst.element_factory_make(self.audio_sink_name)
self._set_props(audio_sink, self.audio_sink_options)
self.player.set_property("audio-sink", audio_sink)
video_sink = gst.element_factory_make(self.video_sink_name)
self._set_props(video_sink, self.video_sink_options)
self.player.set_property("video-sink", video_sink)
self.bus = self.player.get_bus()
self.player_clean = True
self.bus.connect('message', self.on_message)
self.bus.add_signal_watch()
self.update_LC = LoopingCall(self.update)
def _set_props(self, element, props):
for option, value in props.iteritems():
value = self._py_value(value)
element.set_property(option, value)
def _py_value(self, s):
value = None
g = tokenize.generate_tokens(StringIO(s).readline)
for toknum, tokval, _, _, _ in g:
if toknum == tokenize.NUMBER:
if '.' in tokval:
value = float(tokval)
else:
value = int(tokval)
elif toknum == tokenize.NAME:
value = tokval
if value is not None:
break
return value
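# Illustration: _py_value() turns the string sink options into typed values,
# e.g. _py_value("0.8") -> 0.8 and _py_value("48000") -> 48000, while anything
# that tokenizes as a NAME (e.g. "true") is returned unchanged as a string.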
def get_volume_playbin(self):
""" playbin volume is a double from 0.0 - 10.0
"""
volume = self.sink.get_property('volume')
return int((volume*100) / self.max_playbin_volume)
def set_volume_playbin(self, volume):
volume = int(volume)
if volume < 0:
volume=0
if volume > 100:
volume=100
volume = (volume * self.max_playbin_volume) / 100.
self.sink.set_property('volume', volume)
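# Note: the UPnP volume range 0-100 is mapped linearly onto
# 0.0..max_playbin_volume (1.0 by default), so set_volume_playbin(50) sets the
# playbin 'volume' property to 0.5 and get_volume_playbin() reads it back as 50.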
def get_volume_dsp_mp3_sink(self):
""" dspmp3sink volume is a n in from 0 to 65535
"""
volume = self.sink.get_property('volume')
return int(volume*100/65535)
def set_volume_dsp_mp3_sink(self, volume):
volume = int(volume)
if volume < 0:
volume=0
if volume > 100:
volume=100
self.sink.set_property('volume', volume*65535/100)
def get_volume_dsp_pcm_sink(self):
""" dspmp3sink volume is a n in from 0 to 65535
"""
volume = self.sink.get_property('volume')
return int(volume*100/65535)
def set_volume_dsp_pcm_sink(self, volume):
volume = int(volume)
if volume < 0:
volume=0
if volume > 100:
volume=100
self.sink.set_property('volume', volume*65535/100)
def mute_playbin(self):
self.player.set_property('mute', True)
def unmute_playbin(self):
self.player.set_property('mute', False)
def get_mute_playbin(self):
return self.player.get_property('mute')
def mute_hack(self):
if hasattr(self,'stored_volume'):
self.stored_volume = self.sink.get_property('volume')
self.sink.set_property('volume', 0)
else:
self.sink.set_property('mute', True)
def unmute_hack(self):
if hasattr(self,'stored_volume'):
self.sink.set_property('volume', self.stored_volume)
else:
self.sink.set_property('mute', False)
def get_mute_hack(self):
if hasattr(self,'stored_volume'):
muted = self.sink.get_property('volume') == 0
else:
try:
muted = self.sink.get_property('mute')
except TypeError:
if not hasattr(self,'stored_volume'):
self.stored_volume = self.sink.get_property('volume')
muted = self.stored_volume == 0
except:
muted = False
self.warning("can't get mute state")
return muted
def get_state(self):
return self.player.get_state()
def get_uri(self):
""" playbin2 has an empty uri property after a
pipeline stops, as the uri is nowadays the next
track to play, not the current one
"""
if self.player.get_name() != 'player':
return self.source.get_property(self.player_uri)
else:
try:
return self.current_uri
except:
return None
def set_uri(self,uri):
self.source.set_property(self.player_uri, uri.encode('utf-8'))
if self.player.get_name() == 'player':
self.current_uri = uri.encode('utf-8')
def on_message(self, bus, message):
#print "on_message", message
#print "from", message.src.get_name()
t = message.type
#print t
if t == gst.MESSAGE_ERROR:
err, debug = message.parse_error()
self.warning("Gstreamer error: %s,%r" % (err.message, debug))
if self.playing == True:
self.seek('-0')
#self.player.set_state(gst.STATE_READY)
elif t == gst.MESSAGE_TAG:
for key in message.parse_tag().keys():
self.tags[key] = message.structure[key]
#print self.tags
elif t == gst.MESSAGE_STATE_CHANGED:
if message.src == self.player:
old, new, pending = message.parse_state_changed()
#print "player (%s) state_change:" %(message.src.get_path_string()), old, new, pending
if new == gst.STATE_PLAYING:
self.playing = True
self.update_LC.start( 1, False)
self.update()
elif old == gst.STATE_PLAYING:
self.playing = False
try:
self.update_LC.stop()
except:
pass
self.update()
#elif new == gst.STATE_READY:
# self.update()
elif t == gst.MESSAGE_EOS:
self.debug("reached file end")
self.seek('-0')
self.update(message=gst.MESSAGE_EOS)
def query_position( self):
#print "query_position"
try:
position, format = self.player.query_position(gst.FORMAT_TIME)
except:
#print "CLOCK_TIME_NONE", gst.CLOCK_TIME_NONE
position = gst.CLOCK_TIME_NONE
position = 0
#print position
if self.duration == None:
try:
self.duration, format = self.player.query_duration(gst.FORMAT_TIME)
except:
self.duration = gst.CLOCK_TIME_NONE
self.duration = 0
#import traceback
#print traceback.print_exc()
#print self.duration
r = {}
if self.duration == 0:
self.duration = None
self.debug("duration unknown")
return r
r[u'raw'] = {u'position':unicode(str(position)), u'remaining':unicode(str(self.duration - position)), u'duration':unicode(str(self.duration))}
position_human = u'%d:%02d' % (divmod( position/1000000000, 60))
duration_human = u'%d:%02d' % (divmod( self.duration/1000000000, 60))
remaining_human = u'%d:%02d' % (divmod( (self.duration-position)/1000000000, 60))
r[u'human'] = {u'position':position_human, u'remaining':remaining_human, u'duration':duration_human}
r[u'percent'] = {u'position':position*100/self.duration, u'remaining':100-(position*100/self.duration)}
self.debug(r)
return r
def load( self, uri, mimetype):
self.debug("load --> %r %r" % (uri, mimetype))
_,state,_ = self.player.get_state()
if( state == gst.STATE_PLAYING or state == gst.STATE_PAUSED):
self.stop()
#print "player -->", self.player.get_name()
if self.player.get_name() != 'player':
self.create_pipeline(mimetype)
self.player.set_state(gst.STATE_READY)
self.set_uri(uri)
self.player_clean = True
self.duration = None
self.mimetype = mimetype
self.tags = {}
#self.player.set_state(gst.STATE_PAUSED)
#self.update()
self.debug("load <--")
self.play()
def play( self):
uri = self.get_uri()
mimetype = self.mimetype
self.debug("play --> %r %r" % (uri, mimetype))
if self.player.get_name() != 'player':
if self.player_clean == False:
#print "rebuild pipeline"
self.player.set_state(gst.STATE_NULL)
self.create_pipeline(mimetype)
self.set_uri(uri)
self.player.set_state(gst.STATE_READY)
else:
self.player_clean = True
self.player.set_state(gst.STATE_PLAYING)
self.debug("play <--")
def pause(self):
self.debug("pause --> %r" % self.get_uri())
self.player.set_state(gst.STATE_PAUSED)
self.debug("pause <--")
def stop(self):
self.debug("stop --> %r" % self.get_uri())
self.seek('-0')
self.player.set_state(gst.STATE_READY)
self.update(message=gst.MESSAGE_EOS)
self.debug("stop <-- %r " % self.get_uri())
def seek(self, location):
"""
@param location: simple number = time to seek to, in seconds
+nL = relative seek forward n seconds
-nL = relative seek backwards n seconds
"""
_,state,_ = self.player.get_state()
if state != gst.STATE_PAUSED:
self.player.set_state(gst.STATE_PAUSED)
l = long(location)*1000000000
p = self.query_position()
#print p['raw']['position'], l
if location[0] == '+':
l = long(p[u'raw'][u'position']) + (long(location[1:])*1000000000)
l = min( l, long(p[u'raw'][u'duration']))
elif location[0] == '-':
if location == '-0':
l = 0L
else:
l = long(p[u'raw'][u'position']) - (long(location[1:])*1000000000)
l = max( l, 0L)
self.debug("seeking to %r" % l)
"""
self.player.seek( 1.0, gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
gst.SEEK_TYPE_SET, l,
gst.SEEK_TYPE_NONE, 0)
"""
event = gst.event_new_seek(1.0, gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_KEY_UNIT,
gst.SEEK_TYPE_SET, l,
gst.SEEK_TYPE_NONE, 0)
res = self.player.send_event(event)
if res:
pass
#print "setting new stream time to 0"
#self.player.set_new_stream_time(0L)
elif location != '-0':
print "seek to %r failed" % location
if location == '-0':
content_type, _ = self.mimetype.split("/")
try:
self.update_LC.stop()
except:
pass
if self.player.get_name() != 'player':
self.player.set_state(gst.STATE_NULL)
self.player_clean = False
elif content_type != "image":
self.player.set_state(gst.STATE_READY)
self.update()
else:
self.player.set_state(state)
if state == gst.STATE_PAUSED:
self.update()
class GStreamerPlayer(log.Loggable,Plugin):
""" a backend with a GStreamer based audio player
needs gnomevfssrc from gst-plugins-base
unfortunately gnomevfs has way too much dependencies
# not working -> http://bugzilla.gnome.org/show_bug.cgi?id=384140
# needs the neonhttpsrc plugin from gst-plugins-bad
# tested with CVS version
# and with this patch applied
# --> http://bugzilla.gnome.org/show_bug.cgi?id=375264
# not working
and id3demux from gst-plugins-good CVS too
"""
logCategory = 'gstreamer_player'
implements = ['MediaRenderer']
vendor_value_defaults = {'RenderingControl': {'A_ARG_TYPE_Channel':'Master'},
'AVTransport': {'A_ARG_TYPE_SeekMode':('ABS_TIME','REL_TIME','TRACK_NR')}}
vendor_range_defaults = {'RenderingControl': {'Volume': {'maximum':100}}}
def __init__(self, device, **kwargs):
if(device.coherence.config.get('use_dbus','no') != 'yes' and
device.coherence.config.get('glib','no') != 'yes'):
raise Exception, 'this media renderer needs use_dbus enabled in the configuration'
self.name = kwargs.get('name','GStreamer Audio Player')
audio_sink_name = kwargs.get("audio_sink_name")
audio_sink_options = kwargs.get("audio_sink_options")
video_sink_name = kwargs.get("video_sink_name")
video_sink_options = kwargs.get("video_sink_options")
self.player = Player(audio_sink_name=audio_sink_name,
video_sink_name=video_sink_name,
audio_sink_options=audio_sink_options,
video_sink_options=video_sink_options)
self.player.add_view(self.update)
self.metadata = None
self.duration = None
self.view = []
self.tags = {}
self.server = device
self.playcontainer = None
self.dlna_caps = ['playcontainer-0-1']
louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)
def __repr__(self):
return str(self.__class__).split('.')[-1]
def update(self, message=None):
_, current,_ = self.player.get_state()
self.debug("update current %r", current)
connection_manager = self.server.connection_manager_server
av_transport = self.server.av_transport_server
conn_id = connection_manager.lookup_avt_id(self.current_connection_id)
if current == gst.STATE_PLAYING:
state = 'playing'
av_transport.set_variable(conn_id, 'TransportState', 'PLAYING')
elif current == gst.STATE_PAUSED:
state = 'paused'
av_transport.set_variable(conn_id, 'TransportState',
'PAUSED_PLAYBACK')
elif self.playcontainer != None and message == gst.MESSAGE_EOS and \
self.playcontainer[0]+1 < len(self.playcontainer[2]):
state = 'transitioning'
av_transport.set_variable(conn_id, 'TransportState', 'TRANSITIONING')
| |
0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
plasma = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
| |
# -*- coding: utf-8 -*-
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import unittest
import mock
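# Note: the tests below call module-level helpers _value_pb() and _make_client()
# that are not shown in this excerpt; they are assumed to be defined further
# down in this test module (roughly, _value_pb(**kw) builds a
# document_pb2.Value and _make_client() builds a firestore Client backed by
# fake credentials).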
class TestGeoPoint(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.firestore_v1._helpers import GeoPoint
return GeoPoint
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def test_constructor(self):
lat = 81.25
lng = 359.984375
geo_pt = self._make_one(lat, lng)
self.assertEqual(geo_pt.latitude, lat)
self.assertEqual(geo_pt.longitude, lng)
def test_to_protobuf(self):
from google.type import latlng_pb2
lat = 0.015625
lng = 20.03125
geo_pt = self._make_one(lat, lng)
result = geo_pt.to_protobuf()
geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
self.assertEqual(result, geo_pt_pb)
def test___eq__(self):
lat = 0.015625
lng = 20.03125
geo_pt1 = self._make_one(lat, lng)
geo_pt2 = self._make_one(lat, lng)
self.assertEqual(geo_pt1, geo_pt2)
def test___eq__type_differ(self):
lat = 0.015625
lng = 20.03125
geo_pt1 = self._make_one(lat, lng)
geo_pt2 = object()
self.assertNotEqual(geo_pt1, geo_pt2)
self.assertIs(geo_pt1.__eq__(geo_pt2), NotImplemented)
def test___ne__same_value(self):
lat = 0.015625
lng = 20.03125
geo_pt1 = self._make_one(lat, lng)
geo_pt2 = self._make_one(lat, lng)
comparison_val = geo_pt1 != geo_pt2
self.assertFalse(comparison_val)
def test___ne__(self):
geo_pt1 = self._make_one(0.0, 1.0)
geo_pt2 = self._make_one(2.0, 3.0)
self.assertNotEqual(geo_pt1, geo_pt2)
def test___ne__type_differ(self):
lat = 0.015625
lng = 20.03125
geo_pt1 = self._make_one(lat, lng)
geo_pt2 = object()
self.assertNotEqual(geo_pt1, geo_pt2)
self.assertIs(geo_pt1.__ne__(geo_pt2), NotImplemented)
class Test_verify_path(unittest.TestCase):
@staticmethod
def _call_fut(path, is_collection):
from google.cloud.firestore_v1._helpers import verify_path
return verify_path(path, is_collection)
def test_empty(self):
path = ()
with self.assertRaises(ValueError):
self._call_fut(path, True)
with self.assertRaises(ValueError):
self._call_fut(path, False)
def test_wrong_length_collection(self):
path = ("foo", "bar")
with self.assertRaises(ValueError):
self._call_fut(path, True)
def test_wrong_length_document(self):
path = ("Kind",)
with self.assertRaises(ValueError):
self._call_fut(path, False)
def test_wrong_type_collection(self):
path = (99, "ninety-nine", "zap")
with self.assertRaises(ValueError):
self._call_fut(path, True)
def test_wrong_type_document(self):
path = ("Users", "Ada", "Candy", {})
with self.assertRaises(ValueError):
self._call_fut(path, False)
def test_success_collection(self):
path = ("Computer", "Magic", "Win")
ret_val = self._call_fut(path, True)
# NOTE: We are just checking that it didn't fail.
self.assertIsNone(ret_val)
def test_success_document(self):
path = ("Tokenizer", "Seventeen", "Cheese", "Burger")
ret_val = self._call_fut(path, False)
# NOTE: We are just checking that it didn't fail.
self.assertIsNone(ret_val)
class Test_encode_value(unittest.TestCase):
@staticmethod
def _call_fut(value):
from google.cloud.firestore_v1._helpers import encode_value
return encode_value(value)
def test_none(self):
from google.protobuf import struct_pb2
result = self._call_fut(None)
expected = _value_pb(null_value=struct_pb2.NULL_VALUE)
self.assertEqual(result, expected)
def test_boolean(self):
result = self._call_fut(True)
expected = _value_pb(boolean_value=True)
self.assertEqual(result, expected)
def test_integer(self):
value = 425178
result = self._call_fut(value)
expected = _value_pb(integer_value=value)
self.assertEqual(result, expected)
def test_float(self):
value = 123.4453125
result = self._call_fut(value)
expected = _value_pb(double_value=value)
self.assertEqual(result, expected)
def test_datetime_with_nanos(self):
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.protobuf import timestamp_pb2
dt_seconds = 1488768504
dt_nanos = 458816991
timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos)
dt_val = DatetimeWithNanoseconds.from_timestamp_pb(timestamp_pb)
result = self._call_fut(dt_val)
expected = _value_pb(timestamp_value=timestamp_pb)
self.assertEqual(result, expected)
def test_datetime_wo_nanos(self):
from google.protobuf import timestamp_pb2
dt_seconds = 1488768504
dt_nanos = 458816000
# Make sure precision is valid in microseconds too.
self.assertEqual(dt_nanos % 1000, 0)
dt_val = datetime.datetime.utcfromtimestamp(dt_seconds + 1e-9 * dt_nanos)
result = self._call_fut(dt_val)
timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos)
expected = _value_pb(timestamp_value=timestamp_pb)
self.assertEqual(result, expected)
def test_string(self):
value = u"\u2018left quote, right quote\u2019"
result = self._call_fut(value)
expected = _value_pb(string_value=value)
self.assertEqual(result, expected)
def test_bytes(self):
value = b"\xe3\xf2\xff\x00"
result = self._call_fut(value)
expected = _value_pb(bytes_value=value)
self.assertEqual(result, expected)
def test_reference_value(self):
client = _make_client()
value = client.document("my", "friend")
result = self._call_fut(value)
expected = _value_pb(reference_value=value._document_path)
self.assertEqual(result, expected)
def test_geo_point(self):
from google.cloud.firestore_v1._helpers import GeoPoint
value = GeoPoint(50.5, 88.75)
result = self._call_fut(value)
expected = _value_pb(geo_point_value=value.to_protobuf())
self.assertEqual(result, expected)
def test_array(self):
from google.cloud.firestore_v1.proto.document_pb2 import ArrayValue
result = self._call_fut([99, True, 118.5])
array_pb = ArrayValue(
values=[
_value_pb(integer_value=99),
_value_pb(boolean_value=True),
_value_pb(double_value=118.5),
]
)
expected = _value_pb(array_value=array_pb)
self.assertEqual(result, expected)
def test_map(self):
from google.cloud.firestore_v1.proto.document_pb2 import MapValue
result = self._call_fut({"abc": 285, "def": b"piglatin"})
map_pb = MapValue(
fields={
"abc": _value_pb(integer_value=285),
"def": _value_pb(bytes_value=b"piglatin"),
}
)
expected = _value_pb(map_value=map_pb)
self.assertEqual(result, expected)
def test_bad_type(self):
value = object()
with self.assertRaises(TypeError):
self._call_fut(value)
class Test_encode_dict(unittest.TestCase):
@staticmethod
def _call_fut(values_dict):
from google.cloud.firestore_v1._helpers import encode_dict
return encode_dict(values_dict)
def test_many_types(self):
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.cloud.firestore_v1.proto.document_pb2 import ArrayValue
from google.cloud.firestore_v1.proto.document_pb2 import MapValue
dt_seconds = 1497397225
dt_nanos = 465964000
# Make sure precision is valid in microseconds too.
self.assertEqual(dt_nanos % 1000, 0)
dt_val = datetime.datetime.utcfromtimestamp(dt_seconds + 1e-9 * dt_nanos)
client = _make_client()
document = client.document("most", "adjective", "thing", "here")
values_dict = {
"foo": None,
"bar": True,
"baz": 981,
"quux": 2.875,
"quuz": dt_val,
"corge": u"\N{snowman}",
"grault": b"\xe2\x98\x83",
"wibble": document,
"garply": [u"fork", 4.0],
"waldo": {"fred": u"zap", "thud": False},
}
encoded_dict = self._call_fut(values_dict)
expected_dict = {
"foo": _value_pb(null_value=struct_pb2.NULL_VALUE),
"bar": _value_pb(boolean_value=True),
"baz": _value_pb(integer_value=981),
"quux": _value_pb(double_value=2.875),
"quuz": _value_pb(
timestamp_value=timestamp_pb2.Timestamp(
seconds=dt_seconds, nanos=dt_nanos
)
),
"corge": _value_pb(string_value=u"\N{snowman}"),
"grault": _value_pb(bytes_value=b"\xe2\x98\x83"),
"wibble": _value_pb(reference_value=document._document_path),
"garply": _value_pb(
array_value=ArrayValue(
values=[
_value_pb(string_value=u"fork"),
_value_pb(double_value=4.0),
]
)
),
"waldo": _value_pb(
map_value=MapValue(
fields={
"fred": _value_pb(string_value=u"zap"),
"thud": _value_pb(boolean_value=False),
}
)
),
}
self.assertEqual(encoded_dict, expected_dict)
class Test_reference_value_to_document(unittest.TestCase):
@staticmethod
def _call_fut(reference_value, client):
from google.cloud.firestore_v1._helpers import reference_value_to_document
return reference_value_to_document(reference_value, client)
def test_bad_format(self):
from google.cloud.firestore_v1._helpers import BAD_REFERENCE_ERROR
reference_value = "not/the/right/format"
with self.assertRaises(ValueError) as exc_info:
self._call_fut(reference_value, None)
err_msg = BAD_REFERENCE_ERROR.format(reference_value)
self.assertEqual(exc_info.exception.args, (err_msg,))
def test_same_client(self):
from google.cloud.firestore_v1.document import DocumentReference
client = _make_client()
document = client.document("that", "this")
reference_value = document._document_path
new_document = self._call_fut(reference_value, client)
self.assertIsNot(new_document, document)
self.assertIsInstance(new_document, DocumentReference)
self.assertIs(new_document._client, client)
self.assertEqual(new_document._path, document._path)
def test_different_client(self):
from google.cloud.firestore_v1._helpers import WRONG_APP_REFERENCE
client1 = _make_client(project="kirk")
document = client1.document("tin", "foil")
reference_value = document._document_path
client2 = _make_client(project="spock")
with self.assertRaises(ValueError) as exc_info:
self._call_fut(reference_value, client2)
err_msg = WRONG_APP_REFERENCE.format(reference_value, client2._database_string)
self.assertEqual(exc_info.exception.args, (err_msg,))
class Test_decode_value(unittest.TestCase):
@staticmethod
def _call_fut(value, client=mock.sentinel.client):
from google.cloud.firestore_v1._helpers import decode_value
return decode_value(value, client)
def test_none(self):
from google.protobuf import struct_pb2
value = _value_pb(null_value=struct_pb2.NULL_VALUE)
self.assertIsNone(self._call_fut(value))
def test_bool(self):
value1 = _value_pb(boolean_value=True)
self.assertTrue(self._call_fut(value1))
value2 = _value_pb(boolean_value=False)
self.assertFalse(self._call_fut(value2))
def test_int(self):
int_val = 29871
value = _value_pb(integer_value=int_val)
self.assertEqual(self._call_fut(value), int_val)
def test_float(self):
float_val = 85.9296875
value = _value_pb(double_value=float_val)
self.assertEqual(self._call_fut(value), float_val)
@unittest.skipIf(
(3,) <= sys.version_info < (3, 4, 4), "known datetime bug (bpo-23517) in Python"
)
def test_datetime(self):
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.protobuf import timestamp_pb2
dt_seconds = 552855006
dt_nanos = 766961828
timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos)
value = _value_pb(timestamp_value=timestamp_pb)
expected_dt_val = DatetimeWithNanoseconds.from_timestamp_pb(timestamp_pb)
self.assertEqual(self._call_fut(value), expected_dt_val)
def test_unicode(self):
unicode_val = u"zorgon"
value = _value_pb(string_value=unicode_val)
self.assertEqual(self._call_fut(value), unicode_val)
def test_bytes(self):
bytes_val = b"abc\x80"
value = _value_pb(bytes_value=bytes_val)
self.assertEqual(self._call_fut(value), bytes_val)
def test_reference(self):
from google.cloud.firestore_v1.document import DocumentReference
client = _make_client()
path = (u"then", u"there-was-one")
document = client.document(*path)
ref_string = document._document_path
value = _value_pb(reference_value=ref_string)
result = self._call_fut(value, client)
self.assertIsInstance(result, DocumentReference)
self.assertIs(result._client, client)
self.assertEqual(result._path, path)
def test_geo_point(self):
from google.cloud.firestore_v1._helpers import GeoPoint
geo_pt = GeoPoint(latitude=42.5, longitude=99.0625)
value = _value_pb(geo_point_value=geo_pt.to_protobuf())
self.assertEqual(self._call_fut(value), geo_pt)
def test_array(self):
from google.cloud.firestore_v1.proto import document_pb2
sub_value1 = _value_pb(boolean_value=True)
sub_value2 = _value_pb(double_value=14.1396484375)
sub_value3 = _value_pb(bytes_value=b"\xde\xad\xbe\xef")
array_pb = document_pb2.ArrayValue(values=[sub_value1, sub_value2, sub_value3])
value = _value_pb(array_value=array_pb)
expected = [
sub_value1.boolean_value,
sub_value2.double_value,
sub_value3.bytes_value,
]
self.assertEqual(self._call_fut(value), expected)
def test_map(self):
from google.cloud.firestore_v1.proto import document_pb2
sub_value1 = _value_pb(integer_value=187680)
sub_value2 = _value_pb(string_value=u"how low can you go?")
map_pb = document_pb2.MapValue(
fields={"first": sub_value1, "second": sub_value2}
)
value = _value_pb(map_value=map_pb)
expected = {
"first": sub_value1.integer_value,
"second": sub_value2.string_value,
}
self.assertEqual(self._call_fut(value), expected)
def test_nested_map(self):
from google.cloud.firestore_v1.proto import document_pb2
actual_value1 = 1009876
actual_value2 = u"hey you guys"
actual_value3 = 90.875
map_pb1 = document_pb2.MapValue(
fields={
"lowest": _value_pb(integer_value=actual_value1),
"aside": _value_pb(string_value=actual_value2),
}
)
map_pb2 = document_pb2.MapValue(
fields={
"middle": _value_pb(map_value=map_pb1),
"aside": _value_pb(boolean_value=True),
}
)
map_pb3 = document_pb2.MapValue(
fields={
"highest": _value_pb(map_value=map_pb2),
"aside": _value_pb(double_value=actual_value3),
}
)
value = _value_pb(map_value=map_pb3)
expected = {
"highest": {
"middle": {"lowest": actual_value1, "aside": actual_value2},
"aside": True,
},
"aside": actual_value3,
}
self.assertEqual(self._call_fut(value), expected)
def test_unset_value_type(self):
with self.assertRaises(ValueError):
self._call_fut(_value_pb())
def test_unknown_value_type(self):
value_pb = mock.Mock(spec=["WhichOneof"])
value_pb.WhichOneof.return_value = "zoob_value"
with self.assertRaises(ValueError):
self._call_fut(value_pb)
value_pb.WhichOneof.assert_called_once_with("value_type")
class Test_decode_dict(unittest.TestCase):
@staticmethod
def _call_fut(value_fields, client=mock.sentinel.client):
from google.cloud.firestore_v1._helpers import decode_dict
return decode_dict(value_fields, client)
@unittest.skipIf(
(3,) <= sys.version_info < (3, 4, 4), "known datetime bug (bpo-23517) in Python"
)
def test_many_types(self):
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.cloud.firestore_v1.proto.document_pb2 import ArrayValue
from google.cloud.firestore_v1.proto.document_pb2 import MapValue
from google.cloud._helpers import UTC
from google.cloud.firestore_v1.field_path import FieldPath
dt_seconds = 1394037350
dt_nanos = 667285000
# Make sure precision is valid in microseconds too.
self.assertEqual(dt_nanos % 1000, 0)
dt_val = datetime.datetime.utcfromtimestamp(
dt_seconds + 1e-9 * dt_nanos
).replace(tzinfo=UTC)
value_fields = {
"foo": _value_pb(null_value=struct_pb2.NULL_VALUE),
"bar": _value_pb(boolean_value=True),
"baz": _value_pb(integer_value=981),
"quux": _value_pb(double_value=2.875),
"quuz": _value_pb(
timestamp_value=timestamp_pb2.Timestamp(
seconds=dt_seconds, nanos=dt_nanos
)
),
"corge": _value_pb(string_value=u"\N{snowman}"),
"grault": _value_pb(bytes_value=b"\xe2\x98\x83"),
"garply": _value_pb(
array_value=ArrayValue(
values=[
_value_pb(string_value=u"fork"),
_value_pb(double_value=4.0),
]
)
),
"waldo": _value_pb(
map_value=MapValue(
fields={
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cgi
import unittest
from urllib import quote_plus
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from openid.consumer.consumer import Consumer, SuccessResponse
from openid.consumer.discover import OpenIDServiceEndpoint
from openid.extensions import ax, sreg, pape
from openid.fetchers import (
HTTPFetcher, HTTPFetchingError, HTTPResponse, setDefaultFetcher)
from openid.oidutil import importElementTree
from openid.server.server import BROWSER_REQUEST_MODES, ENCODE_URL, Server
from openid.store.memstore import MemoryStore
from openid.message import OPENID1_URL_LIMIT, IDENTIFIER_SELECT
from django_openid_auth import teams
from django_openid_auth.models import UserOpenID
from django_openid_auth.views import (
sanitise_redirect_url,
make_consumer,
)
from django_openid_auth.auth import OpenIDBackend
from django_openid_auth.signals import openid_login_complete
from django_openid_auth.store import DjangoOpenIDStore
from django_openid_auth.exceptions import (
MissingUsernameViolation,
DuplicateUsernameViolation,
MissingPhysicalMultiFactor,
RequiredAttributeNotReturned,
)
ET = importElementTree()
class StubOpenIDProvider(HTTPFetcher):
def __init__(self, base_url):
self.store = MemoryStore()
self.identity_url = base_url + 'identity'
self.localid_url = base_url + 'localid'
self.endpoint_url = base_url + 'endpoint'
self.server = Server(self.store, self.endpoint_url)
self.last_request = None
self.type_uris = ['http://specs.openid.net/auth/2.0/signon']
def fetch(self, url, body=None, headers=None):
if url == self.identity_url:
# Serve an XRDS document directly, pointing at our endpoint.
type_uris = ['<Type>%s</Type>' % uri for uri in self.type_uris]
return HTTPResponse(
url, 200, {'content-type': 'application/xrds+xml'}, """\
<?xml version="1.0"?>
<xrds:XRDS
xmlns="xri://$xrd*($v*2.0)"
xmlns:xrds="xri://$xrds">
<XRD>
<Service priority="0">
%s
<URI>%s</URI>
<LocalID>%s</LocalID>
</Service>
</XRD>
</xrds:XRDS>
""" % ('\n'.join(type_uris), self.endpoint_url, self.localid_url))
elif url.startswith(self.endpoint_url):
# Gather query parameters
query = {}
if '?' in url:
query.update(cgi.parse_qsl(url.split('?', 1)[1]))
if body is not None:
query.update(cgi.parse_qsl(body))
self.last_request = self.server.decodeRequest(query)
# The browser based requests should not be handled through
# the fetcher interface.
assert self.last_request.mode not in BROWSER_REQUEST_MODES
response = self.server.handleRequest(self.last_request)
webresponse = self.server.encodeResponse(response)
return HTTPResponse(url, webresponse.code, webresponse.headers,
webresponse.body)
else:
raise HTTPFetchingError('unknown URL %s' % url)
def parseFormPost(self, content):
"""Parse an HTML form post to create an OpenID request."""
# Hack to make the javascript XML compliant ...
        content = content.replace('i < elements.length',
                                  'i &lt; elements.length')
tree = ET.XML(content)
form = tree.find('.//form')
assert form is not None, 'No form in document'
assert form.get('action') == self.endpoint_url, (
'Form posts to %s instead of %s' % (form.get('action'),
self.endpoint_url))
query = {}
for input in form.findall('input'):
if input.get('type') != 'hidden':
continue
query[input.get('name').encode('UTF-8')] = \
input.get('value').encode('UTF-8')
self.last_request = self.server.decodeRequest(query)
return self.last_request
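# Note: the tests below install this stub as python-openid's default fetcher
# (see setUp's setDefaultFetcher call), so discovery and checkid traffic is
# answered in-process rather than over the network.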
class DummyDjangoRequest(object):
def __init__(self, request_path):
self.request_path = request_path
self.META = {
'HTTP_HOST': "localhost",
'SCRIPT_NAME': "http://localhost",
'SERVER_PROTOCOL': "http",
}
self.POST = {
'openid_identifier': "http://example.com/identity",
}
self.GET = {}
self.session = {}
def get_full_path(self):
return self.META['SCRIPT_NAME'] + self.request_path
def build_absolute_uri(self):
return self.META['SCRIPT_NAME'] + self.request_path
def _combined_request(self):
request = {}
request.update(self.POST)
request.update(self.GET)
return request
REQUEST = property(_combined_request)
class RelyingPartyTests(TestCase):
urls = 'django_openid_auth.tests.urls'
def setUp(self):
super(RelyingPartyTests, self).setUp()
self.provider = StubOpenIDProvider('http://example.com/')
self.req = DummyDjangoRequest('http://localhost/')
self.endpoint = OpenIDServiceEndpoint()
self.endpoint.claimed_id = 'http://example.com/identity'
self.endpoint.server_url = 'http://example.com/'
self.consumer = make_consumer(self.req)
self.server = Server(DjangoOpenIDStore())
setDefaultFetcher(self.provider, wrap_exceptions=False)
self.old_login_redirect_url = getattr(settings, 'LOGIN_REDIRECT_URL', '/accounts/profile/')
self.old_create_users = getattr(settings, 'OPENID_CREATE_USERS', False)
self.old_strict_usernames = getattr(settings, 'OPENID_STRICT_USERNAMES', False)
self.old_update_details = getattr(settings, 'OPENID_UPDATE_DETAILS_FROM_SREG', False)
self.old_sso_server_url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
self.old_teams_map = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING', {})
self.old_use_as_admin_login = getattr(settings, 'OPENID_USE_AS_ADMIN_LOGIN', False)
self.old_follow_renames = getattr(settings, 'OPENID_FOLLOW_RENAMES', False)
self.old_physical_multifactor = getattr(settings, 'OPENID_PHYSICAL_MULTIFACTOR_REQUIRED', False)
self.old_login_render_failure = getattr(settings, 'OPENID_RENDER_FAILURE', None)
self.old_consumer_complete = Consumer.complete
self.old_required_fields = getattr(
settings, 'OPENID_SREG_REQUIRED_FIELDS', [])
settings.OPENID_CREATE_USERS = False
settings.OPENID_STRICT_USERNAMES = False
settings.OPENID_UPDATE_DETAILS_FROM_SREG = False
settings.OPENID_SSO_SERVER_URL = None
settings.OPENID_LAUNCHPAD_TEAMS_MAPPING = {}
settings.OPENID_USE_AS_ADMIN_LOGIN = False
settings.OPENID_FOLLOW_RENAMES = False
settings.OPENID_PHYSICAL_MULTIFACTOR_REQUIRED = False
settings.OPENID_SREG_REQUIRED_FIELDS = []
def tearDown(self):
settings.LOGIN_REDIRECT_URL = self.old_login_redirect_url
settings.OPENID_CREATE_USERS = self.old_create_users
settings.OPENID_STRICT_USERNAMES = self.old_strict_usernames
settings.OPENID_UPDATE_DETAILS_FROM_SREG = self.old_update_details
settings.OPENID_SSO_SERVER_URL = self.old_sso_server_url
settings.OPENID_LAUNCHPAD_TEAMS_MAPPING = self.old_teams_map
settings.OPENID_USE_AS_ADMIN_LOGIN = self.old_use_as_admin_login
settings.OPENID_FOLLOW_RENAMES = self.old_follow_renames
settings.OPENID_PHYSICAL_MULTIFACTOR_REQUIRED = self.old_physical_multifactor
settings.OPENID_RENDER_FAILURE = self.old_login_render_failure
Consumer.complete = self.old_consumer_complete
settings.OPENID_SREG_REQUIRED_FIELDS = self.old_required_fields
setDefaultFetcher(None)
super(RelyingPartyTests, self).tearDown()
def complete(self, openid_response):
"""Complete an OpenID authentication request."""
# The server can generate either a redirect or a form post
# here. For simplicity, force generation of a redirect.
openid_response.whichEncoding = lambda: ENCODE_URL
webresponse = self.provider.server.encodeResponse(openid_response)
self.assertEquals(webresponse.code, 302)
redirect_to = webresponse.headers['location']
self.assertTrue(redirect_to.startswith(
'http://testserver/openid/complete/'))
return self.client.get('/openid/complete/',
dict(cgi.parse_qsl(redirect_to.split('?', 1)[1])))
def test_login(self):
user = User.objects.create_user('someuser', '<EMAIL>')
useropenid = UserOpenID(
user=user,
claimed_id='http://example.com/identity',
display_id='http://example.com/identity')
useropenid.save()
# The login form is displayed:
response = self.client.get('/openid/login/')
self.assertTemplateUsed(response, 'openid/login.html')
# Posting in an identity URL begins the authentication request:
response = self.client.post('/openid/login/',
{'openid_identifier': 'http://example.com/identity',
'next': '/getuser/'})
self.assertContains(response, 'OpenID transaction in progress')
openid_request = self.provider.parseFormPost(response.content)
self.assertEquals(openid_request.mode, 'checkid_setup')
self.assertTrue(openid_request.return_to.startswith(
'http://testserver/openid/complete/'))
# Complete the request. The user is redirected to the next URL.
openid_response = openid_request.answer(True)
response = self.complete(openid_response)
self.assertRedirects(response, 'http://testserver/getuser/')
# And they are now logged in:
response = self.client.get('/getuser/')
self.assertEquals(response.content, 'someuser')
def test_login_no_next(self):
"""Logins with no next parameter redirect to LOGIN_REDIRECT_URL."""
user = User.objects.create_user('someuser', '<EMAIL>')
useropenid = UserOpenID(
user=user,
claimed_id='http://example.com/identity',
display_id='http://example.com/identity')
useropenid.save()
settings.LOGIN_REDIRECT_URL = '/getuser/'
response = self.client.post('/openid/login/',
{'openid_identifier': 'http://example.com/identity'})
self.assertContains(response, 'OpenID transaction in progress')
openid_request = self.provider.parseFormPost(response.content)
self.assertEquals(openid_request.mode, 'checkid_setup')
self.assertTrue(openid_request.return_to.startswith(
'http://testserver/openid/complete/'))
# Complete the request. The user is redirected to the next URL.
openid_response = openid_request.answer(True)
response = self.complete(openid_response)
self.assertRedirects(
response, 'http://testserver' + settings.LOGIN_REDIRECT_URL)
def test_login_sso(self):
settings.OPENID_SSO_SERVER_URL = 'http://example.com/identity'
user = User.objects.create_user('someuser', '<EMAIL>')
useropenid = UserOpenID(
user=user,
claimed_id='http://example.com/identity',
display_id='http://example.com/identity')
useropenid.save()
# Requesting the login form immediately begins an
# authentication request.
response = self.client.get('/openid/login/', {'next': '/getuser/'})
self.assertEquals(response.status_code, 200)
self.assertContains(response, 'OpenID transaction in progress')
openid_request = self.provider.parseFormPost(response.content)
self.assertEquals(openid_request.mode, 'checkid_setup')
self.assertTrue(openid_request.return_to.startswith(
'http://testserver/openid/complete/'))
# Complete the request. The user is redirected to the next URL.
openid_response = openid_request.answer(True)
response = self.complete(openid_response)
self.assertRedirects(response, 'http://testserver/getuser/')
# And they are now logged in:
response = self.client.get('/getuser/')
self.assertEquals(response.content, 'someuser')
def test_login_create_users(self):
settings.OPENID_CREATE_USERS = True
# Create a user with the same name as we'll pass back via sreg.
User.objects.create_user('someuser', '<EMAIL>')
# Posting in an identity URL begins the authentication request:
response = self.client.post('/openid/login/',
{'openid_identifier': 'http://example.com/identity',
'next': '/getuser/'})
self.assertContains(response, 'OpenID transaction in progress')
# Complete the request, passing back some simple registration
# data. The user is redirected to the next URL.
openid_request = self.provider.parseFormPost(response.content)
sreg_request = sreg.SRegRequest.fromOpenIDRequest(openid_request)
openid_response = openid_request.answer(True)
sreg_response = sreg.SRegResponse.extractResponse(
sreg_request, {'nickname': 'someuser', 'fullname': '<NAME>',
'email': '<EMAIL>'})
openid_response.addExtension(sreg_response)
response = self.complete(openid_response)
self.assertRedirects(response, 'http://testserver/getuser/')
# And they are now logged in as a new user (they haven't taken
# over the existing "someuser" user).
response = self.client.get('/getuser/')
self.assertEquals(response.content, 'someuser2')
# Check the details of the new user.
user = User.objects.get(username='someuser2')
self.assertEquals(user.first_name, 'Some')
self.assertEquals(user.last_name, 'User')
self.assertEquals(user.email, '<EMAIL>')
def _do_user_login(self, req_data, resp_data, use_sreg=True, use_pape=None):
openid_request = self._get_login_request(req_data)
openid_response = self._get_login_response(openid_request, resp_data, use_sreg, use_pape)
response = self.complete(openid_response)
self.assertRedirects(response, 'http://testserver/getuser/')
return response
def _get_login_request(self, req_data):
# Posting in an identity URL begins the authentication request:
response = self.client.post('/openid/login/', req_data)
self.assertContains(response, 'OpenID transaction in progress')
# Complete the request, passing back some simple registration
# data. The user is redirected to the next URL.
openid_request = self.provider.parseFormPost(response.content)
return openid_request
def _get_login_response(self, openid_request, resp_data, use_sreg, use_pape):
openid_response = openid_request.answer(True)
if use_sreg:
sreg_request = sreg.SRegRequest.fromOpenIDRequest(openid_request)
sreg_response = sreg.SRegResponse.extractResponse(
sreg_request, resp_data)
openid_response.addExtension(sreg_response)
if use_pape is not None:
policies = [
use_pape
]
pape_response = pape.Response(auth_policies=policies)
openid_response.addExtension(pape_response)
return openid_response
def parse_query_string(self, query_str):
query_items = map(tuple,
[item.split('=') for item in query_str.split('&')])
query = dict(query_items)
return query
def test_login_physical_multifactor_request(self):
settings.OPENID_PHYSICAL_MULTIFACTOR_REQUIRED = True
preferred_auth = pape.AUTH_MULTI_FACTOR_PHYSICAL
self.provider.type_uris.append(pape.ns_uri)
openid_req = {'openid_identifier': 'http://example.com/identity',
'next': '/getuser/'}
response = self.client.post('/openid/login/', openid_req)
openid_request = self.provider.parseFormPost(response.content)
request_auth = openid_request.message.getArg(
'http://specs.openid.net/extensions/pape/1.0',
'preferred_auth_policies',
)
self.assertEqual(request_auth, preferred_auth)
def test_login_physical_multifactor_response(self):
settings.OPENID_PHYSICAL_MULTIFACTOR_REQUIRED = True
preferred_auth = pape.AUTH_MULTI_FACTOR_PHYSICAL
self.provider.type_uris.append(pape.ns_uri)
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/run_e2e_tests.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import atexit
import contextlib
import functools
import os
import re
import signal
import subprocess
import sys
import time
from core.tests import test_utils
import feconf
import python_utils
from scripts import build
from scripts import common
from scripts import install_chrome_on_travis
from scripts import install_third_party_libs
from scripts import run_e2e_tests
CHROME_DRIVER_VERSION = '77.0.3865.40'
class MockProcessClass(python_utils.OBJECT):
def __init__(self, clean_shutdown=True):
"""Create a mock process object.
Attributes:
poll_count: int. The number of times poll() has been called.
signals_received: list(int). List of received signals (as
ints) in order of receipt.
kill_count: int. Number of times kill() has been called.
poll_return: bool. The return value for poll().
clean_shutdown: bool. Whether to shut down when signal.SIGINT
signal is received.
Args:
clean_shutdown: bool. Whether to shut down when SIGINT received.
"""
self.poll_count = 0
self.signals_received = []
self.kill_count = 0
self.poll_return = True
self.clean_shutdown = clean_shutdown
def kill(self):
"""Increment kill_count.
Mocks the process being killed.
"""
self.kill_count += 1
def poll(self):
"""Increment poll_count.
Mocks checking whether the process is still alive.
Returns:
bool. The value of self.poll_return, which mocks whether the
process is still alive.
"""
self.poll_count += 1
return self.poll_return
def send_signal(self, signal_number):
"""Append signal to self.signals_received.
Mocks receiving a process signal. If a SIGINT signal is received
(e.g. from ctrl-C) and self.clean_shutdown is True, then we set
self.poll_return to False to mimic the process shutting down.
Args:
signal_number: int. The number of the received signal.
"""
self.signals_received.append(signal_number)
if signal_number == signal.SIGINT and self.clean_shutdown:
self.poll_return = False
def wait(self):
"""Wait for the process completion.
Mocks the process waiting for completion before it continues execution.
"""
return
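# Illustrative only (not exercised directly by the tests): the mock mimics a
# process that shuts down cleanly on SIGINT, e.g.
#
#     proc = MockProcessClass()
#     proc.send_signal(signal.SIGINT)
#     proc.poll()   # -> False, i.e. the "process" is no longer alive
#
# whereas MockProcessClass(clean_shutdown=False) keeps poll() returning True.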
class RunE2ETestsTests(test_utils.GenericTestBase):
"""Test the run_e2e_tests methods."""
def setUp(self):
super(RunE2ETestsTests, self).setUp()
def mock_print(unused_msg):
return
def mock_run_cmd(unused_commands):
pass
def mock_check_call(unused_commands):
pass
def mock_build_main(args): # pylint: disable=unused-argument
pass
def mock_popen(args, env, shell): # pylint: disable=unused-argument
return
def mock_remove(unused_path):
pass
def mock_inplace_replace(
unused_filename, unused_pattern, unused_replace):
return
self.popen_swap = functools.partial(
self.swap_with_checks, subprocess, 'Popen', mock_popen)
self.inplace_replace_swap = functools.partial(
self.swap_with_checks, common, 'inplace_replace_file',
mock_inplace_replace)
self.mock_run_cmd = mock_run_cmd
self.mock_check_call = mock_check_call
self.mock_build_main = mock_build_main
self.mock_remove = mock_remove
self.print_swap = functools.partial(
self.swap_with_checks, python_utils, 'PRINT', mock_print)
self.mock_node_bin_path = 'node'
self.node_bin_path_swap = self.swap(
common, 'NODE_BIN_PATH', self.mock_node_bin_path)
self.mock_webpack_bin_path = 'webpack'
self.webpack_bin_path_swap = self.swap(
run_e2e_tests, 'WEBPACK_BIN_PATH', self.mock_webpack_bin_path)
self.mock_constant_file_path = 'constant.ts'
self.constant_file_path_swap = self.swap(
run_e2e_tests, 'CONSTANT_FILE_PATH', self.mock_constant_file_path)
def test_check_screenhost_when_not_exist(self):
def mock_isdir(unused_path):
return False
exist_swap = self.swap_with_checks(
os.path, 'isdir', mock_isdir,
expected_args=[(os.path.join(os.pardir, 'protractor-screenshots'),)]
)
print_swap = self.print_swap(called=False)
with print_swap, exist_swap:
run_e2e_tests.ensure_screenshots_dir_is_removed()
def test_check_screenhost_when_exist(self):
screenshot_dir = os.path.join(os.pardir, 'protractor-screenshots')
def mock_isdir(unused_path):
return True
def mock_rmdir(unused_path):
return True
exist_swap = self.swap_with_checks(
os.path, 'isdir', mock_isdir, expected_args=[(screenshot_dir,)])
rmdir_swap = self.swap_with_checks(
os, 'rmdir', mock_rmdir, expected_args=[(screenshot_dir,)])
expected_output = (
'Note: If ADD_SCREENSHOT_REPORTER is set to true in'
'core/tests/protractor.conf.js, you can view screenshots'
'of the failed tests in ../protractor-screenshots/')
print_swap = self.print_swap(expected_args=[(expected_output,)])
with print_swap, exist_swap, rmdir_swap:
run_e2e_tests.ensure_screenshots_dir_is_removed()
def test_cleanup_when_no_subprocess(self):
def mock_kill_process_based_on_regex(unused_regex):
return
def mock_is_windows_os():
return False
def mock_set_constants_to_default():
return
subprocess_swap = self.swap(run_e2e_tests, 'SUBPROCESSES', [])
google_app_engine_path = '%s/' % (
common.GOOGLE_APP_ENGINE_SDK_HOME)
webdriver_download_path = '%s/selenium' % (
run_e2e_tests.WEBDRIVER_HOME_PATH)
process_pattern = [
('.*%s.*' % re.escape(google_app_engine_path),),
('.*%s.*' % re.escape(webdriver_download_path),)
]
swap_kill_process = self.swap_with_checks(
common, 'kill_processes_based_on_regex',
mock_kill_process_based_on_regex,
expected_args=process_pattern)
swap_is_windows = self.swap_with_checks(
common, 'is_windows_os', mock_is_windows_os)
swap_set_constants_to_default = self.swap_with_checks(
build, 'set_constants_to_default', mock_set_constants_to_default)
with swap_kill_process, subprocess_swap, swap_is_windows:
with swap_set_constants_to_default:
run_e2e_tests.cleanup()
def test_cleanup_when_subprocesses_exist(self):
def mock_kill_process_based_on_regex(unused_regex):
mock_kill_process_based_on_regex.called_times += 1
return True
mock_kill_process_based_on_regex.called_times = 0
def mock_set_constants_to_default():
return
mock_processes = [MockProcessClass(), MockProcessClass()]
subprocess_swap = self.swap(
run_e2e_tests, 'SUBPROCESSES', mock_processes)
swap_kill_process = self.swap_with_checks(
common, 'kill_processes_based_on_regex',
mock_kill_process_based_on_regex)
swap_set_constants_to_default = self.swap_with_checks(
build, 'set_constants_to_default', mock_set_constants_to_default)
with subprocess_swap, swap_kill_process, swap_set_constants_to_default:
run_e2e_tests.cleanup()
self.assertEqual(
mock_kill_process_based_on_regex.called_times, len(mock_processes))
def test_cleanup_on_windows(self):
def mock_is_windows_os():
return True
def mock_set_constants_to_default():
return
subprocess_swap = self.swap(run_e2e_tests, 'SUBPROCESSES', [])
google_app_engine_path = '%s/' % common.GOOGLE_APP_ENGINE_SDK_HOME
webdriver_download_path = '%s/selenium' % (
run_e2e_tests.WEBDRIVER_HOME_PATH)
process_pattern = [
('.*%s.*' % re.escape(google_app_engine_path),),
('.*%s.*' % re.escape(webdriver_download_path),)
]
expected_pattern = process_pattern[:]
expected_pattern[1] = ('.*%s.*' % re.escape(
os.path.abspath(webdriver_download_path)),)
def mock_kill_process_based_on_regex(unused_regex):
return
swap_kill_process = self.swap_with_checks(
common, 'kill_processes_based_on_regex',
mock_kill_process_based_on_regex,
expected_args=expected_pattern)
swap_is_windows = self.swap_with_checks(
common, 'is_windows_os', mock_is_windows_os)
swap_set_constants_to_default = self.swap_with_checks(
build, 'set_constants_to_default', mock_set_constants_to_default)
windows_exception = self.assertRaisesRegexp(
Exception, 'The redis command line interface is not installed '
'because your machine is on the Windows operating system. There is '
'no redis server to shutdown.'
)
with swap_kill_process, subprocess_swap, swap_is_windows, (
windows_exception):
with swap_set_constants_to_default:
run_e2e_tests.cleanup()
def test_is_oppia_server_already_running_when_ports_closed(self):
def mock_is_port_open(unused_port):
return False
is_port_open_swap = self.swap_with_checks(
common, 'is_port_open', mock_is_port_open)
with is_port_open_swap:
result = run_e2e_tests.is_oppia_server_already_running()
self.assertFalse(result)
def test_is_oppia_server_already_running_when_one_of_the_ports_is_open(
self):
running_port = run_e2e_tests.GOOGLE_APP_ENGINE_PORT
def mock_is_port_open(port):
if port == running_port:
return True
return False
is_port_open_swap = self.swap_with_checks(
common, 'is_port_open', mock_is_port_open)
with is_port_open_swap:
result = run_e2e_tests.is_oppia_server_already_running()
self.assertTrue(result)
def test_wait_for_port_to_be_open_when_port_successfully_opened(self):
def mock_is_port_open(unused_port):
mock_is_port_open.wait_time += 1
if mock_is_port_open.wait_time > 10:
return True
return False
mock_is_port_open.wait_time = 0
def mock_sleep(unused_time):
mock_sleep.called_times += 1
return
mock_sleep.called_times = 0
is_port_open_swap = self.swap_with_checks(
common, 'is_port_open', mock_is_port_open)
sleep_swap = self.swap_with_checks(time, 'sleep', mock_sleep)
with is_port_open_swap, sleep_swap:
common.wait_for_port_to_be_open(1)
self.assertEqual(mock_is_port_open.wait_time, 11)
self.assertEqual(mock_sleep.called_times, 10)
def test_wait_for_port_to_be_open_when_port_failed_to_open(self):
def mock_is_port_open(unused_port):
return False
def mock_sleep(unused_time):
mock_sleep.sleep_time += 1
def mock_exit(unused_exit_code):
return
mock_sleep.sleep_time = 0
is_port_open_swap = self.swap(common, 'is_port_open', mock_is_port_open)
sleep_swap = self.swap_with_checks(time, 'sleep', mock_sleep)
exit_swap = self.swap_with_checks(sys, 'exit', mock_exit)
with is_port_open_swap, sleep_swap, exit_swap:
common.wait_for_port_to_be_open(1)
self.assertEqual(
mock_sleep.sleep_time,
common.MAX_WAIT_TIME_FOR_PORT_TO_OPEN_SECS)
def test_run_webpack_compilation_success(self):
def mock_isdir(unused_dirname):
mock_isdir.run_times += 1
if mock_isdir.run_times > 3:
return True
return False
mock_isdir.run_times = 0
expected_commands = [
self.mock_node_bin_path, self.mock_webpack_bin_path, '--config',
'webpack.dev.config.ts']
isdir_swap = self.swap_with_checks(os.path, 'isdir', mock_isdir)
# The webpack compilation processes will be called 4 times as mock_isdir
# will return true after 4 calls.
check_call_swap = self.swap_with_checks(
subprocess, 'check_call', self.mock_check_call,
expected_args=[(expected_commands,)] * 4)
with self.node_bin_path_swap, self.webpack_bin_path_swap, (
check_call_swap):
with isdir_swap:
run_e2e_tests.run_webpack_compilation()
def test_get_chrome_driver_version(self):
def mock_popen(unused_arg):
class Ret(python_utils.OBJECT):
"""Return object with required attributes."""
def read(self):
"""Return required method."""
return '77.0.3865'
return Ret()
popen_swap = self.swap(os, 'popen', mock_popen)
def mock_url_open(unused_arg):
class Ret(python_utils.OBJECT):
"""Return object with required attributes."""
def read(self):
"""Return required method."""
return CHROME_DRIVER_VERSION
return Ret()
url_open_swap = self.swap(python_utils, 'url_open', mock_url_open)
with popen_swap, url_open_swap:
version = run_e2e_tests.get_chrome_driver_version()
self.assertEqual(version, CHROME_DRIVER_VERSION)
def test_run_webpack_compilation_failed(self):
def mock_isdir(unused_port):
return False
def mock_exit(unused_exit_code):
return
expected_commands = [
self.mock_node_bin_path, self.mock_webpack_bin_path, '--config',
'webpack.dev.config.ts']
# The webpack compilation processes will be called five times.
check_call_swap = self.swap_with_checks(
subprocess, 'check_call', self.mock_check_call,
expected_args=[(expected_commands,)] * 5)
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
exit_swap = self.swap_with_checks(
sys, 'exit', mock_exit, expected_args=[(1,)])
with self.node_bin_path_swap, self.webpack_bin_path_swap:
with check_call_swap, isdir_swap, exit_swap:
run_e2e_tests.run_webpack_compilation()
def test_run_webdriver_manager(self):
expected_commands = [
common.NODE_BIN_PATH, run_e2e_tests.WEBDRIVER_MANAGER_BIN_PATH,
'start', '--detach']
def mock_popen(unused_command):
class Ret(python_utils.OBJECT):
"""Return object with required attributes."""
def __init__(self):
self.returncode = 0
def communicate(self):
"""Return required method."""
return '', ''
return Ret()
popen_swap = self.swap_with_checks(
subprocess, 'Popen', mock_popen, expected_args=[
(expected_commands,)], expected_kwargs=[{}])
with popen_swap:
run_e2e_tests.run_webdriver_manager(['start', '--detach'])
def test_setup_and_install_dependencies_without_skip(self):
def mock_install_third_party_libs_main():
return
install_swap = self.swap_with_checks(
install_third_party_libs, 'main',
mock_install_third_party_libs_main)
with install_swap:
run_e2e_tests.setup_and_install_dependencies(False)
def test_setup_and_install_dependencies_on_travis(self):
def mock_install_third_party_libs_main():
return
def mock_install_chrome_main(args): # pylint: disable=unused-argument
return
def mock_getenv(unused_variable_name):
return True
install_swap = self.swap_with_checks(
install_third_party_libs, 'main',
mock_install_third_party_libs_main)
install_chrome_swap = self.swap_with_checks(
install_chrome_on_travis, 'main', mock_install_chrome_main,
expected_kwargs=[{'args': []}])
getenv_swap = self.swap_with_checks(
os, 'getenv', mock_getenv, expected_args=[('TRAVIS',)])
with install_swap, install_chrome_swap:
with getenv_swap:
run_e2e_tests.setup_and_install_dependencies(False)
def test_setup_and_install_dependencies_with_skip(self):
def mock_install_third_party_libs_main(unused_args):
return
install_swap = self.swap_with_checks(
install_third_party_libs, 'main',
mock_install_third_party_libs_main, called=False)
with install_swap:
run_e2e_tests.setup_and_install_dependencies(True)
def test_build_js_files_in_dev_mode_with_hash_file_exists(self):
def mock_isdir(unused_path):
return True
expected_commands = [
self.mock_node_bin_path, self.mock_webpack_bin_path, '--config',
'webpack.dev.config.ts']
isdir_swap = self.swap_with_checks(os.path, 'isdir', mock_isdir)
check_call_swap = self.swap_with_checks(
subprocess, 'check_call', self.mock_check_call,
expected_args=[(expected_commands,)])
build_main_swap = self.swap_with_checks(
build, 'main', self.mock_build_main, expected_kwargs=[{'args': []}])
print_swap = self.print_swap(called=False)
with print_swap, self.constant_file_path_swap, check_call_swap:
with self.node_bin_path_swap, self.webpack_bin_path_swap:
with build_main_swap, isdir_swap:
run_e2e_tests.build_js_files(True)
def test_build_js_files_in_dev_mode_with_exception_raised(self):
def mock_check_call(commands):
raise subprocess.CalledProcessError(
returncode=2, cmd=commands, output='ERROR')
def mock_exit(unused_code):
pass
expected_commands = [
self.mock_node_bin_path, self.mock_webpack_bin_path, '--config',
'webpack.dev.config.ts']
check_call_swap = self.swap_with_checks(
subprocess, 'check_call', mock_check_call,
expected_args=[(expected_commands,)])
build_main_swap = self.swap_with_checks(
build, 'main', self.mock_build_main, expected_kwargs=[{'args': []}])
exit_swap = self.swap_with_checks(
sys, 'exit', mock_exit, expected_args=[(2,)])
print_swap = self.print_swap(expected_args=[('ERROR',)])
with print_swap, self.constant_file_path_swap:
with self.node_bin_path_swap, self.webpack_bin_path_swap:
with check_call_swap, exit_swap, build_main_swap:
run_e2e_tests.build_js_files(True)
def test_build_js_files_in_prod_mode(self):
run_cmd_swap = self.swap_with_checks(
common, 'run_cmd', self.mock_run_cmd, called=False)
build_main_swap = self.swap_with_checks(
build, 'main', self.mock_build_main,
expected_kwargs=[{'args': ['--prod_env']}])
import os
import settings
import subprocess
import shlex
from logger import Logger
'''Wrapper around rdiff-backup
This module provides facilities for centrally managing a large set of
rdiff-backup backup jobs. Backup job management is built around common tools
like cron, run-parts, and xargs. The base features include:
* central configuration file
* backup jobs for local and remote hosts
* configurable job parallelization
* ability to run arbitrary commands locally or remotely before and after
backup jobs (something especially handy for preparing databases pre-backup)
* logging to syslog
The base features are designed to be extended and we include an extension to
manage the setup and tear down of LVM snapshots for backup.
'''
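# A minimal usage sketch (illustrative only -- the label, hostname, directory,
# and timespec below are made up, and settings must supply rdiff_backup_path,
# backup_store_path, etc.):
#
#     backup = ARIBackup(label='web1-etc', source_hostname='web1.example.com',
#                        remove_older_than_timespec='30D')
#     backup.include_dir_list.append('/etc')
#     backup.run_backup()
#
# Driving a directory of such job scripts from cron via run-parts (and xargs
# for parallelism) gives the centrally managed job set described above.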
class ARIBackup(object):
'''Base class includes core features and basic rdiff-backup functionality
This class can be used if all that is needed is to leverage the basic
rdiff-backup features. The pre and post hook functionality as well as
command execution is also part of this class.
'''
def __init__(self, label, source_hostname, remove_older_than_timespec=None):
# The name of the backup job (this will be the name of the directory in the backup store
# that has the data).
self.label = label
# This is the host that has the source data.
self.source_hostname = source_hostname
# We'll bring in the remote_user from our settings, but it is a var
# that the end-user is welcome to override.
self.remote_user = settings.remote_user
# setup logging
self.logger = Logger('ARIBackup ({label})'.format(label=label), settings.debug_logging)
# Include nothing by default
self.include_dir_list = []
self.include_file_list = []
# Exclude nothing by default
# We'll put the '**' exclude on the end of the arg_list later
self.exclude_dir_list = []
self.exclude_file_list = []
# initialize hook lists
self.pre_job_hook_list = []
self.post_job_hook_list = []
        if remove_older_than_timespec is not None:
self.post_job_hook_list.append((
self._remove_older_than,
{'timespec': remove_older_than_timespec}))
def _process_pre_job_hooks(self):
self.logger.info('processing pre-job hooks...')
for task in self.pre_job_hook_list:
# Let's do some assignments for readability
hook = task[0]
kwargs = task[1]
hook(**kwargs)
def _process_post_job_hooks(self, error_case):
if error_case:
self.logger.error('processing post-job hooks for error case...')
else:
self.logger.info('processing post-job hooks...')
for task in self.post_job_hook_list:
# Let's do some assignments for readability
hook = task[0]
kwargs = task[1]
kwargs.update({'error_case': error_case})
hook(**kwargs)
def _run_command(self, command, host='localhost'):
'''Runs an arbitrary command on host.
Given an input string or list, we attempt to execute it on the host via
SSH unless host is "localhost".
Returns a tuple with (stdout, stderr) if the exitcode is zero,
otherwise an Exception is raised.
'''
# make args a list if it's not already so
if isinstance(command, basestring):
args = shlex.split(command)
elif isinstance(command, list):
args = command
else:
raise Exception('_run_command: command arg must be str or list')
# add SSH arguments if this is a remote command
if host != 'localhost':
ssh_args = shlex.split('%s %s@%s' % (settings.ssh_path, self.remote_user, host))
args = ssh_args + args
try:
self.logger.debug('_run_command %r' % args)
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # We really want to block until our subprocess exits or
# KeyboardInterrupt. If we don't, clean up tasks can likely fail.
try:
stdout, stderr = p.communicate()
except KeyboardInterrupt:
# TODO terminate() doesn't block, so we'll need to poll
p.terminate()
raise KeyboardInterrupt
if stdout:
self.logger.debug(stdout)
if stderr:
# Warning level should be fine here since we'll also look at
# the exitcode.
self.logger.warning(stderr)
exitcode = p.returncode
except IOError:
raise Exception('Unable to execute/find {args}'.format(args=args))
if exitcode > 0:
error_message = ('[{host}] A command terminated with errors and likely requires intervention. The '
'command attempted was "{command}".').format(
host=host, command=command)
raise Exception(error_message)
return (stdout, stderr)
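    # Illustrative only: hooks and subclasses use this helper for both local and
    # remote work, e.g.
    #     stdout, stderr = self._run_command('lvs', host='db1.example.com')
    # which is executed as "<settings.ssh_path> <remote_user>@db1.example.com lvs".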
def run_backup(self):
self.logger.info('started')
try:
error_case = False
self._process_pre_job_hooks()
self.logger.info('data backup started...')
self._run_backup()
self.logger.info('data backup complete')
except Exception, e:
error_case = True
self.logger.error((str(e)))
self.logger.info("let's try to clean up...")
except KeyboardInterrupt:
error_case = True
# using error level here so that these messages will
# print to the console
self.logger.error('backup job cancelled by user')
self.logger.error("let's try to clean up...")
finally:
self._process_post_job_hooks(error_case)
self.logger.info('stopped')
def _run_backup(self, top_level_src_dir='/'):
'''Run rdiff-backup job.
Builds an argument list for a full rdiff-backup command line based on
the settings in the instance and optionally the top_level_src_dir
parameter. Said parameter is used to define the context for the backup
        mirror. This is especially handy when backing up mounted snapshots so
that the mirror doesn't contain the directory where the snapshot is
mounted.
'''
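        # For orientation (an illustration with made-up paths and label): for a
        # remote host with a single include, the assembled command is equivalent to
        #   rdiff-backup --exclude-device-files --exclude-fifos --exclude-sockets
        #       --terminal-verbosity 1 --include /etc --exclude '**'
        #       backups@web1.example.com::/ /srv/backup-store/web1-etc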
self.logger.debug('_run_backup started')
# Init our arguments list with the path to rdiff-backup.
# This will be in the format we'd normally pass to the command-line
# e.g. [ '--include', '/dir/to/include', '--exclude', '/dir/to/exclude']
arg_list = [settings.rdiff_backup_path]
# setup some default rdiff-backup options
# TODO provide a way to override these
arg_list.append('--exclude-device-files')
arg_list.append('--exclude-fifos')
arg_list.append('--exclude-sockets')
# Bring the terminal verbosity down so that we only see errors
arg_list += ['--terminal-verbosity', '1']
# This conditional reads strangely, but that's because rdiff-backup
# not only defaults to having SSH compression enabled, it also doesn't
# have an option to explicitly enable it -- only one to disable it.
if not settings.ssh_compression:
arg_list.append('--ssh-no-compression')
# Populate self.argument list
for exclude_dir in self.exclude_dir_list:
arg_list.append('--exclude')
arg_list.append(exclude_dir)
for exclude_file in self.exclude_file_list:
arg_list.append('--exclude-filelist')
arg_list.append(exclude_file)
for include_dir in self.include_dir_list:
arg_list.append('--include')
arg_list.append(include_dir)
for include_file in self.include_file_list:
arg_list.append('--include-filelist')
arg_list.append(include_file)
# Exclude everything else
arg_list.append('--exclude')
arg_list.append('**')
# Add a source argument
if self.source_hostname == 'localhost':
arg_list.append(top_level_src_dir)
else:
arg_list.append(
'{remote_user}@{source_hostname}::{top_level_src_dir}'.format(
remote_user=self.remote_user,
source_hostname=self.source_hostname,
top_level_src_dir=top_level_src_dir
)
)
# Add a destination argument
arg_list.append(
'{backup_store_path}/{label}'.format(
backup_store_path=settings.backup_store_path,
label=self.label
)
)
# Rdiff-backup GO!
self._run_command(arg_list)
self.logger.debug('_run_backup completed')
def _remove_older_than(self, timespec, error_case):
'''Trims increments older than timespec
        Post-job hook that uses rdiff-backup's --remove-older-than feature to
trim old increments from the backup history
'''
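        # timespec uses rdiff-backup's interval format, e.g. '30D' (30 days) or
        # '1W' (one week) -- a property of the underlying tool, not this module.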
if not error_case:
self.logger.info('remove_older_than %s started' % timespec)
arg_list = [settings.rdiff_backup_path]
arg_list.append('--force')
arg_list.append('--remove-older-than')
arg_list.append(timespec)
arg_list.append('%s/%s' % (settings.backup_store_path, self.label))
self._run_command(arg_list)
self.logger.info('remove_older_than %s completed' % timespec)
class LVMBackup(ARIBackup):
def __init__(self, label, source_hostname, remove_older_than_timespec=None):
super(LVMBackup, self).__init__(label, source_hostname, remove_older_than_timespec)
        # This is a list of tuples, where each tuple gives the LV to back up,
        # the mount point for that LV, and optionally any mount options.
        # For example: [('hostname/root', '/', 'noatime'),]
# TODO I wonder if noatime being used all the time makes sense to
# improve read performance and reduce writes to the snapshots.
self.lv_list = []
# a list of dicts with the snapshot paths and where they should be
# mounted
self.lv_snapshots = []
# mount the snapshots in a directory named for this job's label
self.snapshot_mount_point_base_path = os.path.join(settings.snapshot_mount_root, self.label)
# setup pre and post job hooks to manage snapshot work flow
self.pre_job_hook_list.append((self._create_snapshots, {}))
self.pre_job_hook_list.append((self._mount_snapshots, {}))
self.post_job_hook_list.append((self._umount_snapshots, {}))
self.post_job_hook_list.append((self._delete_snapshots, {}))
def _create_snapshots(self):
        '''Creates snapshots of all the volumes listed in self.lv_list'''
self.logger.info('creating LVM snapshots...')
for volume in self.lv_list:
try:
lv_path, src_mount_path, mount_options = volume
except ValueError:
lv_path, src_mount_path = volume
mount_options = None
vg_name, lv_name = lv_path.split('/')
new_lv_name = lv_name + settings.snapshot_suffix
mount_path = '{snapshot_mount_point_base_path}{src_mount_path}'.format(
snapshot_mount_point_base_path=self.snapshot_mount_point_base_path,
src_mount_path=src_mount_path
)
# TODO Is it really OK to always make a 1GB exception table?
self._run_command('lvcreate -s -L 1G %s -n %s' % (lv_path, new_lv_name), self.source_hostname)
self.lv_snapshots.append({
'lv_path': vg_name + '/' + new_lv_name,
'mount_path': mount_path,
'mount_options': mount_options,
'created': True,
'mount_point_created': False,
'mounted': False,
})
def _delete_snapshots(self, error_case=None):
'''Deletes snapshots in self.lv_snapshots
This method behaves the same in the normal and error cases.
'''
self.logger.info('deleting LVM snapshots...')
for snapshot in self.lv_snapshots:
if snapshot['created']:
lv_path = snapshot['lv_path']
# -f makes lvremove not interactive
self._run_command('lvremove -f %s' % lv_path, self.source_hostname)
snapshot.update({'created': False})
def _mount_snapshots(self):
self.logger.info('mounting LVM snapshots...')
for snapshot in self.lv_snapshots:
lv_path = snapshot['lv_path']
device_path = '/dev/' + lv_path
mount_path = snapshot['mount_path']
mount_options = snapshot['mount_options']
# mkdir the mount point
self._run_command('mkdir -p %s' % mount_path, self.source_hostname)
snapshot.update({'mount_point_created': True})
# If where we want to mount our LV is already a mount point then
# let's back out.
if os.path.ismount(mount_path):
raise Exception("{mount_path} is already a mount point".format(mount_path=mount_path))
# mount the LV, possibly with mount options
if mount_options:
command = 'mount -o {mount_options} {device_path} {mount_path}'.format(
mount_options=mount_options,
device_path=device_path,
mount_path=mount_path
)
else:
command = 'mount {device_path} {mount_path}'.format(
device_path=device_path,
mount_path=mount_path
)
self._run_command(command, self.source_hostname)
snapshot.update({'mounted': True})
def _umount_snapshots(self, error_case=None):
'''Umounts mounted snapshots in self.lv_snapshots
This method behaves the same in the normal and error cases.
'''
from __future__ import annotations # postpone evaluation of annotations
import logging
from typing import Any, Dict, List, Optional, Tuple
import cv2
import numpy as np
import numpy.typing as npt
from pyquaternion import Quaternion
from scipy import ndimage
from scipy.spatial.transform import Rotation as R
from sqlalchemy import Column, inspect
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Float, Integer
from nuplan.database.common import sql_types
from nuplan.database.common.utils import simple_repr
from nuplan.database.maps_db.gpkg_mapsdb import GPKGMapsDB
from nuplan.database.maps_db.utils import build_lane_segments_from_blps, connect_blp_predecessor, connect_blp_successor
from nuplan.database.nuplan_db.models import Base, Image, generate_multi_scale_connections
from nuplan.database.nuplan_db.utils import crop_rect, get_candidates
from nuplan.database.nuplan_db.vector_map_np import VectorMapNp
logger = logging.getLogger()
class EgoPose(Base):
"""
Ego vehicle pose at a particular timestamp. Given with respect to global coordinate system.
"""
__tablename__ = "ego_pose"
token = Column(sql_types.HexLen8, primary_key=True) # type: str
timestamp = Column(Integer) # field type: int
x = Column(Float) # type: float
y = Column(Float) # type: float
z = Column(Float) # type: float
qw: float = Column(Float)
qx: float = Column(Float)
qy: float = Column(Float)
qz: float = Column(Float)
vx = Column(Float) # type: float
vy = Column(Float) # type: float
vz = Column(Float) # type: float
acceleration_x = Column(Float) # type: float
acceleration_y = Column(Float) # type: float
acceleration_z = Column(Float) # type: float
angular_rate_x = Column(Float) # type: float
angular_rate_y = Column(Float) # type: float
angular_rate_z = Column(Float) # type: float
epsg = Column(Integer) # type: int
log_token = Column(sql_types.HexLen8, ForeignKey("log.token"), nullable=False) # type: str
image = relationship(
"Image", foreign_keys="Image.ego_pose_token", back_populates="ego_pose", uselist=False
) # type: Image
@property
def _session(self) -> Any:
"""
Get the underlying session.
:return: The underlying session.
"""
return inspect(self).session
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def quaternion(self) -> Quaternion:
"""
        Get the orientation of the ego vehicle as a quaternion with respect to the global coordinate system.
:return: The orientation in quaternion.
"""
return Quaternion(self.qw, self.qx, self.qy, self.qz)
@property
def translation_np(self) -> npt.NDArray[np.float64]:
"""
Position of the ego vehicle with respect to the global coordinate system.
:return: <np.float: 3> Translation.
"""
return np.array([self.x, self.y, self.z])
@property
def trans_matrix(self) -> npt.NDArray[np.float64]:
"""
Get the transformation matrix.
:return: <np.float: 4, 4>. Transformation matrix.
"""
tm: npt.NDArray[np.float64] = self.quaternion.transformation_matrix
tm[:3, 3] = self.translation_np
return tm
@property
def trans_matrix_inv(self) -> npt.NDArray[np.float64]:
"""
Get the inverse transformation matrix.
:return: <np.float: 4, 4>. Inverse transformation matrix.
"""
tm: npt.NDArray[np.float64] = np.eye(4)
rot_inv = self.quaternion.rotation_matrix.T
tm[:3, :3] = rot_inv
tm[:3, 3] = rot_inv.dot(np.transpose(-self.translation_np))
return tm
def rotate_2d_points2d_to_ego_vehicle_frame(self, points2d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""
Rotate 2D points from global frame to ego-vehicle frame.
:param points2d: <np.float: num_points, 2>. 2D points in global frame.
:return: <np.float: num_points, 2>. 2D points rotated to ego-vehicle frame.
"""
# Add zeros to the z dimension to make them 3D points.
points3d: npt.NDArray[np.float32] = np.concatenate((points2d, np.zeros_like(points2d[:, 0:1])), axis=-1)
# We need to extract the rotation around the z-axis only, since we are cropping a 2D map.
# Construct scipy rotation instance using the rotation matrix from quaternion.
rotation = R.from_matrix(self.quaternion.rotation_matrix.T)
# Extract the angle of rotation around z-axis from the rotation.
ego_rotation_angle = rotation.as_euler('zxy', degrees=True)[0]
# Construct scipy rotation instance using ego_rotation_angle.
xy_rotation = R.from_euler('z', ego_rotation_angle, degrees=True)
# Rotate the corner points of the desired map crop to align with ego pose.
rotated_points3d = xy_rotation.apply(points3d)
# Remove the z dimension.
rotated_points2d: npt.NDArray[np.float64] = rotated_points3d[:, :2]
return rotated_points2d
def get_map_crop(
self,
maps_db: Optional[GPKGMapsDB],
xrange: Tuple[float, float],
yrange: Tuple[float, float],
map_layer_name: str,
rotate_face_up: bool,
target_imsize_xy: Optional[Tuple[float, float]] = None,
) -> Tuple[Optional[npt.NDArray[np.float64]], npt.NDArray[np.float64], Tuple[float, ...]]:
"""
This function returns the crop of the map centered at the current ego-pose with the given xrange and yrange.
:param maps_db: Map database associated with this database.
:param xrange: The range in x direction in meters relative to the current ego-pose, e.g. (-60, 60).
:param yrange: The range in y direction in meters relative to the current ego-pose, e.g. (-60, 60).
:param map_layer_name: A relevant map layer. Eg: 'drivable_area' or 'intensity'.
:param rotate_face_up: Boolean indicating whether to rotate the image face up with respect to ego-pose.
:param target_imsize_xy: The target grid xy dimensions for the output array. The xy resolution in meters / grid
may be scaled by zooming to the desired dimensions.
:return: (map_crop, map_translation, map_scale). Where:
map_crop: The desired crop of the map.
map_translation: The translation in map coordinates from the origin to the ego-pose.
map_scale: Map scale (inverse of the map precision). This will be a tuple specifying the zoom in both the x
and y direction if the target_imsize_xy parameter was set, which causes the resolution to change.
map_scale and map_translation are useful for transforming objects like pointcloud/boxes to the map_crop.
Refer to render_on_map().
"""
if maps_db is None:
precision: float = 1
map_layer = None  # no map layer available without a maps_db; checked before cropping below
def to_pixel_coords(x: float, y: float) -> Tuple[float, float]:
"""
Get the image coordinates given the x-y coordinates of point. This implementation simply returns the
same coordinates.
:param x: Global x coordinate.
:param y: Global y coordinate.
:return: Pixel coordinates in map.
"""
return x, y
else:
map_layer = maps_db.load_layer(self.log.map_version, map_layer_name)
precision = map_layer.precision
to_pixel_coords = map_layer.to_pixel_coords
map_scale: Tuple[float, ...] = (1.0 / precision, 1.0 / precision, 1.0)
ego_translation = self.translation_np
center_x, center_y = to_pixel_coords(ego_translation[0], ego_translation[1])
center_x, center_y = int(center_x), int(center_y)
top_left = int(xrange[0] * map_scale[0]), int(yrange[0] * map_scale[1])
bottom_right = int(xrange[1] * map_scale[0]), int(yrange[1] * map_scale[1])
# We need to extract the rotation around the z-axis only, since we are cropping a 2D map.
# Construct scipy rotation instance using the rotation matrix from quaternion.
rotation = R.from_matrix(self.quaternion.rotation_matrix.T)
# Extract the angle of rotation around z-axis from the rotation.
ego_rotation_angle = rotation.as_euler('zxy', degrees=True)[0]
# Construct scipy rotation instance using ego_rotation_angle.
xy_rotation = R.from_euler('z', ego_rotation_angle, degrees=True)
map_rotate = 0
# Rotate the corner points of the desired map crop to align with ego pose.
rotated = xy_rotation.apply(
[
[top_left[0], top_left[1], 0],
[top_left[0], bottom_right[1], 0],
[bottom_right[0], top_left[1], 0],
[bottom_right[0], bottom_right[1], 0],
]
)[:, :2]
# Construct minAreaRect using 4 corner points
rect = cv2.minAreaRect(np.hstack([rotated[:, :1] + center_x, rotated[:, 1:] + center_y]).astype(int))
rect_angle = rect[2]
# Due to rounding error, the dimensions returned by cv2 may be off by 1, therefore it's better to manually
# calculate the cropped dimensions instead of relying on the values returned by cv2 in rect[1]
cropped_dimensions: npt.NDArray[np.float32] = np.array(
[map_scale[0] * (xrange[1] - xrange[0]), map_scale[1] * (yrange[1] - yrange[0])]
)
rect = (rect[0], cropped_dimensions, rect_angle)
# In OpenCV 4.4, the angle returned by cv2.minAreaRect is [-90,0). In OpenCV 4.5, the angle returned
# appears to be [0, 90), though this isn't documented anywhere. To be compatible with both versions,
# we adjust the angle to be [-90,0) if it isn't already.
if rect_angle >= 0:
rect = (rect[0], cropped_dimensions, rect_angle - 90)
else:
rect = (rect[0], cropped_dimensions, rect_angle)
# We construct rect using cv2.minAreaRect, which takes only 4 unordered corner points, and can not consider
# the angle of the required rect. The range of of 'angle' in cv2.minAreaRect is [-90,0).
# A good explanation for the angle can be found at :
# https://namkeenman.wordpress.com/2015/12/18/open-cv-determine-angle-of-rotatedrect-minarearect/
# Hence, we have to manually rotate the map after cropping based on the initial rotation angle.
if ego_rotation_angle < -90:
map_rotate = -90
if -90 < ego_rotation_angle < 0:
map_rotate = 0
if 0 < ego_rotation_angle < 90:
map_rotate = 90
if 90 < ego_rotation_angle < 180:
map_rotate = 180
if map_layer is None:
map_crop = None
else:
# Crop the rect using minAreaRect.
map_crop = crop_rect(map_layer.data, rect)
# Rotate the cropped map using adjusted angles,
# since the angle is reset in cv2.minAreaRect every 90 degrees.
map_crop = ndimage.rotate(map_crop, map_rotate, reshape=False)
if rotate_face_up:
# The map_crop is aligned with the ego_pose, but ego_pose is facing towards the right of the canvas,
# but we need ego_pose to be facing up, hence rotating an extra 90 degrees.
map_crop = np.rot90(map_crop)
# These are in units of pixels, where x
# repo: zerorock1312/lt-maker-master
from app.constants import WINWIDTH, WINHEIGHT, TILEX, TILEY
from app.utilities import utils
from app.sprites import SPRITES
from app.engine.sound import SOUNDTHREAD
from app.data.database import DB
from app.engine import engine, image_mods
from app.engine.state import MapState
from app.engine.game_state import game
from app.engine.input_manager import INPUT
class Link():
def __init__(self, pos):
self.position = pos
self.adjacent_links = set()
self.chain = None
self.orientation = None
def __repr__(self):
return '%s %s' % (self.position, self.orientation)
class CliffManager():
def __init__(self, cliff_positions, size):
self.unexplored = set([Link(pos) for pos in cliff_positions])
unexplored_length = len(self.unexplored)
self.chains = []
if self.unexplored:
self.gen_chains()
self.width, self.height = size
self.orientation_grid = [0 for _ in range(self.width*self.height)]
chain_length = sum(len(chain) for chain in self.chains)
assert chain_length == unexplored_length, "%s, %s" % (chain_length, unexplored_length)
for chain in self.chains:
self.place_chain(chain)
def gen_chains(self):
current_chain = set()
explored = []
explored.append(self.unexplored.pop())
while explored:
current_link = explored.pop()
current_chain.add(current_link)
current_link.chain = current_chain
adj = self.get_adjacent(current_link)
if adj:
for a in adj:
self.make_adjacent(current_link, a)
explored.append(a)
self.unexplored -= adj
elif explored:
continue
else:
self.chains.append(current_chain)
if self.unexplored:
current_chain = set()
current_link = self.unexplored.pop()
explored.append(current_link)
def make_adjacent(self, a, b):
a.adjacent_links.add(b)
b.adjacent_links.add(a)
def is_adjacent(self, pos1, pos2) -> bool:
return pos1 in ((pos2[0], pos2[1] - 1), (pos2[0] - 1, pos2[1]), (pos2[0] + 1, pos2[1]), (pos2[0], pos2[1] + 1))
def get_adjacent(self, current_link):
adj = set()
pos = current_link.position
for link in self.unexplored:
if self.is_adjacent(link.position, pos):
adj.add(link)
for link in self.unexplored:
# Include a diagonal neighbour only if it is not orthogonally adjacent to any link already found adjacent
if link.position in ((pos[0] - 1, pos[1] - 1), (pos[0] - 1, pos[1] + 1), (pos[0] + 1, pos[1] - 1), (pos[0] + 1, pos[1] + 1)) and \
not any(self.is_adjacent(a.position, link.position) for a in adj):
adj.add(link)
return adj
def place_chain(self, chain):
if len(chain) == 1:
c_link = next(iter(chain))
c_link.orientation = 1
x, y = c_link.position
self.orientation_grid[x + y*self.width] = 1
return
# Look for endings links (ie, have only one adjacency)
ending_links = [link for link in chain if len(link.adjacent_links) == 1]
if len(ending_links) == 0: # Cycle
ending_link = next(iter(chain))
# Initial set-up
else:
ending_link = ending_links[0]
assert len(ending_link.adjacent_links) >= 1
adj_link = next(iter(ending_link.adjacent_links))
if len(chain) == 2: # Only if there are no middle links
dx, dy = self.get_difference(ending_link, adj_link)
if dx == 0:
ending_link.orientation = 2
elif dy == 0:
ending_link.orientation = 1
elif dx == dy:
ending_link.orientation = 3
else:
ending_link.orientation = 4
# Now iterate through
explored = set()
explored.add((ending_link, adj_link))
while explored:
prev, current = explored.pop()
other_adjs = current.adjacent_links - set([prev])
if other_adjs:
for adj in other_adjs:
self.find_orientation(prev, current, adj)
explored.add((current, adj))
else:
self.find_ending_orientation(prev, current)
# get starting point now -- only if there were middle links
if len(chain) != 2:
self.find_ending_orientation(adj_link, ending_link)
# Lastly, commit it to the orientation grid
for link in chain:
x, y = link.position
self.orientation_grid[x + y*self.width] = link.orientation
def find_orientation(self, prev, current, nxt):
pdx, pdy = self.get_difference(prev, current)
ndx, ndy = self.get_difference(current, nxt)
tdx, tdy = self.get_difference(prev, nxt)
if tdx == 0:
current.orientation = 2
return
if tdy == 0:
current.orientation = 1
return
slope = tdy/float(tdx)
if slope > 0:
current.orientation = 3
else:
current.orientation = 4
return
def find_ending_orientation(self, prev, current):
dx, dy = self.get_difference(prev, current)
if dy == 0:
if prev.orientation == 1:
current.orientation = 1
elif prev.orientation == 2:
current.orientation = 2
elif prev.orientation == 3:
current.orientation = 4
else:
current.orientation = 3
elif dx == 0:
if prev.orientation == 1:
current.orientation = 1
elif prev.orientation == 2:
current.orientation = 2
elif prev.orientation == 3:
current.orientation = 4
else:
current.orientation = 3
elif dx == dy:
current.orientation = 3
else:
current.orientation = 4
def get_difference(self, a, b):
dx = b.position[0] - a.position[0]
dy = b.position[1] - a.position[1]
return dx, dy
def get_orientation(self, pos):
x, y = pos
orientation = self.orientation_grid[x + y*self.width]
if orientation == 2:
return (9, 6)
elif orientation == 3:
return (11, 6)
elif orientation == 4:
return (10, 6)
else:
return (8, 6)
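# Illustrative sketch (not part of the original file): a tiny diagonal cliff run,
# just to show how CliffManager maps chain links to sprite orientations.
def _demo_cliff_orientation():
    demo = CliffManager({(1, 1), (2, 2), (3, 3)}, (5, 5))
    # A pure diagonal chain is expected to resolve to orientation 3 for every link,
    # i.e. sprite coordinates (11, 6).
    return [demo.get_orientation(pos) for pos in ((1, 1), (2, 2), (3, 3))]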
# Minimap
class MiniMap(object):
# Constants
minimap_tiles = SPRITES.get('Minimap_Tiles')
minimap_units = SPRITES.get('Minimap_Sprites')
minimap_cursor = SPRITES.get('Minimap_Cursor')
cliffs = ('Cliff', 'Desert_Cliff', 'Snow_Cliff')
complex_map = ('Wall', 'River', 'Sand', 'Sea')
scale_factor = 4
def __init__(self, tilemap, units):
self.tilemap = tilemap
self.width = self.tilemap.width
self.height = self.tilemap.height
self.colorkey = (0, 0, 0)
self.surf = engine.create_surface((self.width*self.scale_factor, self.height*self.scale_factor))
engine.set_colorkey(self.surf, self.colorkey, rleaccel=False) # black is transparent
self.pin_surf = engine.create_surface((self.width*self.scale_factor, self.height*self.scale_factor), transparent=True)
# All the rest of this is used for occlusion generation
self.bg = engine.copy_surface(self.surf)
self.starting_scale = 0.25
new_width = int(self.height*self.scale_factor*self.starting_scale)
new_height = int(self.width*self.scale_factor*self.starting_scale)
self.base_mask = engine.create_surface((new_width, new_height))
engine.set_colorkey(self.base_mask, self.colorkey, rleaccel=False)
engine.fill(self.base_mask, (255, 255, 255), None)
# Handle cliffs
cliff_positions = set()
for x in range(self.width):
for y in range(self.height):
minimap_nid = self.get_minimap_key((x, y))
if minimap_nid in self.cliffs:
cliff_positions.add((x, y))
self.cliff_manager = CliffManager(cliff_positions, (self.width, self.height))
# Build Terrain
for x in range(self.width):
for y in range(self.height):
minimap_nid = self.get_minimap_key((x, y))
sprite = self.handle_key(minimap_nid, (x, y))
self.surf.blit(sprite, (x*self.scale_factor, y*self.scale_factor))
# Fog of War
if game.level_vars['_fog_of_war']:
for x in range(self.width):
for y in range(self.height):
if not game.board.in_vision((x, y)):
mask = (x * self.scale_factor, y * self.scale_factor, self.scale_factor, self.scale_factor)
if game.level_vars['_fog_of_war'] == 2:
engine.fill(self.surf, (12, 12, 12), mask)
else:
engine.fill(self.surf, (128, 128, 128), mask, engine.BLEND_RGB_MULT)
self.surf = self.surf.convert()
# Build units
self.build_units(units)
def get_minimap_key(self, pos):
terrain_nid = self.tilemap.get_terrain(pos)
terrain = DB.terrain.get(terrain_nid)
if terrain:
minimap_nid = terrain.minimap
else:
minimap_nid = DB.minimap.single_map[0]
return minimap_nid
def handle_key(self, key, position):
# print(key)
# Normal keys
if key in DB.minimap.single_map:
return self.get_sprite(DB.minimap.single_map[key])
# Bridge
elif key == 'Bridge':
if self.bridge_left_right(position):
return self.get_sprite((0, 1))
else:
return self.get_sprite((8, 1))
# Door
elif key == 'Door':
return self.door_type(position)
# Wall, River, Desert, Sea
elif key in DB.minimap.complex_map:
return self.complex_shape(key, position)
# Coast
elif key == 'Coast':
return self.coast(position)
# Cliff
elif key in self.cliffs:
pos = self.cliff_manager.get_orientation(position)
if key == 'Desert_Cliff':
pos = (pos[0] + 4, pos[1])
elif key == 'Snow_Cliff':
pos = (pos[0] - 4, pos[1] + 1)
return self.get_sprite(pos)
# Error!
else:
print("Error! Unrecognized Minimap Key %s" % key)
def build_units(self, units):
for unit in units:
if unit.position and game.board.in_vision(unit.position):
pos = unit.position[0] * self.scale_factor, unit.position[1] * self.scale_factor
if unit.team == 'player':
self.pin_surf.blit(engine.subsurface(self.minimap_units, (0, 0, self.scale_factor, self.scale_factor)), pos)
elif unit.team == 'enemy':
self.pin_surf.blit(engine.subsurface(self.minimap_units, (self.scale_factor*1, 0, self.scale_factor, self.scale_factor)), pos)
elif unit.team == 'other':
self.pin_surf.blit(engine.subsurface(self.minimap_units, (self.scale_factor*2, 0, self.scale_factor, self.scale_factor)), pos)
else:
self.pin_surf.blit(engine.subsurface(self.minimap_units, (self.scale_factor*3, 0, self.scale_factor, self.scale_factor)), pos)
def coast(self, position, allow_recurse=True):
sea_keys = ('Sea', 'Pier', 'River', 'Bridge')
# A is up, B is left, C is right, D is down
# This code determines which minimap tiles would fit given each neighbouring side on its own, then intersects those candidate sets to pick the best tile
a, b, c, d, e, f, g, h = None, None, None, None, None, None, None, None
up_pos = position[0], position[1] - 1
if not self.tilemap.check_bounds(up_pos):
a = {2, 3, 4, 8, 11, 12, 13}
elif self.get_minimap_key(up_pos) in sea_keys:
a = {0, 1, 5, 6, 7, 9, 10}
elif self.get_minimap_key(up_pos) == 'Coast':
a = {2, 3, 5, 6, 7, 9, 10, 11, 12, 13}
else:
a = {2, 3, 4, 8, 11, 12, 13}
left_pos = position[0] - 1, position[1]
if not self.tilemap.check_bounds(left_pos):
b = {1, 3, 4, 7, 10, 12, 13}
elif self.get_minimap_key(left_pos) in sea_keys:
b = {0, 2, 5, 6, 8, 9, 11}
elif self.get_minimap_key(left_pos) == 'Coast':
b = {1, 4, 5, 6, 8, 9, 10, 11, 12, 13}
else:
b = {1, 3, 4, 7, 10, 12, 13}
right_pos = position[0] + 1, position[1]
if not self.tilemap.check_bounds(right_pos):
c = {1, 2, 4, 6, 9, 11, 13}
elif self.get_minimap_key(right_pos) in sea_keys:
c = {0, 3, 5, 7, 8, 10, 12}
elif self.get_minimap_key(right_pos) == 'Coast':
c = {1, 4, 5, 7, 8, 9, 10, 11, 12, 13}
else:
c = {1, 2, 4, 6, 9, 11, 13}
down_pos = position[0], position[1] + 1
if not self.tilemap.check_bounds(down_pos):
d = {1, 2, 3, 5, 9, 10, 13}
elif self.get_minimap_key(down_pos) in sea_keys:
d = {0, 4, 6, 7, 8, 11, 12}
elif self.get_minimap_key(down_pos) == 'Coast':
dist = np.zeros(indices.shape[1])
x_com, y_com, z_com = center_of_mass(plane)
for point in range(indices.shape[1]):
dist[point] = np.sqrt(
(indices[0, point] - x_com) ** 2
+ (indices[1, point] - y_com) ** 2
+ (indices[2, point] - z_com) ** 2
)
median_dist = np.median(dist)
if debugging:
gu.scatter_plot(
np.asarray(np.nonzero(plane)).transpose(),
labels=("axis 0", "axis 1", "axis 2"),
title="Points before distance threshold plane "
+ str(label)
+ f"\niteration {idx}",
)
for point in range(indices.shape[1]):
if dist[point] > median_dist:
plane[indices[0, point], indices[1, point], indices[2, point]] = 0
print(
"Fit plane",
label,
", ",
str(indices.shape[1] - plane[plane == 1].sum()),
"points too far from COM, ",
str(plane[plane == 1].sum()),
"remaining",
)
if debugging:
gu.scatter_plot(
np.asarray(np.nonzero(plane)).transpose(),
labels=("axis 0", "axis 1", "axis 2"),
title="Points after distance threshold plane "
+ str(label)
+ f"\niteration {idx}",
)
# update plane indices and check if enough points remain
indices = np.asarray(np.nonzero(plane))
if len(indices[0]) < 5:
no_points = True
return 0, indices, 0, no_points
# the fit parameters are (a, b, c, d) such that a*x + b*y + c*z + d = 0
params, std_param, valid_plane = util.plane_fit(
indices=indices, label=label, threshold=1, debugging=debugging
)
if not valid_plane:
plane[indices[0], indices[1], indices[2]] = 0  # zero only the plane voxels
no_points = True
return params, indices, std_param, no_points
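# Hedged sketch (not from the original module, reuses its numpy import): the
# point-to-plane distance used throughout the facet filtering. For a plane
# a*x + b*y + c*z + d = 0 and points of shape (3, N):
def _point_plane_distance(fit, indices):
    a, b, c, d = fit
    numerator = np.abs(a * indices[0] + b * indices[1] + c * indices[2] + d)
    return numerator / np.sqrt(a**2 + b**2 + c**2)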
def grow_facet(fit, plane, label, support, max_distance=0.90, debugging=True):
"""
Find voxels of the object which belong to a facet.
It uses the facet plane equation and the distance to the plane to find such voxels.
:param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0
:param plane: 3D binary support of the plane, with shape of the full dataset
:param label: the label of the plane processed
:param support: 3D binary support of the reconstructed object,
with shape of the full dataset
:param max_distance: in pixels, maximum allowed distance to the facet plane
of a voxel
:param debugging: set to True to see plots
:return: the updated plane, a stop flag
"""
nbz, nby, nbx = plane.shape
indices = np.nonzero(plane)
if len(indices[0]) == 0:
no_points = True
return plane, no_points
kernel = np.ones((3, 3, 3))
start_z = max(indices[0].min() - 20, 0)
stop_z = min(indices[0].max() + 21, nbz)
start_y = max(indices[1].min() - 20, 0)
stop_y = min(indices[1].max() + 21, nby)
start_x = max(indices[2].min() - 20, 0)
stop_x = min(indices[2].max() + 21, nbx)
# find nearby voxels using the coordination number
obj = np.copy(plane[start_z:stop_z, start_y:stop_y, start_x:stop_x])
coord = np.rint(convolve(obj, kernel, mode="same"))
coord = coord.astype(int)
coord[np.nonzero(coord)] = 1
if debugging:
gu.scatter_plot_overlaid(
arrays=(np.asarray(np.nonzero(coord)).T, np.asarray(np.nonzero(obj)).T),
markersizes=(2, 8),
markercolors=("b", "r"),
labels=("x", "y", "z"),
title="Plane" + str(label) + " before facet growing and coord matrix",
)
# update plane with new voxels
temp_plane = np.copy(plane)
temp_plane[start_z:stop_z, start_y:stop_y, start_x:stop_x] = coord
# remove voxels not belonging to the support
temp_plane[support == 0] = 0
# check distance of new voxels to the plane
plane, no_points = distance_threshold(
fit=fit,
indices=np.nonzero(temp_plane),
plane_shape=temp_plane.shape,
max_distance=max_distance,
)
plane_normal = fit[:-1] # normal is [a, b, c] if ax+by+cz+d=0
# calculate the local gradient for each point of the plane,
# gradients is a list of arrays of 3 vector components
indices = np.nonzero(plane)
gradients = surface_gradient(
list(zip(indices[0], indices[1], indices[2])), support=support
)
count_grad = 0
nb_indices = len(indices[0])
for idx in range(nb_indices):
if np.dot(plane_normal, gradients[idx]) < 0.75:
# 0.85 is too restrictive checked CH4760 S11 plane 1
plane[indices[0][idx], indices[1][idx], indices[2][idx]] = 0
count_grad += 1
indices = np.nonzero(plane)
if debugging and len(indices[0]) != 0:
gu.scatter_plot(
array=np.asarray(indices).T,
labels=("x", "y", "z"),
title="Plane" + str(label) + " after 1 cycle of facet growing",
)
print(f"{count_grad} points excluded by gradient filtering")
print(str(len(indices[0])) + " after 1 cycle of facet growing")
return plane, no_points
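# Hedged sketch (not from the original module): the "coordination number" trick used
# above is a binary dilation; convolving a 0/1 mask with a 3x3x3 kernel of ones and
# binarizing marks every voxel touching the current plane support.
def _demo_coordination_number():
    from scipy.signal import convolve
    mask = np.zeros((5, 5, 5))
    mask[2, 2, 2] = 1
    coord = np.rint(convolve(mask, np.ones((3, 3, 3)), mode="same")).astype(int)
    coord[np.nonzero(coord)] = 1
    return coord.sum()  # 27 voxels: the centre plus its 26 neighbours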
def offset_plane(indices, offset, plane_normal):
"""
Shift plane indices by the offset value in order to scan perpendicular to the plane.
:param indices: tuple of 3 1D ndarrays (array shape = nb_points)
:param offset: offset to be applied to the indices (offset of the plane)
:param plane_normal: ndarray of 3 elements, normal to the plane
:return: the offset indices
"""
if not isinstance(indices, tuple):
raise ValueError("indices should be a tuple of 3 1D ndarrays")
new_indices0 = np.rint(
indices[0]
+ offset
* np.dot(np.array([1, 0, 0]), plane_normal / np.linalg.norm(plane_normal))
).astype(int)
new_indices1 = np.rint(
indices[1]
+ offset
* np.dot(np.array([0, 1, 0]), plane_normal / np.linalg.norm(plane_normal))
).astype(int)
new_indices2 = np.rint(
indices[2]
+ offset
* np.dot(np.array([0, 0, 1]), plane_normal / np.linalg.norm(plane_normal))
).astype(int)
return new_indices0, new_indices1, new_indices2
def remove_duplicates(vertices, faces, debugging=False):
"""
Remove duplicates in a list of vertices and faces.
A face is a triangle made of three vertices.
:param vertices: a ndarray of vertices, shape (N, 3)
:param faces: a ndarray of vertex indices, shape (M, 3)
:param debugging: True to see which vertices are duplicated and how lists are
modified
:return: the updated vertices and faces with duplicates removed in place
"""
# find indices which are duplicated
uniq_vertices, uniq_inverse = np.unique(vertices, axis=0, return_inverse=True)
indices, count = np.unique(uniq_inverse, return_counts=True)
duplicated_indices = indices[count != 1] # list of vertices which are not unique
# for each duplicated vertex, build the list of the corresponding identical vertices
list_duplicated = []
for idx, value in enumerate(duplicated_indices):
same_vertices = np.argwhere(vertices == uniq_vertices[value, :])
# same_vertices is a ndarray of the form
# [[ind0, 0], [ind0, 1], [ind0, 2], [ind1, 0], [ind1, 1], [ind1, 2],...]
list_duplicated.append(list(same_vertices[::3, 0]))
# remove duplicates in vertices
remove_vertices = [value for sublist in list_duplicated for value in sublist[1:]]
vertices = np.delete(vertices, remove_vertices, axis=0)
print(len(remove_vertices), "duplicated vertices removed")
# remove duplicated_vertices in faces
for idx, temp_array in enumerate(list_duplicated):
for idy in range(1, len(temp_array)):
duplicated_value = temp_array[idy]
faces[faces == duplicated_value] = temp_array[0]
# temp_array[0] is the unique value, others are duplicates
# all indices above duplicated_value have to be decreased by 1
# to keep the match with the number of vertices
faces[faces > duplicated_value] = faces[faces > duplicated_value] - 1
# update accordingly all indices above temp_array[idy]
if debugging:
print("temp_array before", temp_array)
print("list_duplicated before", list_duplicated)
temp_array = [
(value - 1) if value > duplicated_value else value
for value in temp_array
]
list_duplicated = [
[
(value - 1) if value > duplicated_value else value
for value in sublist
]
for sublist in list_duplicated
]
if debugging:
print("temp_array after", temp_array)
print("list_duplicated after", list_duplicated)
# look for faces with 2 identical vertices
# (cannot define later a normal to these faces)
remove_faces = []
for idx in range(faces.shape[0]):
if np.unique(faces[idx, :], axis=0).shape[0] != faces[idx, :].shape[0]:
remove_faces.append(idx)
faces = np.delete(faces, remove_faces, axis=0)
print(len(remove_faces), "faces with identical vertices removed")
return vertices, faces
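# Illustrative usage (hypothetical toy mesh): vertices is an (N, 3) float array and
# faces an (M, 3) array of indices into vertices; the call merges duplicated vertices
# and remaps or drops the affected faces:
# vertices, faces = remove_duplicates(vertices, faces, debugging=False)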
def surface_indices(surface, plane_indices, margin=3):
"""
Find surface indices potentially belonging to a plane.
It crops the surface around the plane with a certain margin, and find corresponding
surface indices.
:param surface: the 3D surface binary array
:param plane_indices: tuple of 3 1D-arrays of plane indices
:param margin: margin to include around the plane indices, in pixels
:return: 3*1D arrays of surface indices
"""
valid.valid_ndarray(surface, ndim=3)
if not isinstance(plane_indices, tuple):
plane_indices = tuple(plane_indices)
surf_indices = np.nonzero(
surface[
plane_indices[0].min() - margin : plane_indices[0].max() + margin,
plane_indices[1].min() - margin : plane_indices[1].max() + margin,
plane_indices[2].min() - margin : plane_indices[2].max() + margin,
]
)
surf0 = (
surf_indices[0] + plane_indices[0].min() - margin
) # add margin plane_indices[0].min() - margin
surf1 = (
surf_indices[1] + plane_indices[1].min() - margin
) # add margin plane_indices[1].min() - margin
surf2 = (
surf_indices[2] + plane_indices[2].min() - margin
) # add margin plane_indices[2].min() - margin
return surf0, surf1, surf2
def stereographic_proj(
normals,
intensity,
max_angle,
savedir,
voxel_size,
projection_axis,
min_distance=10,
background_south=-1000,
background_north=-1000,
save_txt=False,
cmap=default_cmap,
planes_south=None,
planes_north=None,
plot_planes=True,
scale="linear",
comment_fig="",
debugging=False,
):
"""
Detect facets in an object.
It uses a stereographic projection of normals to mesh triangles and watershed
segmentation.
:param normals: array of normals to mesh triangles (nb_normals rows x 3 columns)
:param intensity: array of intensities (nb_normals rows x 1 column)
:param max_angle: maximum angle in degree of the stereographic projection
(should be larger than 90)
:param savedir: directory for saving figures
:param voxel_size: tuple of three numbers corresponding to the real-space
| |
+ browse_string) # if not, assume that it is a URL without protocol and add it
browser_msg = "glue browse [ " + browse_string + " ] complete\n"
self.view.run_command('glue_writer', {'text': browser_msg, 'command': glue_command, 'exit': False})
else:
browser_error_msg = "Please enter a URL or local filepath after the glue browse command\n"
self.view.run_command('glue_writer', {'text': browser_error_msg, 'command': glue_command, 'exit': False})
# CLEAR command
elif com_args[1] == "clear":
self.view.run_command('glue_clear_editor')
# keeps the input panel open for more commands
self.view.run_command('glue')
# FINDER command
elif com_args[1] == "finder":
# user is requesting a directory as an argument
if len(com_args) > 2:
finder_dirpath = com_args[2]
if os.path.isdir(finder_dirpath):
self.view.window().run_command("open_dir", {"dir": os.path.abspath(finder_dirpath)}) # open it
curdir_finder_msg = "The requested directory was opened in your finder\n"
elif os.path.isfile(finder_dirpath):
finder_dirpath = os.path.dirname(finder_dirpath)
self.view.window().run_command("open_dir", {"dir": os.path.abspath(finder_dirpath)}) # open it
curdir_finder_msg = "The requested directory was opened in your finder\n"
else:
curdir_finder_msg = "Unable to find the requested directory path. Please try again.\n"
# provide Glue view output to user after execution of the finder reveal
self.view.run_command('glue_writer', {'text': curdir_finder_msg, 'command': glue_command, 'exit': False})
# user is requesting the current working directory (i.e. no argument)
else:
if len(self.current_dirpath) > 0 and os.path.isdir(self.current_dirpath):
self.view.window().run_command("open_dir", {"dir": self.current_dirpath})
curdir_finder_msg = "The current directory was opened in your finder.\n"
self.view.run_command('glue_writer', {'text': curdir_finder_msg, 'command': glue_command, 'exit': False})
else:
curdir_finderror_msg = "Unable to detect the current working directory. Please restart the Glue plugin and try again.\n"
self.view.run_command('glue_writer', {'text': curdir_finderror_msg, 'command': glue_command, 'exit': False})
# GOTO command
elif com_args[1] == "goto":
if len(com_args) > 2:
goto_user_msg = "goto " + com_args[2] + " completed\n"
self.view.window().run_command("show_overlay", {"overlay": "goto", "show_files": True, "text": com_args[2]})
self.view.run_command('glue_writer', {'text': goto_user_msg, 'command': glue_command, 'exit': False})
else:
# if no query string, just open the overlay
goto_user_msg = "goto overlay launch completed\n"
self.view.window().run_command("show_overlay", {"overlay": "goto", "show_files": True})
self.view.run_command('glue_writer', {'text': goto_user_msg, 'command': glue_command, 'exit': False})
# LOCALHOST command
elif com_args[1] == "localhost":
import webbrowser
localhost_url = 'http://localhost:8000'
if len(com_args) > 2:
protocol = com_args[2] # the argument is the requested protocol (doesn't perform sanity check)
localhost_url = 'http://localhost:' + protocol
webbrowser.open(localhost_url)
localhost_browse_msg = "glue localhost complete\n"
self.view.run_command('glue_writer', {'text': localhost_browse_msg, 'command': glue_command, 'exit': False})
# NEW command
elif com_args[1] == "new":
filenew_text = "glue new command completed\n"
self.view.run_command('glue_writer', {'text': filenew_text, 'command': glue_command, 'exit': False})
self.view.window().new_file()
# OPEN command
elif com_args[1] == "open":
if len(com_args) > 2:
fileopen_text = "glue open command completed\n"
self.view.run_command('glue_writer', {'text': fileopen_text, 'command': glue_command, 'exit': False})
self.view.window().run_command('glue_file_opener', {'current_dir': self.current_dirpath, 'file_list': com_args[2:]})
else:
missing_file_error_msg = "Please enter at least one filepath after the open command.\n"
self.view.run_command('glue_writer', {'text': missing_file_error_msg, 'command': glue_command, 'exit': False})
# PATH command
elif com_args[1] == "path":
if len(self.userpath) == 0:
# obtain the 'real' mac osx path using the get_mac_path method if not set by user
if sublime.platform() == "osx":
# get the PATH
updated_path = self.get_mac_path() # attempt to obtain the PATH set in the user's respective shell startup file
# set the Mac environ PATH to the obtained PATH
os.environ['PATH'] = updated_path
# assign the PATH to the self.userpath attribute for the executable search below (and for reuse while running)
self.userpath = updated_path
the_path = self.userpath
elif sublime.platform() == "windows":
the_path = os.environ['PATH']
# do not set the PATH in Windows, letting Win shell handle the command
elif sublime.platform() == "linux":
self.userpath = os.environ['PATH']
the_path = self.userpath
else:
# if there is a self.userpath that is set (user set in settings, previously set above) then set Python environ PATH string
the_path = self.userpath
self.view.run_command('glue_writer', {'text': the_path + '\n', 'command': glue_command, 'exit': False})
# TEMPLATE command
elif com_args[1] == "template":
if len(com_args) > 2:
template_name = ""
template_filename = ""
template_multi = False
# test for the flag and name option in the user command
for argument in com_args[2:]: # only test the arguments after the 'template' subcommand
if "--multi" in argument:
template_multi = True # user indicated that the file will specify multiple file paths
elif argument.startswith('--name='):
name_list = argument.split('=')
template_filename = name_list[1] # the user assigned file write name of the file
else:
template_name = argument # if it is not one of the above options, then it is the requested template name
print_string = template_name + " " + template_filename + " " + str(template_multi)
self.view.run_command('glue_writer', {'text': print_string, 'command': glue_command, 'exit': False})
else:
# user did not enter a template name
template_err_msg = "Please enter a template name after your command.\n"
self.view.run_command('glue_writer', {'text': template_err_msg, 'command': glue_command, 'exit': False})
# USER command
elif com_args[1] == "user":
uc_file_path = os.path.join(sublime.packages_path(), 'Glue-Commands', 'glue.json')
if self.is_file_here(uc_file_path):
fr = FileReader(uc_file_path)
user_json = fr.read_utf8()
usercom_dict = json.loads(user_json)
if len(usercom_dict) > 0:
if len(usercom_dict) == 1:
com_extension_string = 'extension'
com_number_string = 'lonely'
else:
com_extension_string = 'extensions'
com_number_string = str(len(usercom_dict))
number_com_msg = "Your " + com_number_string + " Glue " + com_extension_string + ":\n\n"
com_list = []
for key, value in self.xitems(usercom_dict):
com_string = key + " : " + value
com_list.append(com_string)
com_string = '\n'.join(sorted(com_list))
com_string = number_com_msg + com_string + '\n'
self.view.run_command('glue_writer', {'text': com_string, 'command': glue_command, 'exit': False})
else:
user_error_msg = "Your glue.json file does not contain any commands\n"
self.view.run_command('glue_writer', {'text': user_error_msg, 'command': glue_command, 'exit': False})
else:
usercom_error_msg = "The glue.json file could not be found. Please confirm that this is contained in a Glue-Commands directory in your Sublime Text Packages directory.\n"
self.view.run_command('glue_writer', {'text': usercom_error_msg, 'command': glue_command, 'exit': False})
# WCO command
elif com_args[1] == "wco":
if len(com_args) > 2:
fileopen_text = "glue wco command completed\n"
self.view.run_command('glue_writer', {'text': fileopen_text, 'command': glue_command, 'exit': False})
self.view.window().run_command('glue_file_wildcard_opener', {'current_dir': self.current_dirpath, 'match_pattern': com_args[2]})
else:
missing_file_error_msg = "Please enter at least one filepath after the open command.\n"
self.view.run_command('glue_writer', {'text': missing_file_error_msg, 'command': glue_command, 'exit': False})
# TEST command
elif com_args[1] == "test":
pass
# test open containing folder
#self.view.window().run_command("open_dir", {"dir": self.current_dirpath})
# self.view.run_command('glue_writer', {'text': current_proj, 'command': glue_command, 'exit': False})
# USER ALIAS commands
else:
if len(com_args) > 1:
uc_file_path = os.path.join(sublime.packages_path(), 'Glue-Commands', 'glue.json')
if self.is_file_here(uc_file_path):
fr = FileReader(uc_file_path)
user_json = fr.read_utf8()
usercom_dict = json.loads(user_json)
# if arguments from command, add those in location indicated by the file
if len(com_args) > 2:
# arguments were included on the command line, pass them to the user command
arguments = ' '.join(com_args[2:])
else:
# no additional arguments were included so pass empty string if there is an {{args}} tag
arguments = ''
if com_args[1] in usercom_dict:
user_command = usercom_dict[com_args[1]]
user_command = user_command.replace('{{args}}', arguments) # replace with CL args
user_command = user_command.replace('{{pwd}}', os.getcwd()) # replace with working dir path
user_command = user_command.replace('{{clipboard}}', sublime.get_clipboard()) # replace with contents of clipboard
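# Illustrative (hypothetical) alias: with {"gs": "git status {{args}}"} in glue.json,
# entering "glue gs -sb" expands to "git status -sb" before execution.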
self.muterun(user_command) # execute the command
else:
# didn't find a glue alias with the requested name in the existing glue alias settings file
bad_cmd_error_msg = "Glue could not identify that command. Please try again.\n"
self.view.run_command('glue_writer', {'text': bad_cmd_error_msg, 'command': glue_command, 'exit': False})
# Didn't find a glue alias setting file, provide error message
else:
bad_cmd_error_msg = "Glue could not identify that command. Please try again.\n"
self.view.run_command('glue_writer', {'text': bad_cmd_error_msg, 'command': glue_command, 'exit': False})
else:
missing_arg_error_msg = "Glue requires an argument. Please use 'glue help' for more information.\n"
self.view.run_command('glue_writer', {'text': missing_arg_error_msg, 'command': glue_command, 'exit': False})
# Execute the system command that was entered
else:
try:
if len(com_args) > 0:
arguments = ' '.join(com_args[1:])
else:
arguments = ''
command = os.path.join(self.get_path(com_args[0]), com_args[0]) + " " + arguments
t = threading.Thread(target=self.execute_command, args=(command, user_command))
t.start() # launch the thread to execute the command
self.progress_indicator(t) # provide progress indicator
self.print_on_complete(t, user_command) # polls for completion of the thread and prints to editor
except Exception as e:
raise e
#------------------------------------------------------------------------------
# [ is_file_here ] - returns boolean for presence of filepath
#------------------------------------------------------------------------------
def is_file_here(self, filepath):
if os.path.exists(filepath) and os.path.isfile(filepath):
return True
else:
return False
#------------------------------------------------------------------------------
# [ get_mac_path method ] - obtain the user PATH setting on the Mac from bash
#------------------------------------------------------------------------------
def get_mac_path(self):
pathgetter = "bash -ilc 'echo -n $PATH'"
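# -i and -l request an interactive login shell so the user's startup files
# (e.g. ~/.bash_profile) are sourced and their PATH edits are visible; -c runs the echo and exits.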
updated_path = subprocess.Popen(pathgetter, stdout=subprocess.PIPE, shell=True).stdout.read()
# update the shell PATH with this path
return updated_path.decode("utf-8").rstrip().rstrip(':')
#------------------------------------------------------------------------------
# | |
import torch
from collections import OrderedDict
import json
import math
import config as cfg
import ipeps.ipeps as ipeps
class IPEPS_U1SYM(ipeps.IPEPS):
def __init__(self, sym_tensors, coeffs, vertexToSite=None, lX=None, lY=None, \
peps_args=cfg.peps_args, global_args=cfg.global_args):
r"""
:param sym_tensors: list of selected symmetric tensors
:param coeffs: map from elementary unit cell to vector of coefficients
:param vertexToSite: function mapping arbitrary vertex of a square lattice
into a vertex within elementary unit cell
:param lX: length of the elementary unit cell in X direction
:param lY: length of the elementary unit cell in Y direction
:param peps_args: ipeps configuration
:param global_args: global configuration
:type sym_tensors: list[tuple(dict(str,str), torch.tensor)]
:type coeffs: dict[tuple(int,int) : torch.tensor]
:type vertexToSite: function(tuple(int,int))->tuple(int,int)
:type lX: int
:type lY: int
:type peps_args: PEPSARGS
:type global_args: GLOBALARGS
Member ``sites`` is a dictionary of non-equivalent on-site tensors
indexed by tuple of coordinates (x,y) within the elementary unit cell.
The index-position convention for on-site tensors is defined as follows::
u s
|/
l--A--r <=> A[s,u,l,d,r]
|
d
where s denotes physical index, and u,l,d,r label four principal directions
up, left, down, right in anti-clockwise order starting from up
Member ``vertexToSite`` is a mapping function from vertex on a square lattice
passed in as tuple(x,y) to a corresponding tuple(x,y) within elementary unit cell.
On-site tensor of an IPEPS object ``wfc`` at vertex (x,y) is conveniently accessed
through the member function ``site``, which internally uses ``vertexToSite`` mapping::
coord= (0,0)
A_00= wfc.site(coord)
By combining the appropriate ``vertexToSite`` mapping function with elementary unit
cell specified through ``sites`` various tilings of a square lattice can be achieved::
# Example 1: 1-site translational iPEPS
sites={(0,0): A}
def vertexToSite(coord):
return (0,0)
wfc= IPEPS(sites,vertexToSite)
# resulting tiling:
# y\x -2 -1 0 1 2
# -2 A A A A A
# -1 A A A A A
# 0 A A A A A
# 1 A A A A A
# Example 2: 2-site bipartite iPEPS
sites={(0,0): A, (1,0): B}
def vertexToSite(coord):
x = (coord[0] + abs(coord[0]) * 2) % 2
y = abs(coord[1])
return ((x + y) % 2, 0)
wfc= IPEPS(sites,vertexToSite)
# resulting tiling:
# y\x -2 -1 0 1 2
# -2 A B A B A
# -1 B A B A B
# 0 A B A B A
# 1 B A B A B
# Example 3: iPEPS with 3x2 unit cell with PBC
sites={(0,0): A, (1,0): B, (2,0): C, (0,1): D, (1,1): E, (2,1): F}
wfc= IPEPS(sites,lX=3,lY=2)
# resulting tiling:
# y\x -2 -1 0 1 2
# -2 B C A B C
# -1 E F D E F
# 0 B C A B C
# 1 E F D E F
where in the last example we used default setting for ``vertexToSite``, which
maps square lattice into elementary unit cell of size `lX` x `lY` assuming
periodic boundary conditions (PBC) along both X and Y directions.
TODO we infer the size of the cluster from the keys of sites. Is it OK?
"""
self.sym_tensors= sym_tensors
self.coeffs= OrderedDict(coeffs)
sites= self.build_onsite_tensors()
super().__init__(sites, vertexToSite=vertexToSite, peps_args=peps_args,\
global_args=global_args)
def __str__(self):
print(f"lX x lY: {self.lX} x {self.lY}")
for nid,coord,site in [(t[0], *t[1]) for t in enumerate(self.coeffs.items())]:
print(f"A{nid} {coord}: {site.size()}")
# show tiling of a square lattice
coord_list = list(self.coeffs.keys())
mx, my = 3*self.lX, 3*self.lY
label_spacing = 1+int(math.log10(len(self.coeffs.keys())))
for y in range(-my,my):
if y == -my:
print("y\\x ", end="")
for x in range(-mx,mx):
print(str(x)+label_spacing*" "+" ", end="")
print("")
print(f"{y:+} ", end="")
for x in range(-mx,mx):
print(f"A{coord_list.index(self.vertexToSite((x,y)))} ", end="")
print("")
# print meta-information of considered symmetric tensors
for i,su2t in enumerate(self.sym_tensors):
print(f"{i} {su2t[0]}")
# print coefficients
for nid,coord,c in [(t[0], *t[1]) for t in enumerate(self.coeffs.items())]:
tdims = c.size()
tlength = tdims[0]
print(f"x: {coord[0]}, y: {coord[1]}")
els=[f"{c[i]}" for i in range(tlength)]
print(els)
return ""
def get_parameters(self):
return self.coeffs.values()
def get_checkpoint(self):
return self.coeffs
def load_checkpoint(self,checkpoint_file):
checkpoint= torch.load(checkpoint_file)
self.coeffs= checkpoint["parameters"]
for coeff_t in self.coeffs.values(): coeff_t.requires_grad_(False)
self.sites= self.build_onsite_tensors()
def build_onsite_tensors(self):
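# Each on-site tensor is a linear combination of the fixed symmetric tensors:
# A[p,u,l,d,r] = sum_i c_i * T_i[p,u,l,d,r], which is what the einsum below computes.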
ts= torch.stack([t for m,t in self.sym_tensors])
sites=dict()
for coord,c in self.coeffs.items():
sites[coord]= torch.einsum('i,ipuldr->puldr',c,ts)
return sites
def add_noise(self,noise):
for coord in self.coeffs.keys():
rand_t = torch.rand( self.coeffs[coord].size(), dtype=self.dtype, device=self.device)
tmp_t = self.coeffs[coord] + noise * rand_t
self.coeffs[coord]= tmp_t/torch.max(torch.abs(tmp_t))
self.sites= self.build_onsite_tensors()
def get_aux_bond_dims(self):
return [max(t[1].size()[1:]) for t in self.sym_tensors]
def write_to_file(self, outputfile, aux_seq=[0,1,2,3], tol=1.0e-14, normalize=False):
write_ipeps_u1(self, outputfile, aux_seq=aux_seq, tol=tol, normalize=normalize)
def extend_bond_dim(state, new_d):
return ipeps.extend_bond_dim(state, new_d)
def read_ipeps_u1(jsonfile, vertexToSite=None, aux_seq=[0,1,2,3], peps_args=cfg.peps_args,\
global_args=cfg.global_args):
r"""
:param jsonfile: input file describing iPEPS in json format
:param vertexToSite: function mapping arbitrary vertex of a square lattice
into a vertex within elementary unit cell
:param aux_seq: array specifying order of auxiliary indices of on-site tensors stored
in `jsonfile`
:param peps_args: ipeps configuration
:param global_args: global configuration
:type jsonfile: str or Path object
:type vertexToSite: function(tuple(int,int))->tuple(int,int)
:type aux_seq: list[int]
:type peps_args: PEPSARGS
:type global_args: GLOBALARGS
:return: wavefunction
:rtype: IPEPS
A simple PBC ``vertexToSite`` function is used by default
Parameter ``aux_seq`` defines the expected order of auxiliary indices
in input file relative to the convention fixed in tn-torch::
0
1A3 <=> [up, left, down, right]: aux_seq=[0,1,2,3]
2
for alternative order, eg.
1
0A2 <=> [left, up, right, down]: aux_seq=[1,0,3,2]
3
"""
asq = [x+1 for x in aux_seq]
sites = OrderedDict()
with open(jsonfile) as j:
raw_state = json.load(j)
# check for presence of "aux_seq" field in jsonfile
if "aux_ind_seq" in raw_state.keys():
asq = [x+1 for x in raw_state["aux_ind_seq"]]
# read the list of considered SU(2)-symmetric tensors
sym_tensors=[]
for symt in raw_state["sym_tensors"]:
meta=dict({"meta": symt["meta"]})
dims=[symt["physDim"]]+[symt["auxDim"]]*4
t= torch.zeros(tuple(dims), dtype=global_args.dtype, device=global_args.device)
for elem in symt["entries"]:
tokens= elem.split(' ')
inds=tuple([int(i) for i in tokens[0:5]])
t[inds]= float(tokens[5])
sym_tensors.append((meta,t))
# Loop over non-equivalent tensor,coeffs pairs in the unit cell
coeffs=OrderedDict()
for ts in raw_state["map"]:
coord = (ts["x"],ts["y"])
# find the corresponding tensor of coeffs (and its elements)
# identified by "siteId" in the "sites" list
t = None
for s in raw_state["coeffs"]:
if s["siteId"] == ts["siteId"]:
t = s
if t is None:
raise Exception("Tensor with siteId: "+ts["siteId"]+" NOT FOUND in \"coeffs\"")
X = torch.zeros(t["numEntries"], dtype=global_args.dtype, device=global_args.device)
# 1) fill the tensor with elements from the list "entries"
# which list the coefficients in the following
# notation: Dimensions are indexed starting from 0
#
# index (integer) of coeff, (float) Re, Im
for entry in t["entries"]:
tokens = entry.split()
X[int(tokens[0])]=float(tokens[1])
coeffs[coord]=X
# Unless given, construct a function mapping from
# any site of square-lattice back to unit-cell
if vertexToSite == None:
# check for legacy keys
lX = 0
lY = 0
if "sizeM" in raw_state:
lX = raw_state["sizeM"]
else:
lX = raw_state["lX"]
if "sizeN" in raw_state:
lY = raw_state["sizeN"]
else:
lY = raw_state["lY"]
def vertexToSite(coord):
x = coord[0]
y = coord[1]
return ( (x + abs(x)*lX)%lX, (y + abs(y)*lY)%lY )
state = IPEPS_U1SYM(sym_tensors=sym_tensors, coeffs=coeffs, \
vertexToSite=vertexToSite, \
lX=lX, lY=lY, peps_args=peps_args, global_args=global_args)
else:
state = IPEPS_U1SYM(sym_tensors=sym_tensors, coeffs=coeffs, \
vertexToSite=vertexToSite, \
peps_args=peps_args, global_args=global_args)
return state
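# Illustrative (minimal, hypothetical) JSON accepted by read_ipeps_u1, inferred from the
# parsing above:
# {
#   "lX": 1, "lY": 1,
#   "sym_tensors": [
#     {"meta": "...", "physDim": 2, "auxDim": 3,
#      "entries": ["0 0 0 0 0 1.0"]}                                    # "s u l d r value"
#   ],
#   "map": [{"x": 0, "y": 0, "siteId": "A"}],
#   "coeffs": [{"siteId": "A", "numEntries": 1, "entries": ["0 1.0"]}]  # "index value"
# }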
def write_ipeps_u1(state, outputfile, aux_seq=[0,1,2,3], tol=1.0e-14, normalize=False):
r"""
:param state: wavefunction to write out in json format
:param outputfile: target file
:param aux_seq: array specifying order in which the auxiliary indices of on-site tensors
will be stored in the `outputfile`
:param tol: minimum magnitude of tensor elements which are written out
:param normalize: if True, on-site tensors are normalized before writing
:type state: IPEPS
:type ouputfile: str or Path object
:type aux_seq: list[int]
:type tol: float
:type normalize: bool
Parameter ``aux_seq`` defines the order of auxiliary indices relative to the convention
fixed in tn-torch in which the tensor elements are written out::
0
1A3 <=> [up, left, down, right]: aux_seq=[0,1,2,3]
2
for alternative order, eg.
1
0A2 <=> [left, up, right, down]: aux_seq=[1,0,3,2]
3
TODO drop constrain for aux bond dimension to be identical on
all bond indices
TODO implement cutoff on
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bibtex.py encodes and describes the BibTeX specification (see Kopka & Daly,
2004 for details). Useful for validating OLD source input since the data
structure for OLD sources is the BibTeX data structure.
Use Pybtex (http://pybtex.sourceforge.net/manual.html) to parse BibTeX files if
need be:
from io import StringIO
from pybtex.database.input.bibtex import Parser
e1 = '''
@BOOK{knuth:86a,
AUTHOR = "<NAME>",
TITLE = {The \TeX{}book},
EDITION = "third",
PUBLISHER = "Addison--Wesley",
ADDRESS = {Reading, Massachusetts},
YEAR = 1986
}
'''.strip()
parser = Parser()
bib_data = parser.parse_stream(StringIO(e1))
knuth86a = parser.data.entries['knuth:86a']
unicode(knuth86a.persons['author'][0])
u'<NAME>.'
"""
# Entry types.
entry_types = {
'article': {
'description': 'An article from a journal or magazine.',
'required': ('author', 'title', 'journal', 'year'),
'optional': ('volume', 'number', 'pages', 'month', 'note')
},
'book' : {
'description': 'A book with an explicit publisher.',
'required': (('author', 'editor'), 'title', 'publisher', 'year'),
'optional': (('volume', 'number'), 'series', 'address', 'edition', 'month', 'note')
},
'booklet': {
'description': 'A work that is printed and bound, but without a named publisher or sponsoring institution.',
'required': ('title',),
'optional': ('author', 'howpublished', 'address', 'month', 'year', 'note')
},
'conference': {
'description': 'The same as inproceedings, included for Scribe compatibility.',
'required': ('author', 'title', 'booktitle', 'year'),
'optional': ('editor', ('volume', 'number'), 'series', 'pages',
'address', 'month', 'organization', 'publisher', 'note')
},
'inbook': {
'description': 'A part of a book, usually untitled. May be a chapter (or section or whatever) and/or a range of pages.',
'required': (('author', 'editor'), 'title', ('chapter', 'pages'),
'publisher', 'year'),
'optional': (('volume', 'number'), 'series', 'type', 'address',
'edition', 'month', 'note')
},
'incollection': {
'description': 'A part of a book having its own title.',
'required': ('author', 'title', 'booktitle', 'publisher', 'year'),
'optional': ('editor', ('volume', 'number'), 'series', 'type', 'chapter',
'pages', 'address', 'edition', 'month', 'note')
},
'inproceedings': {
'description': 'An article in a conference proceedings.',
'required': ('author', 'title', 'booktitle', 'year'),
'optional': ('editor', ('volume', 'number'), 'series', 'pages',
'address', 'month', 'organization', 'publisher', 'note')
},
'manual': {
'description': 'Technical documentation.',
'required': ('title',),
'optional': ('author', 'organization', 'address', 'edition', 'month',
'year', 'note')
},
'mastersthesis': {
'description': 'A Master\'s thesis.',
'required': ('author', 'title', 'school', 'year'),
'optional': ('type', 'address', 'month', 'note')
},
'misc': {
'description': 'For use when nothing else fits.',
'required': (),
'optional': ('author', 'title', 'howpublished', 'month', 'year', 'note')
},
'phdthesis': {
'description': 'A Ph.D. thesis.',
'required': ('author', 'title', 'school', 'year'),
'optional': ('type', 'address', 'month', 'note')
},
'proceedings': {
'description': 'The proceedings of a conference.',
'required': ('title', 'year'),
'optional': ('editor', ('volume', 'number'), 'series', 'address',
'month', 'publisher', 'organization', 'note')
},
'techreport': {
'description': 'A report published by a school or other institution, usually numbered within a series.',
'required': ('author', 'title', 'institution', 'year'),
'optional': ('type', 'number', 'address', 'month', 'note')
},
'unpublished': {
'description': 'A document having an author and title, but not formally published.',
'required': ('author', 'title', 'note'),
'optional': ('month', 'year')
}
}
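# Illustrative sketch (not part of the original module): one way to use the entry_types
# table above to check an entry for missing required fields. The entry format (a dict
# keyed by lowercased field names) is an assumption here.
def missing_required_fields(entry_type, entry):
    """Return the required fields of `entry_type` that `entry` does not satisfy."""
    missing = []
    for requirement in entry_types[entry_type]['required']:
        # A tuple means "at least one of these alternatives", e.g. ('author', 'editor').
        alternatives = requirement if isinstance(requirement, tuple) else (requirement,)
        if not any(alt in entry for alt in alternatives):
            missing.append(requirement)
    return missing

# e.g. missing_required_fields('book', {'title': 'X', 'year': '1986'})
# -> [('author', 'editor'), 'publisher']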
# Other entry types. Not recommended.
other_entry_types = (
'collection',
'patent'
)
# These field names are optional for all entry types.
universally_optional_field_names = (
'key', # Additional info for alphabetizing entries
'crossref', # Field text here is the cite key for another entry
'url',
)
# Fields names.
# Note that 'eprint' and 'url' might also be in the standard fields, cf. http://en.wikipedia.org/wiki/BibTeX#Bibliographic_information_file
field_names = (
'address', # Usually the address of the publisher or other type of institution. For major publishing houses, van Leunen recommends omitting the information entirely. For small publishers, on the other hand, you can help the reader by giving the complete address.
'annote', # An annotation. It is not used by the standard bibliography styles, but may be used by others that produce an annotated bibliography.
'author', # The name(s) of the author(s), in the format described in the LaTeX book.
'booktitle', # Title of a book, part of which is being cited. See the LaTeX book for how to type titles. For book entries, use the title field instead.
'chapter', # A chapter (or section or whatever) number.
'crossref', # The database key of the entry being cross referenced. Any fields that are missing from the current record are inherited from the field being cross referenced.
'edition', # The edition of a book---for example, ``Second''. This should be an ordinal, and should have the first letter capitalized, as shown here; the standard styles convert to lower case when necessary.
'editor', # Name(s) of editor(s), typed as indicated in the LaTeX book. If there is also an author field, then the editor field gives the editor of the book or collection in which the reference appears.
'howpublished', # How something strange has been published. The first word should be capitalized.
'institution', # The sponsoring institution of a technical report.
'journal', # A journal name. Abbreviations are provided for many journals.
'key', # Used for alphabetizing, cross referencing, and creating a label when the ``author'' information is missing. This field should not be confused with the key that appears in the cite command and at the beginning of the database entry.
'month', # The month in which the work was published or, for an unpublished work, in which it was written. You should use the standard three-letter abbreviation, as described in Appendix B.1.3 of the LaTeX book.
'note', # Any additional information that can help the reader. The first word should be capitalized.
'number', # The number of a journal, magazine, technical report, or of a work in a series. An issue of a journal or magazine is usually identified by its volume and number; the organization that issues a technical report usually gives it a number; and sometimes books are given numbers in a named series.
'organization', # The organization that sponsors a conference or that publishes a manual.
'pages', # One or more page numbers or range of numbers, such as 42--111 or 7,41,73--97 or 43+ (the `+' in this last example indicates pages following that don't form a simple range). To make it easier to maintain Scribe-compatible databases, the standard styles convert a single dash (as in 7-33) to the double dash used in TeX to denote number ranges (as in 7--33).
'publisher', # The publisher's name.
'school', # The name of the school where a thesis was written.
'series', # The name of a series or set of books. When citing an entire book, the the title field gives its title and an optional series field gives the name of a series or multi-volume set in which the book is published.
'title', # The work's title, typed as explained in the LaTeX book.
'type', # The type of a technical report---for example, ``Research Note''.
'url', # The universal resource locator for online documents; this is not standard but supplied by more modern bibliography styles.
'volume', # The volume of a journal or multi-volume book.
'year' # The year of publication or, for an unpublished work, the year it was written. Generally it should consist of four numerals, such as 1984, although the standard styles can handle any year whose last four nonpunctuation characters are numerals, such as `\hbox{(about 1984)}'.
)
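# Illustrative only: a hypothetical BibTeX entry exercising several of the
# standard fields listed above (the citation key and all values are made up).
#
#   @book{example1984book,
#     author    = "A. Author",
#     title     = "An Example Book",
#     publisher = "Example Press",
#     edition   = "Second",
#     year      = "1984",
#     note      = "The field names above map directly onto these keys."
#   }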
# Other field names.
other_field_names = (
'affiliation', # The author's affiliation.
'abstract', # An abstract of the work.
'contents', # A Table of Contents
'copyright', # Copyright information.
'ISBN', # The International Standard Book Number.
'ISSN', # The International Standard Serial Number. Used to identify a journal.
'keywords', # Key words used for searching or possibly for annotation.
'language', # The language the document is in.
'location', # A location associated with the entry, such as the city in which a conference took place.
'LCCN', # The Library of Congress Call Number.
take up the full area."""
return self.SetFlag(self.optionMaximized, True)
def Minimize(self):
"""
Makes the pane minimized in an L{AuiToolBar}.
Clicking on the minimize button causes a new L{AuiToolBar} to be created
and added to the frame manager, (currently the implementation is such that
panes at West will have a toolbar at the right, panes at South will have
toolbars at the bottom etc...) and the pane is hidden in the manager.
Clicking on the restore button on the newly created toolbar will result in the
toolbar being removed and the original pane being restored.
"""
return self.SetFlag(self.optionMinimized, True)
def MinimizeMode(self, mode):
"""
Sets the expected minimized mode if the MinimizeButton() is visible.
The minimized pane can have a specific position in the work space:
============================== ========= ==============================
Minimize Mode Flag Hex Value Description
============================== ========= ==============================
``AUI_MINIMIZE_POS_SMART`` 0x01 Minimizes the pane on the closest tool bar
``AUI_MINIMIZE_POS_TOP`` 0x02 Minimizes the pane on the top tool bar
``AUI_MINIMIZE_POS_LEFT`` 0x03 Minimizes the pane on its left tool bar
``AUI_MINIMIZE_POS_RIGHT`` 0x04 Minimizes the pane on its right tool bar
``AUI_MINIMIZE_POS_BOTTOM`` 0x05 Minimizes the pane on its bottom tool bar
============================== ========= ==============================
The caption of the minimized pane can be displayed in different modes:
============================== ========= ==============================
Caption Mode Flag Hex Value Description
============================== ========= ==============================
``AUI_MINIMIZE_CAPT_HIDE`` 0x0 Hides the caption of the minimized pane
``AUI_MINIMIZE_CAPT_SMART`` 0x08 Displays the caption in the best rotation (horizontal in the top and in the bottom tool bar or clockwise in the right and in the left tool bar)
``AUI_MINIMIZE_CAPT_HORZ`` 0x10 Displays the caption horizontally
============================== ========= ==============================
"""
self.minimize_mode = mode
return self
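# A minimal sketch (assuming the AUI_MINIMIZE_* constants documented above are
# importable from this module): the position and caption flags occupy separate
# bit ranges, so they are meant to be OR-ed together before being passed in, e.g.
#   pane.MinimizeMode(AUI_MINIMIZE_POS_SMART | AUI_MINIMIZE_CAPT_SMART)
# which minimizes onto the closest toolbar and rotates the caption automatically.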
def Restore(self):
""" Is the reverse of L{Maximize} and L{Minimize}."""
return self.SetFlag(self.optionMaximized or self.optionMinimized, False)
def Fixed(self):
"""
Forces a pane to be fixed size so that it cannot be resized.
After calling L{Fixed}, L{IsFixed} will return ``True``.
"""
return self.SetFlag(self.optionResizable, False)
def Resizable(self, resizable=True):
"""
Allows a pane to be resizable if `resizable` is ``True``, and forces
it to be a fixed size if `resizable` is ``False``.
If `resizable` is ``False``, this method is simply an antonym for L{Fixed}.
:param `resizable`: whether the pane will be resizeable or not.
"""
return self.SetFlag(self.optionResizable, resizable)
def Transparent(self, alpha):
"""
Makes the pane transparent when floating.
:param `alpha`: an integer value between 0 and 255 for pane transparency.
"""
if alpha < 0 or alpha > 255:
raise Exception("Invalid transparency value (%s)"%repr(alpha))
self.transparent = alpha
self.needsTransparency = True
return self
def Dock(self):
"""
Indicates that a pane should be docked. It is the opposite of L{Float}.
"""
if self.IsNotebookPage():
self.notebook_id = -1
self.dock_direction = AUI_DOCK_NONE
return self.SetFlag(self.optionFloating, False)
def Float(self):
"""
Indicates that a pane should be floated. It is the opposite of L{Dock}.
"""
if self.IsNotebookPage():
self.notebook_id = -1
self.dock_direction = AUI_DOCK_NONE
return self.SetFlag(self.optionFloating, True)
def Hide(self):
"""
Indicates that a pane should be hidden.
Calling L{Show} (``False``) achieves the same effect.
"""
return self.SetFlag(self.optionHidden, True)
def Show(self, show=True):
"""
Indicates that a pane should be shown.
:param `show`: whether the pane should be shown or not.
"""
return self.SetFlag(self.optionHidden, not show)
# By defaulting to 1000, the tab will get placed at the end
def NotebookPage(self, id, tab_position=1000):
"""
Forces a pane to be a notebook page, so that the pane can be
docked on top of another pane to create an L{AuiNotebook}.
:param `id`: the notebook id;
:param `tab_position`: the tab number of the pane once docked in a notebook.
"""
# Remove any floating frame
self.Dock()
self.notebook_id = id
self.dock_pos = tab_position
self.dock_row = 0
self.dock_layer = 0
self.dock_direction = AUI_DOCK_NOTEBOOK_PAGE
return self
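# Sketch of chained usage (the notebook id and tab position are illustrative):
#   pane.NotebookPage(1, tab_position=2)   # dock as the third tab of notebook 1
# Because the method returns self, it can be chained with the other setters,
# e.g. pane.NotebookPage(1).CloseButton(False).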
def NotebookControl(self, id):
"""
Forces a pane to be a notebook control (L{AuiNotebook}).
:param `id`: the notebook id.
"""
self.notebook_id = id
self.window = None
self.buttons = []
if self.dock_direction == AUI_DOCK_NOTEBOOK_PAGE:
self.dock_direction = AUI_DOCK_NONE
return self
def HasNotebook(self):
""" Returns whether a pane has a L{AuiNotebook} or not. """
return self.notebook_id >= 0
def IsNotebookPage(self):
""" Returns whether the pane is a notebook page in a L{AuiNotebook}. """
return self.notebook_id >= 0 and self.dock_direction == AUI_DOCK_NOTEBOOK_PAGE
def IsNotebookControl(self):
""" Returns whether the pane is a notebook control (L{AuiNotebook}). """
return not self.IsNotebookPage() and self.HasNotebook()
def SetNameFromNotebookId(self):
""" Sets the pane name once docked in a L{AuiNotebook} using the notebook id. """
if self.notebook_id >= 0:
self.name = "__notebook_%d"%self.notebook_id
return self
def CaptionVisible(self, visible=True, left=False):
"""
Indicates that a pane caption should be visible. If `visible` is ``False``, no pane
caption is drawn.
:param `visible`: whether the caption should be visible or not;
:param `left`: whether the caption should be drawn on the left (rotated by 90 degrees) or not.
"""
if left:
self.SetFlag(self.optionCaption, False)
return self.SetFlag(self.optionCaptionLeft, visible)
self.SetFlag(self.optionCaptionLeft, False)
return self.SetFlag(self.optionCaption, visible)
def PaneBorder(self, visible=True):
"""
Indicates that a border should be drawn for the pane.
:param `visible`: whether the pane border should be visible or not.
"""
return self.SetFlag(self.optionPaneBorder, visible)
def Gripper(self, visible=True):
"""
Indicates that a gripper should be drawn for the pane.
:param `visible`: whether the gripper should be visible or not.
"""
return self.SetFlag(self.optionGripper, visible)
def GripperTop(self, attop=True):
"""
Indicates that a gripper should be drawn at the top of the pane.
:param `attop`: whether the gripper should be drawn at the top or not.
"""
return self.SetFlag(self.optionGripperTop, attop)
def CloseButton(self, visible=True):
"""
Indicates that a close button should be drawn for the pane.
:param `visible`: whether the close button should be visible or not.
"""
return self.SetFlag(self.buttonClose, visible)
def MaximizeButton(self, visible=True):
"""
Indicates that a maximize button should be drawn for the pane.
:param `visible`: whether the maximize button should be visible or not.
"""
return self.SetFlag(self.buttonMaximize, visible)
def MinimizeButton(self, visible=True):
"""
Indicates that a minimize button should be drawn for the pane.
:param `visible`: whether the minimize button should be visible or not.
"""
return self.SetFlag(self.buttonMinimize, visible)
def PinButton(self, visible=True):
"""
Indicates that a pin button should be drawn for the pane.
:param `visible`: whether the pin button should be visible or not.
"""
return self.SetFlag(self.buttonPin, visible)
def DestroyOnClose(self, b=True):
"""
Indicates whether a pane should be destroyed when it is closed.
Normally a pane is simply hidden when the close button is clicked. Setting
`b` to ``True`` will cause the window to be destroyed when the user clicks
the pane's close button.
:param `b`: whether the pane should be destroyed when it is closed or not.
"""
return self.SetFlag(self.optionDestroyOnClose, b)
def TopDockable(self, b=True):
"""
Indicates whether a pane can be docked at the top of the frame.
:param `b`: whether the pane can be docked at the top or not.
"""
return self.SetFlag(self.optionTopDockable, b)
def BottomDockable(self, b=True):
"""
Indicates whether a pane can be docked at the bottom of the frame.
:param `b`: whether the pane can be docked at the bottom or not.
"""
return self.SetFlag(self.optionBottomDockable, b)
def LeftDockable(self, b=True):
"""
Indicates whether a pane can be docked on the left of the frame.
:param `b`: whether the pane can be docked at the left or not.
"""
return self.SetFlag(self.optionLeftDockable, b)
def RightDockable(self, b=True):
"""
Indicates whether a pane can be docked on the right of the frame.
:param `b`: whether the pane can be docked at the right or not.
"""
return self.SetFlag(self.optionRightDockable, b)
def Floatable(self, b=True):
"""
Sets whether the user will be able to undock a pane and turn it
into a floating window.
:param `b`: whether the pane can be floated or not.
"""
return self.SetFlag(self.optionFloatable, b)
def Movable(self, b=True):
"""
Indicates whether a pane can be moved.
:param `b`: whether the pane can be moved or not.
"""
return self.SetFlag(self.optionMovable, b)
def NotebookDockable(self, b=True):
"""
Indicates whether a pane can be docked in an automatic L{AuiNotebook}.
:param `b`: whether the pane can be docked in a notebook or not.
"""
return self.SetFlag(self.optionNotebookDockable, b)
def DockFixed(self, b=True):
"""
Causes the | |
please pass async_req=True
>>> thread = api.v1_departments_id_history_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param id: instance id of department history record (required)
:type id: str
:param page:
:type page: int
:param page_size:
:type page_size: int
:param sort: Sorting criteria in the format: property:asc/desc. Default sort is date:desc. Multiple sort criteria are supported and must be separated with a comma. Example: sort=date:desc,name:asc
:type sort: list[str]
:param filter: Query in the RSQL format, allowing to filter history notes collection. Default filter is empty query - returning all results for the requested page. Fields allowed in the query: username, date, note, details. This param can be combined with paging and sorting. Example: filter=username!=admin and details==*disabled* and date<2019-12-15
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(HistorySearchResults, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id',
'page',
'page_size',
'sort',
'filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_departments_id_history_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `v1_departments_id_history_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'page_size' in local_var_params and local_var_params['page_size'] is not None: # noqa: E501
query_params.append(('page-size', local_var_params['page_size'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
200: "HistorySearchResults",
404: "ApiError",
}
return self.api_client.call_api(
'/v1/departments/{id}/history', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def v1_departments_id_history_post(self, id, object_history_note, **kwargs): # noqa: E501
"""Add specified Department history object notes # noqa: E501
Adds specified Department history object notes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_departments_id_history_post(id, object_history_note, async_req=True)
>>> result = thread.get()
:param id: instance id of department history record (required)
:type id: str
:param object_history_note: history notes to create (required)
:type object_history_note: ObjectHistoryNote
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: HrefResponse
"""
kwargs['_return_http_data_only'] = True
return self.v1_departments_id_history_post_with_http_info(id, object_history_note, **kwargs) # noqa: E501
def v1_departments_id_history_post_with_http_info(self, id, object_history_note, **kwargs): # noqa: E501
"""Add specified Department history object notes # noqa: E501
Adds specified Department history object notes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_departments_id_history_post_with_http_info(id, object_history_note, async_req=True)
>>> result = thread.get()
:param id: instance id of department history record (required)
:type id: str
:param object_history_note: history notes to create (required)
:type object_history_note: ObjectHistoryNote
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(HrefResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id',
'object_history_note'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_departments_id_history_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `v1_departments_id_history_post`") # noqa: E501
# verify the required parameter 'object_history_note' is set
if self.api_client.client_side_validation and ('object_history_note' not in local_var_params or # noqa: E501
local_var_params['object_history_note'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `object_history_note` when calling `v1_departments_id_history_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'object_history_note' in local_var_params:
body_params = local_var_params['object_history_note']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
201: "HrefResponse",
404: "ApiError",
503: "ApiError",
}
return self.api_client.call_api(
'/v1/departments/{id}/history', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def v1_departments_id_put(self, id, department, **kwargs): # noqa: E501
"""Update specified department object # noqa: E501
Update specified department object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_departments_id_put(id, department, async_req=True)
>>> result = thread.get()
:param id: instance id of department record (required)
:type id: str
:param department: department object to create. ids defined in this body will be ignored (required)
:type department: Department
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Department
"""
kwargs['_return_http_data_only'] = True
return self.v1_departments_id_put_with_http_info(id, department, **kwargs) # noqa: E501
def v1_departments_id_put_with_http_info(self, id, department, **kwargs): # noqa: E501
"""Update specified department object # noqa: E501
Update specified department object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, | |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for creating instances."""
import collections
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import addresses_utils
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.compute.lib import image_utils
from googlecloudsdk.compute.lib import instance_utils
from googlecloudsdk.compute.lib import metadata_utils
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.compute.lib import windows_password
from googlecloudsdk.compute.lib import zone_utils
from googlecloudsdk.core import log
DISK_METAVAR = (
'name=NAME [mode={ro,rw}] [boot={yes,no}] [device-name=DEVICE_NAME] '
'[auto-delete={yes,no}]')
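# Illustrative only: one plausible --disk value matching DISK_METAVAR, as
# consumed by ValidateDiskFlags/CreatePersistentAttachedDiskMessages below
# (key=value pairs are assumed to be comma-separated on the command line):
#   --disk name=data-disk-1,mode=ro,boot=no,device-name=data,auto-delete=yes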
class Create(base_classes.BaseAsyncCreator,
image_utils.ImageExpander,
addresses_utils.AddressExpander,
zone_utils.ZoneResourceFetcher):
"""Create Google Compute Engine virtual machine instances."""
@staticmethod
def Args(parser):
metadata_utils.AddMetadataArgs(parser)
instance_utils.AddDiskArgs(parser)
instance_utils.AddLocalSsdArgs(parser)
instance_utils.AddImageArgs(parser)
instance_utils.AddCanIpForwardArgs(parser)
instance_utils.AddAddressArgs(parser, instances=True)
instance_utils.AddMachineTypeArgs(parser)
instance_utils.AddMaintenancePolicyArgs(parser)
instance_utils.AddNetworkArgs(parser)
instance_utils.AddNoRestartOnFailureArgs(parser)
instance_utils.AddScopeArgs(parser)
instance_utils.AddTagsArgs(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
parser.add_argument(
'names',
metavar='NAME',
nargs='+',
help='The names of the instances to create.')
utils.AddZoneFlag(
parser,
resource_type='instances',
operation_type='create')
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'instances'
def ValidateLocalSsdFlags(self, args):
for local_ssd in args.local_ssd or []:
interface = local_ssd.get('interface')
if interface and interface not in instance_utils.LOCAL_SSD_INTERFACES:
raise exceptions.ToolException(
'Unexpected local SSD interface: [{given}]. '
'Legal values are [{ok}].'
.format(given=interface,
ok=', '.join(instance_utils.LOCAL_SSD_INTERFACES)))
def ValidateDiskFlags(self, args):
"""Validates the values of all disk-related flags."""
boot_disk_specified = False
for disk in args.disk or []:
disk_name = disk.get('name')
if not disk_name:
raise exceptions.ToolException(
'[name] is missing in [--disk]. [--disk] value must be of the form '
'[{0}].'.format(DISK_METAVAR))
mode_value = disk.get('mode')
if mode_value and mode_value not in ('rw', 'ro'):
raise exceptions.ToolException(
'Value for [mode] in [--disk] must be [rw] or [ro], not [{0}].'
.format(mode_value))
# Ensures that the user is not trying to attach a read-write
# disk to more than one instance.
if len(args.names) > 1 and mode_value == 'rw':
raise exceptions.ToolException(
'Cannot attach disk [{0}] in read-write mode to more than one '
'instance.'.format(disk_name))
boot_value = disk.get('boot')
if boot_value and boot_value not in ('yes', 'no'):
raise exceptions.ToolException(
'Value for [boot] in [--disk] must be [yes] or [no], not [{0}].'
.format(boot_value))
auto_delete_value = disk.get('auto-delete')
if auto_delete_value and auto_delete_value not in ['yes', 'no']:
raise exceptions.ToolException(
'Value for [auto-delete] in [--disk] must be [yes] or [no], not '
'[{0}].'.format(auto_delete_value))
# If this is a boot disk and we have already seen a boot disk,
# we need to fail because only one boot disk can be attached.
if boot_value == 'yes':
if boot_disk_specified:
raise exceptions.ToolException(
'Each instance can have exactly one boot disk. At least two '
'boot disks were specified through [--disk].')
else:
boot_disk_specified = True
if args.image and boot_disk_specified:
raise exceptions.ToolException(
'Each instance can have exactly one boot disk. One boot disk '
'was specified through [--disk] and another through [--image].')
if boot_disk_specified:
if args.boot_disk_device_name:
raise exceptions.ToolException(
'[--boot-disk-device-name] can only be used when creating a new '
'boot disk.')
if args.boot_disk_type:
raise exceptions.ToolException(
'[--boot-disk-type] can only be used when creating a new boot '
'disk.')
if args.boot_disk_size:
raise exceptions.ToolException(
'[--boot-disk-size] can only be used when creating a new boot '
'disk.')
if args.no_boot_disk_auto_delete:
raise exceptions.ToolException(
'[--no-boot-disk-auto-delete] can only be used when creating a '
'new boot disk.')
def UseExistingBootDisk(self, args):
"""Returns True if the user has specified an existing boot disk."""
return any(disk.get('boot') == 'yes' for disk in args.disk or [])
def CreatePersistentAttachedDiskMessages(self, args, instance_ref):
"""Returns a list of AttachedDisk messages and the boot disk's reference."""
disks = []
boot_disk_ref = None
for disk in args.disk or []:
name = disk['name']
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = self.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = self.messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
boot = disk.get('boot') == 'yes'
auto_delete = disk.get('auto-delete') == 'yes'
disk_ref = self.CreateZonalReference(
name, instance_ref.zone,
resource_type='disks')
if boot:
boot_disk_ref = disk_ref
attached_disk = self.messages.AttachedDisk(
autoDelete=auto_delete,
boot=boot,
deviceName=disk.get('device-name'),
mode=mode,
source=disk_ref.SelfLink(),
type=self.messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
# The boot disk must end up at index 0.
if boot:
disks = [attached_disk] + disks
else:
disks.append(attached_disk)
return disks, boot_disk_ref
def CreateLocalSsdMessage(self, zone, device_name, interface):
disk_type_ref = self.CreateZonalReference('local-ssd', zone,
resource_type='diskTypes')
maybe_interface_enum = (
self.messages.AttachedDisk.InterfaceValueValuesEnum(interface)
if interface else None)
return self.messages.AttachedDisk(
type=self.messages.AttachedDisk.TypeValueValuesEnum.SCRATCH,
autoDelete=True,
deviceName=device_name,
interface=maybe_interface_enum,
mode=self.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
initializeParams=self.messages.AttachedDiskInitializeParams(
diskType=disk_type_ref.SelfLink()),
)
def CreateDefaultBootAttachedDiskMessage(
self, args, boot_disk_size_gb, image_uri, instance_ref):
"""Returns an AttachedDisk message for creating a new boot disk."""
if args.boot_disk_type:
disk_type_ref = self.CreateZonalReference(
args.boot_disk_type, instance_ref.zone,
resource_type='diskTypes')
disk_type_uri = disk_type_ref.SelfLink()
else:
disk_type_ref = None
disk_type_uri = None
return self.messages.AttachedDisk(
autoDelete=not args.no_boot_disk_auto_delete,
boot=True,
deviceName=args.boot_disk_device_name,
initializeParams=self.messages.AttachedDiskInitializeParams(
sourceImage=image_uri,
diskSizeGb=boot_disk_size_gb,
diskType=disk_type_uri),
mode=self.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
type=self.messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
def FetchDiskResources(self, disk_refs):
"""Returns a list of disk resources corresponding to the disk references."""
requests = []
for disk_ref in disk_refs:
requests.append((
self.compute.disks,
'Get',
self.messages.ComputeDisksGetRequest(
disk=disk_ref.Name(),
project=disk_ref.project,
zone=disk_ref.zone)))
errors = []
res = list(request_helper.MakeRequests(
requests=requests,
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch some boot disks:')
return res
def CreateServiceAccountMessages(self, args):
"""Returns a list of ServiceAccount messages corresponding to --scopes."""
if args.no_scopes:
scopes = []
else:
scopes = args.scopes or constants.DEFAULT_SCOPES
accounts_to_scopes = collections.defaultdict(list)
for scope in scopes:
parts = scope.split('=')
if len(parts) == 1:
account = 'default'
scope_uri = scope
elif len(parts) == 2:
account, scope_uri = parts
else:
raise exceptions.ToolException(
'[{0}] is an illegal value for [--scopes]. Values must be of the '
'form [SCOPE] or [ACCOUNT=SCOPE].'.format(scope))
# Expands the scope if the user provided an alias like
# "compute-rw".
scope_uri = constants.SCOPES.get(scope_uri, scope_uri)
accounts_to_scopes[account].append(scope_uri)
res = []
for account, scopes in sorted(accounts_to_scopes.iteritems()):
res.append(self.messages.ServiceAccount(
email=account,
scopes=sorted(scopes)))
return res
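# Sketch of the expansion performed above (alias names come from
# constants.SCOPES; the service-account address is made up):
#   --scopes compute-rw,12345@developer.gserviceaccount.com=storage-ro
# produces one ServiceAccount message for 'default' carrying the expanded
# compute-rw scope URI, and one for the explicit account with storage-ro.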
def CreateNetworkInterfaceMessage(self, args, instance_refs):
"""Returns a new NetworkInterface message."""
network_ref = self.CreateGlobalReference(
args.network, resource_type='networks')
network_interface = self.messages.NetworkInterface(
network=network_ref.SelfLink())
if not args.no_address:
access_config = self.messages.AccessConfig(
name=constants.DEFAULT_ACCESS_CONFIG_NAME,
type=self.messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)
# If the user provided an external IP, populate the access
# config with it.
if len(instance_refs) == 1:
region = utils.ZoneNameToRegionName(instance_refs[0].zone)
address = self.ExpandAddressFlag(args, region)
if address:
access_config.natIP = address
network_interface.accessConfigs = [access_config]
return network_interface
def CreateRequests(self, args):
self.ValidateDiskFlags(args)
self.ValidateLocalSsdFlags(args)
if args.maintenance_policy:
on_host_maintenance = (
self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
args.maintenance_policy))
else:
on_host_maintenance = None
scheduling = self.messages.Scheduling(
automaticRestart=not args.no_restart_on_failure,
onHostMaintenance=on_host_maintenance)
service_accounts = self.CreateServiceAccountMessages(args)
if args.tags:
tags = self.messages.Tags(items=args.tags)
else:
tags = None
metadata = metadata_utils.ConstructMetadataMessage(
self.messages,
metadata=args.metadata,
metadata_from_file=args.metadata_from_file)
# If the user already provided an initial Windows password and
# username through metadata, then there is no need to check
# whether the image or the boot disk is Windows.
windows_username_present = False
windows_password_present = False
for kv in metadata.items:
if kv.key == constants.INITIAL_WINDOWS_USER_METADATA_KEY_NAME:
windows_username_present = True
if kv.key == constants.INITIAL_WINDOWS_PASSWORD_METADATA_KEY_NAME:
windows_password_present = True
check_for_windows_image = (not windows_username_present or
not windows_password_present)
boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)
instance_refs = self.CreateZonalReferences(args.names, args.zone)
# Check if the zone is deprecated or has maintenance coming.
self.WarnForZonalCreation(instance_refs)
network_interface = self.CreateNetworkInterfaceMessage(args, instance_refs)
# The element at index i is the machine type URI for instance
# i. We build this list here because we want to delay work that
# requires API calls as much as possible. This leads to a better
# user experience because the tool can fail fast upon a spelling
# mistake instead of delaying the user by making API calls whose
# purpose has already been rendered moot by the spelling mistake.
machine_type_uris = []
for instance_ref in instance_refs:
machine_type_uris.append(self.CreateZonalReference(
args.machine_type, instance_ref.zone,
resource_type='machineTypes').SelfLink())
create_boot_disk = not self.UseExistingBootDisk(args)
add_windows_credentials_to_metadata = False
if create_boot_disk:
image_uri, image_resource = self.ExpandImageFlag(
args, return_image_resource=check_for_windows_image)
if (check_for_windows_image and
image_utils.HasWindowsLicense(image_resource, self.resources)):
log.debug('[%s] is a Windows image.', image_resource.selfLink)
add_windows_credentials_to_metadata = True
else:
image_uri = None
# A list of lists where the element at index i contains a list of
# disk messages that should be set for the instance at index i.
disks_messages = []
# A mapping of zone to boot disk references for all existing boot
# disks that are being attached.
existing_boot_disks = {}
for instance_ref in instance_refs:
persistent_disks, boot_disk_ref = (
self.CreatePersistentAttachedDiskMessages(args, instance_ref))
local_ssds = [
self.CreateLocalSsdMessage(
instance_ref.zone, x.get('device-name'), x.get('interface'))
for x in args.local_ssd or []]
if create_boot_disk:
boot_disk = self.CreateDefaultBootAttachedDiskMessage(
args, boot_disk_size_gb, image_uri, instance_ref)
persistent_disks = [boot_disk] + persistent_disks
else:
existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
disks_messages.append(persistent_disks + local_ssds)
# Now for every existing boot disk being attached, we have to
# figure out whether it has a Windows license.
if check_for_windows_image and existing_boot_disks:
# Sorts the disk references by zone, so the code behaves
# deterministically.
disk_resources = self.FetchDiskResources(
disk_ref for _, disk_ref in sorted(existing_boot_disks.iteritems()))
for disk_resource in disk_resources:
if image_utils.HasWindowsLicense(disk_resource, self.resources):
| |
| ord(self.co.code[index + 3])
def nextsignedword(self):
return shortmask(self.nextword())
def opcode_0x00(self):
"nop"
# JVMS: Push null (== None)
def opcode_0x01(self):
"aconst_null"
self.stack.push(None)
def opcode_0x02(self):
"iconst_m1"
self.stack.push(-1)
def opcode_0x03(self):
"iconst_0"
self.stack.push(0)
def opcode_0x04(self):
"iconst_1"
self.stack.push(1)
def opcode_0x05(self):
"iconst_2"
self.stack.push(2)
def opcode_0x06(self):
"iconst_3"
self.stack.push(3)
def opcode_0x07(self):
"iconst_4"
self.stack.push(4)
def opcode_0x08(self):
"iconst_5"
self.stack.push(5)
def opcode_0x09(self):
"lconst_0"
self.stack.push(r_longlong(0))
def opcode_0x0a(self):
"lconst_1"
self.stack.push(r_longlong(1))
def opcode_0x0b(self):
"fconst_0"
self.stack.push(r_singlefloat(0.0))
def opcode_0x0c(self):
"fconst_1"
self.stack.push(r_singlefloat(1.0))
def opcode_0x0d(self):
"fconst_2"
self.stack.push(r_singlefloat(2.0))
def opcode_0x0e(self):
"dconst_0"
self.stack.push(0.0)
def opcode_0x0f(self):
"dconst_1"
self.stack.push(1.0)
def opcode_0x10(self):
"bipush"
self.stack.push(self.nextsignedbyte())
# JVMS: Push short
def opcode_0x11(self):
"sipush"
self.stack.push(self.nextsignedword())
def opcode_0x12(self):
"ldc"
index = self.nextbyte()
const = self.const[index]
if isinstance(const, classfile.CONSTANT_String_info):
string = self.const[const.string_index]
objectref = make_String(string, self.loader)
self.stack.push(objectref)
elif isinstance(const, classfile.CONSTANT_Float):
self.stack.push(float_parse(const.bytes)) ### TODO: floatmask
#maybe this is only used in ldc_w
elif isinstance(const, classfile.CONSTANT_Class_info):
name = self.const[const.name_index]
jcls = self.loader.getclass(name)
objectref = vmobject_getClass_helper(jcls, self.loader)
self.stack.push(objectref)
else:# XXX other types?
self.stack.push(intmask(const.bytes))
def opcode_0x13(self):
"ldc_w"
index = self.nextword()
const = self.const[index]
if isinstance(const, classfile.CONSTANT_String_info):
string = self.const[const.string_index]
objectref = make_String(string, self.loader)
#print objectref
self.stack.push(objectref)
elif isinstance(const, classfile.CONSTANT_Float):
self.stack.push(float_parse(const.bytes)) ### TODO: floatmask
elif isinstance(const, classfile.CONSTANT_Class_info):
name = self.const[const.name_index]
jcls = self.loader.getclass(name, self.method)# for inner-classes
objectref = vmobject_getClass_helper(jcls, self.loader)
self.stack.push(objectref)
else:# XXX other types?
self.stack.push(intmask(const.bytes))
#raise NotImplementedError("ldc_w")
def opcode_0x14(self):
"ldc2_w"
indexbyte1 = self.nextbyte()
indexbyte2 = self.nextbyte()
index = (indexbyte1 << 8) | indexbyte2
const = self.const[index]
if isinstance(const, classfile.CONSTANT_Double):
int_value = (const.high_bytes << 32) | const.low_bytes
self.stack.push(parse_double(int_value))
elif isinstance(const, classfile.CONSTANT_Long_Info):
value = (const.high_bytes << 32) | const.low_bytes
self.stack.push(r_longlong(value))
else:
raise Exception("unknown constant type")
def opcode_0x15(self):
"iload"
loc = self.nextbyte()
self.stack.push(self.locals.get(loc,"int"))
def opcode_0x16(self):
"lload"
loc = self.nextbyte()
self.stack.push(self.locals.get(loc,"long"))
def opcode_0x17(self):
"fload"
loc = self.nextbyte()
self.stack.push(self.locals.get(loc,"float"))
def opcode_0x18(self):
"dload"
loc = self.nextbyte()
self.stack.push(self.locals.get(loc,"double"))
# JVMS: Load reference from local variable
def opcode_0x19(self):
"aload"
loc = self.nextbyte()
self.stack.push(self.locals.get(loc,"ref"))
def opcode_0x1a(self):
"iload_0"
self.stack.push(self.locals.get(0,"int"))
def opcode_0x1b(self):
"iload_1"
self.stack.push(self.locals.get(1,"int"))
def opcode_0x1c(self):
"iload_2"
self.stack.push(self.locals.get(2,"int"))
def opcode_0x1d(self):
"iload_3"
self.stack.push(self.locals.get(3,"int"))
def opcode_0x1e(self):
"lload_0"
self.stack.push(self.locals.get(0,"long"))
def opcode_0x1f(self):
"lload_1"
self.stack.push(self.locals.get(1,"long"))
def opcode_0x20(self):
"lload_2"
self.stack.push(self.locals.get(2,"long"))
def opcode_0x21(self):
"lload_3"
self.stack.push(self.locals.get(3,"long"))
def opcode_0x22(self):
"fload_0"
self.stack.push(self.locals.get(0,"float"))
def opcode_0x23(self):
"fload_1"
self.stack.push(self.locals.get(1,"float"))
def opcode_0x24(self):
"fload_2"
self.stack.push(self.locals.get(2,"float"))
def opcode_0x25(self):
"fload_3"
self.stack.push(self.locals.get(3,"float"))
def opcode_0x26(self):
"dload_0"
self.stack.push(self.locals.get(0,"double"))
def opcode_0x27(self):
"dload_1"
self.stack.push(self.locals.get(1,"double"))
def opcode_0x28(self):
"dload_2"
self.stack.push(self.locals.get(2,"double"))
def opcode_0x29(self):
"dload_3"
self.stack.push(self.locals.get(3,"double"))
# JVMS: Load reference from local variable
def opcode_0x2a(self):
"aload_0"
self.stack.push(self.locals.get(0,"ref"))
def opcode_0x2b(self):
"aload_1"
self.stack.push(self.locals.get(1,"ref"))
def opcode_0x2c(self):
"aload_2"
self.stack.push(self.locals.get(2,"ref"))
def opcode_0x2d(self):
"aload_3"
self.stack.push(self.locals.get(3,"ref"))
# JVMS: Load int from array
def opcode_0x2e(self):
"iaload"
index = self.stack.pop()
array = self.stack.pop()
if array==None:
throw_NullPointerException(self.loader)
if not index < len(array.arrayref):
throw_ArrayIndexOutOfBoundsException(self.loader, index)
self.stack.push(array.arrayref[index])
def opcode_0x2f(self):
"laload"
index = self.stack.pop()
array = self.stack.pop()
if array==None:
throw_NullPointerException(self.loader)
if not index < len(array.arrayref):
throw_ArrayIndexOutOfBoundsException(self.loader, index)
self.stack.push(array.arrayref[index])
def opcode_0x30(self):
"faload"
index = self.stack.pop()
array = self.stack.pop()
if array==None:
throw_NullPointerException(self.loader)
if not index < len(array.arrayref):
throw_ArrayIndexOutOfBoundsException(self.loader, index)
self.stack.push(array.arrayref[index])
def opcode_0x31(self):
"daload"
index = self.stack.pop()
array = self.stack.pop()
if array==None:
throw_NullPointerException(self.loader)
if not index < len(array.arrayref):
throw_ArrayIndexOutOfBoundsException(self.loader, index)
self.stack.push(array.arrayref[index])
def opcode_0x32(self):
"aaload"
index = self.stack.pop()
array = self.stack.pop()
if array==None:
throw_NullPointerException(self.loader)
if not index < len(array.arrayref):
throw_ArrayIndexOutOfBoundsException(self.loader, index)
#print "aa",array.arrayref[index].jcls.__name__
#array.arrayref[index].fields.print_map()
self.stack.push(array.arrayref[index])
# JVMS: Load byte or boolean from array
def opcode_0x33(self):
"baload"
index = self.stack.pop()
array = self.stack.pop()
self.stack.push(array.arrayref[index])
def opcode_0x34(self):
"caload"
index = self.stack.pop()
array = self.stack.pop()
#print "CA:",index
self.stack.push(self.DESCR_UNCAST['char'](array.arrayref[index]))
def opcode_0x35(self):
"saload"
index = self.stack.pop()
array = self.stack.pop()
self.stack.push(array.arrayref[index])
def opcode_0x36(self):
"istore"
loc = self.nextbyte()
self.locals.set(loc, self.stack.pop(), "int")
def opcode_0x37(self):
"lstore"
loc = self.nextbyte()
self.locals.set(loc, self.stack.pop(), "long")
def opcode_0x38(self):
"fstore"
loc = self.nextbyte()
self.locals.set(loc, self.stack.pop(), "float")
def opcode_0x39(self):
"dstore"
loc = self.nextbyte()
self.locals.set(loc, self.stack.pop(), "double")
def opcode_0x3a(self):
"astore"
loc = self.nextbyte()
self.locals.set(loc, self.stack.pop(), "ref")
def opcode_0x3b(self):
"istore_0"
self.locals.set(0, self.stack.pop(), "int")
def opcode_0x3c(self):
"istore_1"
self.locals.set(1, self.stack.pop(), "int")
def opcode_0x3d(self):
"istore_2"
self.locals.set(2, self.stack.pop(), "int")
def opcode_0x3e(self):
"istore_3"
self.locals.set(3, self.stack.pop(), "int")
def opcode_0x3f(self):
"lstore_0"
self.locals.set(0, self.stack.pop(), "long")
def opcode_0x40(self):
"lstore_1"
self.locals.set(1, self.stack.pop(), "long")
def opcode_0x41(self):
"lstore_2"
self.locals.set(2, self.stack.pop(), "long")
def opcode_0x42(self):
"lstore_3"
self.locals.set(3, self.stack.pop(), "long")
def opcode_0x43(self):
"fstore_0"
self.locals.set(0, self.stack.pop(), "float")
def opcode_0x44(self):
"fstore_1"
self.locals.set(1, self.stack.pop(), "float")
def opcode_0x45(self):
"fstore_2"
self.locals.set(2, self.stack.pop(), "float")
def opcode_0x46(self):
"fstore_3"
self.locals.set(3, self.stack.pop(), "float")
def opcode_0x47(self):
"dstore_0"
self.locals.set(0, self.stack.pop(), "double")
def opcode_0x48(self):
"dstore_1"
self.locals.set(1, self.stack.pop(), "double")
def opcode_0x49(self):
"dstore_2"
self.locals.set(2, self.stack.pop(), "double")
def opcode_0x4a(self):
"dstore_3"
self.locals.set(3, self.stack.pop(), "double")
def opcode_0x4b(self):
"astore_0"
self.locals.set(0, self.stack.pop(), "ref")
def opcode_0x4c(self):
"astore_1"
self.locals.set(1, self.stack.pop(), "ref")
def opcode_0x4d(self):
"astore_2"
self.locals.set(2, self.stack.pop(), "ref")
def opcode_0x4e(self):
"astore_3"
self.locals.set(3, self.stack.pop(), "ref")
# JVMS: Store into int array
def opcode_0x4f(self):
"iastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
def opcode_0x50(self):
"lastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
def opcode_0x51(self):
"fastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
def opcode_0x52(self):
"dastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
# JVMS: Store into reference array
def opcode_0x53(self):
"aastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
def opcode_0x54(self):
"bastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
def opcode_0x55(self):
"castore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = self.DESCR_CAST['char'](value)
def opcode_0x56(self):
"sastore"
value = self.stack.pop()
index = self.stack.pop()
array = self.stack.pop()
array.arrayref[index] = value
# These 6 methods are the only ones which access
# istack, astack, ..., directly
# FIXME: no direct use
def opcode_0x57(self):
"pop"
value = self.stack.pop()
assert not isinstance(value,float)
assert not isinstance(value,r_longlong)
# XXX Not Tested yet
def opcode_0x58(self):
"pop2"
value = self.stack.pop()
# is "value" a category 2 value?
# value is not a long and not a double
if not isinstance(value,float) and not isinstance(value,r_longlong):
self.stack.pop()
def opcode_0x59(self):
"dup"
last_stack = self.stack.stackhistory[-1]
if last_stack == "int":
self.stack.push(self.stack.istack[-1])
return
elif last_stack == "ref":
self.stack.push(self.stack.astack[-1])
return
elif last_stack == "float":
self.stack.push(self.stack.fstack[-1])
return
elif last_stack == "long" or last_stack == "double":
raise Exception("category 2 value on stack")
else:
raise Exception("unknown type on stack")
def opcode_0x5a(self):
"dup_x1"
value1 = self.stack.pop()
value2 = self.stack.pop()
self.stack.push(value1)
self.stack.push(value2)
self.stack.push(value1)
# Not tested yet
def opcode_0x5b(self):
"dup_x2"
# value1 = self.stack.pop()
# value2 = self.stack.pop()
# if not isinstance(value1,float) and not isinstance(value1,r_longlong) and not isinstance(value2,float) and not isinstance(value2,r_longlong):
# value3 = self.stack.pop()
# self.stack.push(value1)
# self.stack.push(value3)
# self.stack.push(value2)
# self.stack.push(value1)
# elif not isinstance(value1,float) and not isinstance(value1,r_longlong) and (isinstance(value2,float) or isinstance(value2,r_longlong)):
# self.stack.push(value1)
# self.stack.push(value2)
# self.stack.push(value1)
# else:
# raise Exception("unexpected or unknown type on stack")
raise NotImplementedError("dup_x2")
def opcode_0x5c(self):
"dup2"
value1 = self.stack.pop()
if not isinstance(value1,float) and not isinstance(value1,r_longlong):
value2 = self.stack.pop()
assert not isinstance(value2,float) and not isinstance(value2,r_longlong)
self.stack.push(value2)
self.stack.push(value1)
self.stack.push(value2)
self.stack.push(value1)
elif isinstance(value1,float) or isinstance(value1,r_longlong):
self.stack.push(value1)
self.stack.push(value1)
else:
raise Exception("unexpected or unknown type on stack")
# Not tested yet
def opcode_0x5d(self):
"dup2_x1"
# value1 = self.stack.pop()
# value2 = self.stack.pop()
# if not isinstance(value1,float) and not isinstance(value1,r_longlong) and not isinstance(value2,float) and not isinstance(value1,r_longlong):
# value3 = self.stack.pop()
# assert not isinstance(value3,float) and not isinstance(value3,r_longlong)
# self.stack.push(value2)
# self.stack.push(value1)
# self.stack.push(value3)
# self.stack.push(value2)
# self.stack.push(value1)
# elif (isinstance(value1,float) or isinstance(value1,r_longlong)) and not isinstance(value2,float) and not isinstance(value2,r_longlong):
# self.stack.push(value1)
# self.stack.push(value2)
# self.stack.push(value1)
# else:
# raise Exception("unexpected or unknown type on stack")
raise NotImplementedError("dup2_x1")
# Not tested yet
def opcode_0x5e(self):
"dup2_x2"
# value1 = self.stack.pop()
# value2 = self.stack.pop()
# if not isinstance(value1,float) and not isinstance(value1,r_longlong) and not isinstance(value2,float) and not isinstance(value2,r_longlong):
# value3 = self.stack.pop()
# if not isinstance(value3,float) and not isinstance(value3,r_longlong):
# value4 = self.stack.pop()
# assert not isinstance(value4,float) and not isinstance(value4,r_longlong)
# self.stack.push(value2)
# self.stack.push(value1)
# self.stack.push(value4)
# self.stack.push(value3)
# self.stack.push(value2)
# self.stack.push(value1)
# elif isinstance(value3,float) or isinstance(value3,r_longlong):
# self.stack.push(value2)
# self.stack.push(value1)
# self.stack.push(value3)
# self.stack.push(value2)
# self.stack.push(value1)
# else:
# raise Exception("unexpected or unknown type on stack
# elif (isinstance(value1,float) or isinstance(value1,r_longlong)) and not isinstance(value2,float) and not isinstance(value2,r_longlong):
# value3 = self.stack.pop()
# assert not isinstance(value3,float) and not isinstance(value3,r_longlong)
# self.stack.push(value1)
# self.stack.push(value3)
# self.stack.push(value2)
# self.stack.push(value1)
# elif (isinstance(value1,float) or isinstance(value1,r_longlong)) and (isinstance(value2,float) or isinstance(value2,r_longlong)):
# self.stack.push(value1)
# self.stack.push(value2)
# self.stack.push(value1)
# else:
# raise Exception("unexpected or unknown type on stack
raise NotImplementedError("dup2_x2")
# Not tested yet
def opcode_0x5f(self):
"swap"
# value1 = | |
'num_filters' : 128},
'bypass': 4},
2 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : n_classes},
'bypass': [0,1]},
}
}
def cfg_mom_flat_concat(n_classes, use_cond=False, method='sign', nonlin='relu'):
return {
'use_cond': use_cond,
# ONLY USED IF use_cond = True!!!
'cond_scale_factor': 4,
'cond_encode_depth': 1,
'cond_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
},
# Encoding the inputs
'main_encode_depth': 4,
'main_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
},
# ONLY USED IF use_cond = True!!!
'encode_depth': 3,
'encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
#'pool': {'size' : 2, 'stride' : 2, 'type' : 'max'}
}
},
# Calculate moments
'combine_moments': 'minus' if method == 'sign' else 'concat',
# ONLY USED IF combine_moments is 'concat'
'combine_moments_encode_depth' : 1,
'combine_moments_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 128},
}
},
'moments_encode_depth' : 5,
'moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
}
},
# Predict next moments
'delta_moments_encode_depth' : 11,
'delta_moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
6 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
7 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
8 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
9 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
10 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'nonlinearity': nonlin},
11 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
},
'combine_delta': 'plus' if method == 'sign' else 'concat',
# ONLY USED IF combine_delta is 'concat'
'combine_delta_encode_depth' : 1,
'combine_delta_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 128},
}
},
'deconv_depth': 2,
'deconv' : {
1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 128},
},
2 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : n_classes},
},
}
}
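# A minimal usage sketch (the class count of 21 is arbitrary): with the default
# 'sign' method the two moment streams are combined by subtraction/addition,
# otherwise they are concatenated and passed through the extra encode stages.
#   cfg = cfg_mom_flat_concat(21, use_cond=False, method='sign')
#   assert cfg['combine_moments'] == 'minus' and cfg['combine_delta'] == 'plus'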
def cfg_mom_concat(n_classes, use_cond=False, method='sign'):
return {
'use_cond': use_cond,
# ONLY USED IF use_cond = True!!!
'cond_scale_factor': 8,
'cond_encode_depth': 1,
'cond_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
},
# Encoding the inputs
'main_encode_depth': 8,
'main_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 256},
},
6 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 256},
},
7 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 256},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
8 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
},
# ONLY USED IF use_cond = True!!!
'encode_depth': 3,
'encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
#'pool': {'size' : 2, 'stride' : 2, 'type' : 'max'}
}
},
# Calculate moments
'combine_moments': 'minus' if method == 'sign' else 'concat',
# ONLY USED IF combine_moments is 'concat'
'combine_moments_encode_depth' : 1,
'combine_moments_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 512},
}
},
'moments_encode_depth' : 5,
'moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
}
},
# Predict next moments
'delta_moments_encode_depth' : 5,
'delta_moments_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 512},
}
},
'combine_delta': 'plus' if method == 'sign' else 'concat',
# ONLY USED IF combine_delta is 'concat'
'combine_delta_encode_depth' : 1,
'combine_delta_encode' : {
1 : {'conv' : {'filter_size': 3, 'stride': 1, 'num_filters' : 512},
}
},
'deconv_depth': 3,
'deconv' : {
1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 512},
},
2 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 256},
},
3 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : n_classes},
},
#4 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : n_classes},
# },
}
}
def cfg_vgg_jerk_action(n_classes):
return {
'cond_scale_factor': 8,
'cond_encode_depth': 1,
'cond_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
},
'main_encode_depth': 7,
'main_encode' : {
1 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64}
},
2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 64},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
3 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
},
4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 128},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
5 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 256},
},
6 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 256},
},
7 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 256},
'pool' : {'size' : 2, 'stride' : 2, 'type' : 'max'}},
},
'encode_depth': 3,
'encode' : {
1 : {'conv' : {'filter_size' | |
= color;
} else {
discard;
}
}
"""
}
_Margins = namedtuple('Margins', ('left', 'right', 'top', 'bottom'))
# Margins used when plot frame is not displayed
_NoDisplayMargins = _Margins(0, 0, 0, 0)
def __init__(self, margins):
"""
:param margins: The margins around plot area for axis and labels.
:type margins: dict with 'left', 'right', 'top', 'bottom' keys and
values as ints.
"""
self._renderResources = None
self._margins = self._Margins(**margins)
self.axes = [] # List of PlotAxis to be updated by subclasses
self._grid = False
self._size = 0., 0.
self._title = ''
self._displayed = True
@property
def isDirty(self):
"""True if it need to refresh graphic rendering, False otherwise."""
return self._renderResources is None
GRID_NONE = 0
GRID_MAIN_TICKS = 1
GRID_SUB_TICKS = 2
GRID_ALL_TICKS = (GRID_MAIN_TICKS + GRID_SUB_TICKS)
@property
def displayed(self):
"""Whether axes and their labels are displayed or not (bool)"""
return self._displayed
@displayed.setter
def displayed(self, displayed):
displayed = bool(displayed)
if displayed != self._displayed:
self._displayed = displayed
self._dirty()
@property
def margins(self):
"""Margins in pixels around the plot."""
if not self.displayed:
return self._NoDisplayMargins
else:
return self._margins
@property
def grid(self):
"""Grid display mode:
- 0: No grid.
- 1: Grid on main ticks.
- 2: Grid on sub-ticks for log scale axes.
- 3: Grid on main and sub ticks."""
return self._grid
@grid.setter
def grid(self, grid):
assert grid in (self.GRID_NONE, self.GRID_MAIN_TICKS,
self.GRID_SUB_TICKS, self.GRID_ALL_TICKS)
if grid != self._grid:
self._grid = grid
self._dirty()
@property
def size(self):
"""Size in pixels of the plot area including margins."""
return self._size
@size.setter
def size(self, size):
assert len(size) == 2
size = tuple(size)
if size != self._size:
self._size = size
self._dirty()
@property
def plotOrigin(self):
"""Plot area origin (left, top) in widget coordinates in pixels."""
return self.margins.left, self.margins.top
@property
def plotSize(self):
"""Plot area size (width, height) in pixels."""
w, h = self.size
w -= self.margins.left + self.margins.right
h -= self.margins.top + self.margins.bottom
return w, h
@property
def title(self):
"""Main title as a str in latin-1."""
return self._title
@title.setter
def title(self, title):
if title != self._title:
self._title = title
self._dirty()
# In-place update
# if self._renderResources is not None:
# self._renderResources[-1][-1].text = title
def _dirty(self):
        # When Text2D requires a discard, we need to handle it
self._renderResources = None
def _buildGridVertices(self):
if self._grid == self.GRID_NONE:
return []
elif self._grid == self.GRID_MAIN_TICKS:
def test(text):
return text is not None
elif self._grid == self.GRID_SUB_TICKS:
def test(text):
return text is None
elif self._grid == self.GRID_ALL_TICKS:
def test(_):
return True
else:
            _logger.warning('Wrong grid mode: %d', self._grid)
return []
return self._buildGridVerticesWithTest(test)
def _buildGridVerticesWithTest(self, test):
"""Override in subclass to generate grid vertices"""
return []
def _buildVerticesAndLabels(self):
# To fill with copy of axes lists
vertices = []
labels = []
for axis in self.axes:
axisVertices, axisLabels = axis.getVerticesAndLabels()
vertices += axisVertices
labels += axisLabels
vertices = numpy.array(vertices, dtype=numpy.float32)
# Add main title
xTitle = (self.size[0] + self.margins.left -
self.margins.right) // 2
yTitle = self.margins.top - self._TICK_LENGTH_IN_PIXELS
labels.append(Text2D(text=self.title,
x=xTitle,
y=yTitle,
align=CENTER,
valign=BOTTOM))
# grid
gridVertices = numpy.array(self._buildGridVertices(),
dtype=numpy.float32)
self._renderResources = (vertices, gridVertices, labels)
_program = Program(
_SHADERS['vertex'], _SHADERS['fragment'], attrib0='position')
def render(self):
if not self.displayed:
return
if self._renderResources is None:
self._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
width, height = self.size
matProj = mat4Ortho(0, width, height, 0, 1, -1)
gl.glViewport(0, 0, width, height)
prog = self._program
prog.use()
gl.glLineWidth(self._LINE_WIDTH)
gl.glUniformMatrix4fv(prog.uniforms['matrix'], 1, gl.GL_TRUE,
matProj.astype(numpy.float32))
gl.glUniform4f(prog.uniforms['color'], 0., 0., 0., 1.)
gl.glUniform1f(prog.uniforms['tickFactor'], 0.)
gl.glEnableVertexAttribArray(prog.attributes['position'])
gl.glVertexAttribPointer(prog.attributes['position'],
2,
gl.GL_FLOAT,
gl.GL_FALSE,
0, vertices)
gl.glDrawArrays(gl.GL_LINES, 0, len(vertices))
for label in labels:
label.render(matProj)
def renderGrid(self):
if self._grid == self.GRID_NONE:
return
if self._renderResources is None:
self._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
width, height = self.size
matProj = mat4Ortho(0, width, height, 0, 1, -1)
gl.glViewport(0, 0, width, height)
prog = self._program
prog.use()
gl.glLineWidth(self._LINE_WIDTH)
gl.glUniformMatrix4fv(prog.uniforms['matrix'], 1, gl.GL_TRUE,
matProj.astype(numpy.float32))
gl.glUniform4f(prog.uniforms['color'], 0.7, 0.7, 0.7, 1.)
gl.glUniform1f(prog.uniforms['tickFactor'], 0.) # 1/2.) # 1/tickLen
gl.glEnableVertexAttribArray(prog.attributes['position'])
gl.glVertexAttribPointer(prog.attributes['position'],
2,
gl.GL_FLOAT,
gl.GL_FALSE,
0, gridVertices)
gl.glDrawArrays(gl.GL_LINES, 0, len(gridVertices))
# GLPlotFrame2D ###############################################################
class GLPlotFrame2D(GLPlotFrame):
def __init__(self, margins):
"""
:param margins: The margins around plot area for axis and labels.
:type margins: dict with 'left', 'right', 'top', 'bottom' keys and
values as ints.
"""
super(GLPlotFrame2D, self).__init__(margins)
self.axes.append(PlotAxis(self,
tickLength=(0., -5.),
labelAlign=CENTER, labelVAlign=TOP,
titleAlign=CENTER, titleVAlign=TOP,
titleRotate=0,
titleOffset=(0, self.margins.bottom // 2)))
self._x2AxisCoords = ()
self.axes.append(PlotAxis(self,
tickLength=(5., 0.),
labelAlign=RIGHT, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=BOTTOM,
titleRotate=ROTATE_270,
titleOffset=(-3 * self.margins.left // 4,
0)))
self._y2Axis = PlotAxis(self,
tickLength=(-5., 0.),
labelAlign=LEFT, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=TOP,
titleRotate=ROTATE_270,
titleOffset=(3 * self.margins.right // 4,
0))
self._isYAxisInverted = False
self._dataRanges = {
'x': (1., 100.), 'y': (1., 100.), 'y2': (1., 100.)}
self._baseVectors = (1., 0.), (0., 1.)
self._transformedDataRanges = None
self._transformedDataProjMat = None
self._transformedDataY2ProjMat = None
def _dirty(self):
super(GLPlotFrame2D, self)._dirty()
self._transformedDataRanges = None
self._transformedDataProjMat = None
self._transformedDataY2ProjMat = None
@property
def isDirty(self):
"""True if it need to refresh graphic rendering, False otherwise."""
return (super(GLPlotFrame2D, self).isDirty or
self._transformedDataRanges is None or
self._transformedDataProjMat is None or
self._transformedDataY2ProjMat is None)
@property
def xAxis(self):
return self.axes[0]
@property
def yAxis(self):
return self.axes[1]
@property
def y2Axis(self):
return self._y2Axis
@property
def isY2Axis(self):
"""Whether to display the left Y axis or not."""
return len(self.axes) == 3
@isY2Axis.setter
def isY2Axis(self, isY2Axis):
if isY2Axis != self.isY2Axis:
if isY2Axis:
self.axes.append(self._y2Axis)
else:
self.axes = self.axes[:2]
self._dirty()
@property
def isYAxisInverted(self):
"""Whether Y axes are inverted or not as a bool."""
return self._isYAxisInverted
@isYAxisInverted.setter
def isYAxisInverted(self, value):
value = bool(value)
if value != self._isYAxisInverted:
self._isYAxisInverted = value
self._dirty()
DEFAULT_BASE_VECTORS = (1., 0.), (0., 1.)
"""Values of baseVectors for orthogonal axes."""
@property
def baseVectors(self):
"""Coordinates of the X and Y axes in the orthogonal plot coords.
Raises ValueError if corresponding matrix is singular.
2 tuples of 2 floats: (xx, xy), (yx, yy)
"""
return self._baseVectors
@baseVectors.setter
def baseVectors(self, baseVectors):
self._dirty()
(xx, xy), (yx, yy) = baseVectors
vectors = (float(xx), float(xy)), (float(yx), float(yy))
det = (vectors[0][0] * vectors[1][1] - vectors[1][0] * vectors[0][1])
if det == 0.:
raise ValueError("Singular matrix for base vectors: " +
str(vectors))
if vectors != self._baseVectors:
self._baseVectors = vectors
self._dirty()
@property
def dataRanges(self):
"""Ranges of data visible in the plot on x, y and y2 axes.
This is different to the axes range when axes are not orthogonal.
Type: ((xMin, xMax), (yMin, yMax), (y2Min, y2Max))
"""
return self._DataRanges(self._dataRanges['x'],
self._dataRanges['y'],
self._dataRanges['y2'])
@staticmethod
def _clipToSafeRange(min_, max_, isLog):
# Clip range if needed
minLimit = FLOAT32_MINPOS if isLog else FLOAT32_SAFE_MIN
min_ = numpy.clip(min_, minLimit, FLOAT32_SAFE_MAX)
max_ = numpy.clip(max_, minLimit, FLOAT32_SAFE_MAX)
assert min_ < max_
return min_, max_
def setDataRanges(self, x=None, y=None, y2=None):
"""Set data range over each axes.
The provided ranges are clipped to possible values
(i.e., 32 float range + positive range for log scale).
:param x: (min, max) data range over X axis
:param y: (min, max) data range over Y axis
:param y2: (min, max) data range over Y2 axis
"""
if x is not None:
self._dataRanges['x'] = \
self._clipToSafeRange(x[0], x[1], self.xAxis.isLog)
if y is not None:
self._dataRanges['y'] = \
self._clipToSafeRange(y[0], y[1], self.yAxis.isLog)
if y2 is not None:
self._dataRanges['y2'] = \
self._clipToSafeRange(y2[0], y2[1], self.y2Axis.isLog)
self.xAxis.dataRange = self._dataRanges['x']
self.yAxis.dataRange = self._dataRanges['y']
self.y2Axis.dataRange = self._dataRanges['y2']
_DataRanges = namedtuple('dataRanges', ('x', 'y', 'y2'))
@property
def transformedDataRanges(self):
"""Bounds of the displayed area in transformed data coordinates
(i.e., log scale applied if any as well as skew)
3-tuple of 2-tuple (min, max) for each axis: x, y, y2.
"""
if self._transformedDataRanges is None:
(xMin, xMax), (yMin, yMax), (y2Min, y2Max) = self.dataRanges
if self.xAxis.isLog:
try:
xMin = math.log10(xMin)
except ValueError:
_logger.info('xMin: warning log10(%f)', xMin)
xMin = 0.
try:
xMax = math.log10(xMax)
except ValueError:
_logger.info('xMax: warning log10(%f)', xMax)
xMax = 0.
if self.yAxis.isLog:
try:
yMin = math.log10(yMin)
except ValueError:
_logger.info('yMin: warning log10(%f)', yMin)
yMin = 0.
try:
yMax = math.log10(yMax)
except ValueError:
_logger.info('yMax: warning log10(%f)', yMax)
yMax = 0.
try:
y2Min = math.log10(y2Min)
except ValueError:
                    _logger.info('y2Min: warning log10(%f)', y2Min)
y2Min = 0.
try:
y2Max = math.log10(y2Max)
except ValueError:
                    _logger.info('y2Max: warning log10(%f)', y2Max)
y2Max = 0.
# Non-orthogonal axes
if self.baseVectors != self.DEFAULT_BASE_VECTORS:
(xx, xy), (yx, yy) = self.baseVectors
skew_mat = numpy.array(((xx, yx), (xy, yy)))
corners = [(xMin, yMin), (xMin, yMax),
(xMax, yMin), (xMax, yMax),
(xMin, y2Min), (xMin, y2Max),
(xMax, y2Min), (xMax, y2Max)]
corners = numpy.array(
[numpy.dot(skew_mat, | |
# py-sdl2/py-sdl2
import os
import sys
import ctypes
import pytest
from io import BytesIO
import sdl2
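# Py2/Py3 compatibility helpers for converting between bytes and str in the tests below.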
if sys.version_info[0] >= 3:
byteify = bytes
stringify = lambda x, enc: x.decode(enc)
else:
byteify = lambda x, enc: x.encode(enc)
stringify = lambda x, enc: str(x)
@pytest.fixture
def testfile_path():
testdir = os.path.dirname(os.path.abspath(__file__))
testfile = os.path.join(testdir, "resources", "rwopstest.txt")
testfile = testfile.encode("utf-8")
yield testfile
@pytest.fixture
def with_test_rw():
sdl2.SDL_ClearError()
buf = ctypes.create_string_buffer(b"abcdefghijklmnop")
rw = sdl2.SDL_RWFromMem(buf, len(buf))
assert sdl2.SDL_GetError() == b""
assert isinstance(rw.contents, sdl2.SDL_RWops)
yield (rw, buf)
sdl2.SDL_RWclose(rw)
@pytest.fixture
def test_buf():
buf = BytesIO()
buf.write(b"abcdefghijklmnop")
buf.seek(0, os.SEEK_SET)
yield buf
def test_SDL_RWops():
rw = sdl2.SDL_RWops()
assert isinstance(rw, sdl2.SDL_RWops)
def test_SDL_RWFromFile(testfile_path):
rw = sdl2.SDL_RWFromFile(testfile_path, b"r")
assert isinstance(rw.contents, sdl2.SDL_RWops)
# Read the first 36 bytes(sic!). It should be:
# 'This is a test file for sdl2.rwops!'
length = 36
buf = BytesIO()
while length >= 2:
# Reading in two bytes - we have plain text (1-byte encoding), so
# we read in 2 characters at a time. This means that the first
# character is always stored in the low byte.
ch = sdl2.SDL_ReadLE16(rw)
buf.write(byteify(chr(ch & 0x00FF), "utf-8"))
buf.write(byteify(chr(ch >> 8), "utf-8"))
length -= 2
expected = "This is a test file for sdl2.rwops!"
assert stringify(buf.getvalue(), "utf-8") == expected
@pytest.mark.skip("not implemented")
def test_SDL_RWFromFP():
# Requires a C stdio.h file pointer as input, not worth testing
pass
def test_SDL_RWFromMem():
buf = ctypes.create_string_buffer(b"1234")
rw = sdl2.SDL_RWFromMem(buf, len(buf))
assert sdl2.SDL_GetError() == b""
assert isinstance(rw.contents, sdl2.SDL_RWops)
# Make sure it's writable
value = (
(ord("a")) | (ord("b") << 8)
)
assert sdl2.SDL_WriteLE16(rw, value) == 1
assert buf.value == b"ab34"
def test_SDL_RWFromConstMem():
buf = ctypes.create_string_buffer(b"1234")
rw = sdl2.SDL_RWFromConstMem(buf, len(buf))
assert sdl2.SDL_GetError() == b""
assert isinstance(rw.contents, sdl2.SDL_RWops)
# Make sure it isn't writable
value = (
(ord("a")) | (ord("b") << 8)
)
assert sdl2.SDL_WriteLE16(rw, value) == 0
assert buf.value == b"1234"
def test_SDL_RWsize(with_test_rw):
rw, buf = with_test_rw
assert sdl2.SDL_RWsize(rw) == len(buf)
def test_SDL_RWSeekTell(with_test_rw):
rw, buf = with_test_rw
seek_tests = [
(sdl2.RW_SEEK_END, 0, len(buf)), # Seek to end of RW
(sdl2.RW_SEEK_SET, 0, 0), # Seek to start of RW
(sdl2.RW_SEEK_CUR, 8, 8), # Seek 8 bytes forward from current pos
(sdl2.RW_SEEK_CUR, -3, 5), # Seek 3 bytes back from current pos
(sdl2.RW_SEEK_END, -4, len(buf) - 4), # Seek 4 bytes back from end
]
for whence, offset, expected in seek_tests:
pos1 = sdl2.SDL_RWseek(rw, offset, whence)
pos2 = sdl2.SDL_RWtell(rw)
assert pos1 == pos2
assert pos1 == expected
def test_SDL_RWread(with_test_rw):
rw, buf = with_test_rw
# Read the first two characters
readbuf = ctypes.create_string_buffer(2)
read = sdl2.SDL_RWread(rw, readbuf, 1, 2)
assert read == 2
assert readbuf.raw == b"ab"
# Read the next 5 characters
readbuf = ctypes.create_string_buffer(5)
read = sdl2.SDL_RWread(rw, readbuf, 1, 5)
assert read == 5
assert readbuf.raw == b"cdefg"
def test_SDL_RWwrite(with_test_rw):
rw, buf = with_test_rw
# Overwrite first 2 characters
writebuf = ctypes.create_string_buffer(b"12")
written = sdl2.SDL_RWwrite(rw, writebuf, 1, 2)
assert written == 2
assert buf.value == b"12cdefghijklmnop"
# Overwrite last 4 characters
writebuf = ctypes.create_string_buffer(b"TEST")
sdl2.SDL_RWseek(rw, -5, sdl2.RW_SEEK_END) # NOTE: -5 here b/c of end byte
written = sdl2.SDL_RWwrite(rw, writebuf, 1, 4)
assert written == 4
assert buf.value == b"12cdefghijklTEST"
def test_SDL_RWclose():
buf = ctypes.create_string_buffer(b"abcdefghijklmnop")
rw = sdl2.SDL_RWFromMem(buf, len(buf))
assert sdl2.SDL_GetError() == b""
assert isinstance(rw.contents, sdl2.SDL_RWops)
# Close the RW object and check for any errors
assert sdl2.SDL_RWsize(rw) == len(buf)
ret = sdl2.SDL_RWclose(rw)
assert sdl2.SDL_GetError() == b""
assert ret == 0
def test_SDL_AllocFreeRW():
rw = sdl2.SDL_AllocRW()
assert sdl2.SDL_GetError() == b""
assert isinstance(rw.contents, sdl2.SDL_RWops)
sdl2.SDL_FreeRW(rw)
assert sdl2.SDL_GetError() == b""
@pytest.mark.skipif(sdl2.dll.version < 2006, reason="not available")
def test_SDL_LoadFile_RW(testfile_path):
rw = sdl2.SDL_RWFromFile(testfile_path, b"r")
assert isinstance(rw.contents, sdl2.SDL_RWops)
datasize = ctypes.c_size_t(0)
data_p = sdl2.SDL_LoadFile_RW(rw, ctypes.byref(datasize), 0)
assert sdl2.SDL_GetError() == b""
assert datasize.value > 0
data = ctypes.string_at(data_p, datasize.value)
assert data[:19] == b"This is a test file"
@pytest.mark.skipif(sdl2.dll.version < 2006, reason="not available")
def test_SDL_LoadFile(testfile_path):
datasize = ctypes.c_size_t(0)
data_p = sdl2.SDL_LoadFile(testfile_path, ctypes.byref(datasize))
assert sdl2.SDL_GetError() == b""
assert datasize.value > 0
data = ctypes.string_at(data_p, datasize.value)
assert data[:19] == b"This is a test file"
# SDL RW Read tests
def test_SDL_ReadU8(with_test_rw):
rw, buf = with_test_rw
assert chr(sdl2.SDL_ReadU8(rw)) == "a"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
assert chr(sdl2.SDL_ReadU8(rw)) == "i"
def test_SDL_ReadLE16(with_test_rw):
rw, buf = with_test_rw
ch = sdl2.SDL_ReadLE16(rw)
assert chr(ch & 0x00FF) == "a"
assert chr(ch >> 8) == "b"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
ch = sdl2.SDL_ReadLE16(rw)
assert chr(ch & 0x00FF) == "i"
assert chr(ch >> 8) == "j"
def test_SDL_ReadBE16(with_test_rw):
rw, buf = with_test_rw
ch = sdl2.SDL_ReadBE16(rw)
assert chr(ch & 0x00FF) == "b"
assert chr(ch >> 8) == "a"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
ch = sdl2.SDL_ReadBE16(rw)
assert chr(ch & 0x00FF) == "j"
assert chr(ch >> 8) == "i"
def test_SDL_ReadLE32(with_test_rw):
rw, buf = with_test_rw
ch = sdl2.SDL_ReadLE32(rw)
assert chr(ch & 0x000000FF) == "a"
assert chr((ch & 0x0000FF00) >> 8) == "b"
assert chr((ch & 0x00FF0000) >> 16) == "c"
assert chr((ch & 0xFF000000) >> 24) == "d"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
ch = sdl2.SDL_ReadLE32(rw)
assert chr(ch & 0x000000FF) == "i"
assert chr((ch & 0x0000FF00) >> 8) == "j"
assert chr((ch & 0x00FF0000) >> 16) == "k"
assert chr((ch & 0xFF000000) >> 24) == "l"
def test_SDL_ReadBE32(with_test_rw):
rw, buf = with_test_rw
ch = sdl2.SDL_ReadBE32(rw)
assert chr(ch & 0x000000FF) == "d"
assert chr((ch & 0x0000FF00) >> 8) == "c"
assert chr((ch & 0x00FF0000) >> 16) == "b"
assert chr((ch & 0xFF000000) >> 24) == "a"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
ch = sdl2.SDL_ReadBE32(rw)
assert chr(ch & 0x000000FF) == "l"
assert chr((ch & 0x0000FF00) >> 8) == "k"
assert chr((ch & 0x00FF0000) >> 16) == "j"
assert chr((ch & 0xFF000000) >> 24) == "i"
def test_SDL_ReadLE64(with_test_rw):
rw, buf = with_test_rw
ch = sdl2.SDL_ReadLE64(rw)
assert chr(ch & 0x00000000000000FF) == "a"
assert chr((ch & 0x000000000000FF00) >> 8) == "b"
assert chr((ch & 0x0000000000FF0000) >> 16) == "c"
assert chr((ch & 0x00000000FF000000) >> 24) == "d"
assert chr((ch & 0x000000FF00000000) >> 32) == "e"
assert chr((ch & 0x0000FF0000000000) >> 40) == "f"
assert chr((ch & 0x00FF000000000000) >> 48) == "g"
assert chr((ch & 0xFF00000000000000) >> 56) == "h"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
ch = sdl2.SDL_ReadLE64(rw)
assert chr(ch & 0x00000000000000FF) == "i"
assert chr((ch & 0x000000000000FF00) >> 8) == "j"
assert chr((ch & 0x0000000000FF0000) >> 16) == "k"
assert chr((ch & 0x00000000FF000000) >> 24) == "l"
assert chr((ch & 0x000000FF00000000) >> 32) == "m"
assert chr((ch & 0x0000FF0000000000) >> 40) == "n"
assert chr((ch & 0x00FF000000000000) >> 48) == "o"
assert chr((ch & 0xFF00000000000000) >> 56) == "p"
def test_SDL_ReadBE64(with_test_rw):
rw, buf = with_test_rw
ch = sdl2.SDL_ReadBE64(rw)
assert chr(ch & 0x00000000000000FF) == "h"
assert chr((ch & 0x000000000000FF00) >> 8) == "g"
assert chr((ch & 0x0000000000FF0000) >> 16) == "f"
assert chr((ch & 0x00000000FF000000) >> 24) == "e"
assert chr((ch & 0x000000FF00000000) >> 32) == "d"
assert chr((ch & 0x0000FF0000000000) >> 40) == "c"
assert chr((ch & 0x00FF000000000000) >> 48) == "b"
assert chr((ch & 0xFF00000000000000) >> 56) == "a"
pos = sdl2.SDL_RWseek(rw, 8, sdl2.RW_SEEK_SET)
assert pos == 8
ch = sdl2.SDL_ReadBE64(rw)
assert chr(ch & 0x00000000000000FF) == "p"
assert chr((ch & 0x000000000000FF00) >> 8) == "o"
assert chr((ch & 0x0000000000FF0000) >> 16) == "n"
assert chr((ch & 0x00000000FF000000) >> 24) == "m"
assert chr((ch & 0x000000FF00000000) >> 32) == "l"
assert chr((ch & 0x0000FF0000000000) >> 40) == "k"
assert chr((ch & 0x00FF000000000000) >> 48) == "j"
assert chr((ch & 0xFF00000000000000) >> 56) == "i"
# SDL RW Write tests
def test_SDL_WriteU8(with_test_rw):
rw, buf = with_test_rw
assert sdl2.SDL_WriteU8(rw, ord("1")) == 1
assert buf.value == b"1bcdefghijklmnop"
sdl2.SDL_RWseek(rw, 6, sdl2.RW_SEEK_SET)
assert sdl2.SDL_WriteU8(rw, ord("1")) == 1
assert buf.value == b"1bcdef1hijklmnop"
def test_SDL_WriteLE16(with_test_rw):
rw, buf = with_test_rw
value = (
(ord("1") << 8) | (ord("2"))
)
assert sdl2.SDL_WriteLE16(rw, value) == 1
assert buf.value == b"21cdefghijklmnop"
sdl2.SDL_RWseek(rw, 6, sdl2.RW_SEEK_SET)
assert sdl2.SDL_WriteLE16(rw, value) == 1
assert buf.value == b"21cdef21ijklmnop"
def test_SDL_WriteBE16(with_test_rw):
rw, buf = with_test_rw
value = (
(ord("1") << 8) | (ord("2"))
)
assert sdl2.SDL_WriteBE16(rw, value) == 1
assert buf.value == b"12cdefghijklmnop"
sdl2.SDL_RWseek(rw, 6, sdl2.RW_SEEK_SET)
assert sdl2.SDL_WriteBE16(rw, value) == 1
assert buf.value == b"12cdef12ijklmnop"
def test_SDL_WriteLE32(with_test_rw):
rw, buf = with_test_rw
value = (
(ord("1") << 24) |
(ord("2") | |
"""Conll parser"""
import re
import argparse
import time
import os
import io
import pickle
import spacy
import numpy as np
from tqdm import tqdm
from neuralcoref.train.compat import unicode_
from neuralcoref.train.document import (
Mention,
Document,
Speaker,
EmbeddingExtractor,
MISSING_WORD,
extract_mentions_spans,
)
from neuralcoref.train.utils import parallel_process
PACKAGE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
REMOVED_CHAR = ["/", "%", "*"]
NORMALIZE_DICT = {
"/.": ".",
"/?": "?",
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"-LSB-": "[",
"-RSB-": "]",
}
CONLL_GENRES = {"bc": 0, "bn": 1, "mz": 2, "nw": 3, "pt": 4, "tc": 5, "wb": 6}
FEATURES_NAMES = [
"mentions_features", # 0
"mentions_labels", # 1
"mentions_pairs_length", # 2
"mentions_pairs_start_index", # 3
"mentions_spans", # 4
"mentions_words", # 5
"pairs_ant_index", # 6
"pairs_features", # 7
"pairs_labels", # 8
"locations", # 9
"conll_tokens", # 10
"spacy_lookup", # 11
"doc", # 12
]
MISSED_MENTIONS_FILE = os.path.join(
PACKAGE_DIRECTORY, "test_mentions_identification.txt"
)
SENTENCES_PATH = os.path.join(PACKAGE_DIRECTORY, "test_sentences.txt")
###################
### UTILITIES #####
def clean_token(token):
cleaned_token = token
if cleaned_token in NORMALIZE_DICT:
cleaned_token = NORMALIZE_DICT[cleaned_token]
if cleaned_token not in REMOVED_CHAR:
for char in REMOVED_CHAR:
cleaned_token = cleaned_token.replace(char, "")
if len(cleaned_token) == 0:
cleaned_token = ","
return cleaned_token
def mention_words_idx(embed_extractor, mention, debug=False):
    # Index of the word in the tuned embeddings; no need for normalizing,
    # it is already performed in set_mentions_features().
    # We take the words from the tuned vocabulary, which is a smaller vocabulary tailored from conll.
words = []
for _, w in sorted(mention.words_embeddings_.items()):
if w not in embed_extractor.tun_idx:
if debug:
print(
"No matching tokens in tuned voc for word ",
w,
"surrounding or inside mention",
mention,
)
words.append(MISSING_WORD)
else:
words.append(w)
return [embed_extractor.tun_idx[w] for w in words]
def check_numpy_array(feature, array, n_mentions_list, compressed=True):
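    # Sanity-check the shape and index ranges of each serialized feature array
    # against the number of mentions in every document of n_mentions_list.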
for n_mentions in n_mentions_list:
if feature == FEATURES_NAMES[0]:
assert array.shape[0] == len(n_mentions)
if compressed:
assert np.array_equiv(
array[:, 3], np.array([len(n_mentions)] * len(n_mentions))
)
assert np.max(array[:, 2]) == len(n_mentions) - 1
assert np.min(array[:, 2]) == 0
elif feature == FEATURES_NAMES[1]:
assert array.shape[0] == len(n_mentions)
elif feature == FEATURES_NAMES[2]:
assert array.shape[0] == len(n_mentions)
assert np.array_equiv(array[:, 0], np.array(list(range(len(n_mentions)))))
elif feature == FEATURES_NAMES[3]:
assert array.shape[0] == len(n_mentions)
assert np.array_equiv(
array[:, 0], np.array([p * (p - 1) / 2 for p in range(len(n_mentions))])
)
elif feature == FEATURES_NAMES[4]:
assert array.shape[0] == len(n_mentions)
elif feature == FEATURES_NAMES[5]:
assert array.shape[0] == len(n_mentions)
elif feature == FEATURES_NAMES[6]:
assert array.shape[0] == len(n_mentions) * (len(n_mentions) - 1) / 2
assert np.max(array) == len(n_mentions) - 2
elif feature == FEATURES_NAMES[7]:
if compressed:
assert array.shape[0] == len(n_mentions) * (len(n_mentions) - 1) / 2
assert np.max(array[:, 7]) == len(n_mentions) - 2
assert np.min(array[:, 7]) == 0
elif feature == FEATURES_NAMES[8]:
assert array.shape[0] == len(n_mentions) * (len(n_mentions) - 1) / 2
###############################################################################################
### PARALLEL FCT (has to be at top-level of the module to be pickled for multiprocessing) #####
def load_file(full_name, debug=False):
"""
load a *._conll file
Input: full_name: path to the file
Output: list of tuples for each conll doc in the file, where the tuple contains:
(utts_text ([str]): list of the utterances in the document
utts_tokens ([[str]]): list of the tokens (conll words) in the document
utts_corefs: list of coref objects (dicts) with the following properties:
coref['label']: id of the coreference cluster,
coref['start']: start index (index of first token in the utterance),
            coref['end']: end index (index of last token in the utterance).
         utts_speakers ([str]): list of the speakers associated with each utterance in the document
name (str): name of the document
part (str): part of the document
)
"""
docs = []
with io.open(full_name, "rt", encoding="utf-8", errors="strict") as f:
lines = list(f) # .readlines()
utts_text = []
utts_tokens = []
utts_corefs = []
utts_speakers = []
tokens = []
corefs = []
index = 0
speaker = ""
name = ""
part = ""
for li, line in enumerate(lines):
cols = line.split()
if debug:
print("line", li, "cols:", cols)
# End of utterance
if len(cols) == 0:
if tokens:
if debug:
print("End of utterance")
utts_text.append("".join(t + " " for t in tokens))
utts_tokens.append(tokens)
utts_speakers.append(speaker)
utts_corefs.append(corefs)
tokens = []
corefs = []
index = 0
speaker = ""
continue
# End of doc
elif len(cols) == 2:
if debug:
print("End of doc")
if cols[0] == "#end":
if debug:
print("Saving doc")
docs.append(
(utts_text, utts_tokens, utts_corefs, utts_speakers, name, part)
)
utts_text = []
utts_tokens = []
utts_corefs = []
utts_speakers = []
else:
raise ValueError("Error on end line " + line)
# New doc
elif len(cols) == 5:
if debug:
print("New doc")
if cols[0] == "#begin":
name = re.match(r"\((.*)\);", cols[2]).group(1)
try:
part = cols[4]
except ValueError:
print("Error parsing document part " + line)
if debug:
print("New doc", name, part, name[:2])
tokens = []
corefs = []
index = 0
else:
raise ValueError("Error on begin line " + line)
# Inside utterance
elif len(cols) > 7:
if debug:
print("Inside utterance")
assert cols[0] == name and int(cols[1]) == int(part), (
"Doc name or part error " + line
)
assert int(cols[2]) == index, "Index error on " + line
if speaker:
assert cols[9] == speaker, "Speaker changed in " + line + speaker
else:
speaker = cols[9]
if debug:
print("speaker", speaker)
if cols[-1] != "-":
coref_expr = cols[-1].split("|")
if debug:
print("coref_expr", coref_expr)
if not coref_expr:
raise ValueError("Coref expression empty " + line)
for tok in coref_expr:
if debug:
print("coref tok", tok)
try:
match = re.match(r"^(\(?)(\d+)(\)?)$", tok)
except:
print("error getting coreferences for line " + line)
assert match is not None, (
"Error parsing coref " + tok + " in " + line
)
num = match.group(2)
                        assert num != "", (
"Error parsing coref " + tok + " in " + line
)
if match.group(1) == "(":
if debug:
print("New coref", num)
corefs.append({"label": num, "start": index, "end": None})
if match.group(3) == ")":
j = None
for i in range(len(corefs) - 1, -1, -1):
if debug:
print("i", i)
if (
corefs[i]["label"] == num
and corefs[i]["end"] is None
):
j = i
break
assert j is not None, "coref closing error " + line
if debug:
print("End coref", num)
corefs[j]["end"] = index
tokens.append(clean_token(cols[3]))
index += 1
else:
raise ValueError("Line not standard " + line)
return docs
def set_feats(doc):
doc.set_mentions_features()
def get_feats(doc, i):
return doc.get_feature_array(doc_id=i)
def gather_feats(gathering_array, array, feat_name, pairs_ant_index, pairs_start_index):
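    # Append a per-document feature array to the running gathering_array, shifting
    # antecedent indices (FEATURES_NAMES[6]) and pair start indices (FEATURES_NAMES[3])
    # by the offsets passed in so they stay valid in the merged arrays.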
if gathering_array is None:
gathering_array = array
else:
if feat_name == FEATURES_NAMES[6]:
array = [a + pairs_ant_index for a in array]
elif feat_name == FEATURES_NAMES[3]:
array = [a + pairs_start_index for a in array]
gathering_array += array
return feat_name, gathering_array
def read_file(full_name):
doc = ""
with io.open(full_name, "rt", encoding="utf-8", errors="strict") as f:
doc = f.read()
return doc
###################
### ConllDoc #####
class ConllDoc(Document):
def __init__(self, name, part, *args, **kwargs):
self.name = name
self.part = part
self.feature_matrix = {}
self.conll_tokens = []
self.conll_lookup = []
self.gold_corefs = []
self.missed_gold = []
super(ConllDoc, self).__init__(*args, **kwargs)
def get_conll_spacy_lookup(self, conll_tokens, spacy_tokens, debug=False):
"""
        Compute a lookup table between spacy tokens (from the spacy tokenizer)
        and conll pre-tokenized tokens
        Output: list[conll_index] => list of associated spacy token indices (assumes the spacy tokenizer has a finer granularity)
"""
lookup = []
c_iter = (t for t in conll_tokens)
s_iter = enumerate(t for t in spacy_tokens)
i, s_tok = next(s_iter)
for c_tok in c_iter:
# if debug: print("conll", c_tok, "spacy", s_tok, "index", i)
c_lookup = []
while i is not None and len(c_tok) and c_tok.startswith(s_tok.text):
c_lookup.append(i)
c_tok = c_tok[len(s_tok) :]
i, s_tok = next(s_iter, (None, None))
if debug and len(c_tok):
print("eating token: conll", c_tok, "spacy", s_tok, "index", i)
assert len(c_lookup), "Unmatched conll and spacy tokens"
lookup.append(c_lookup)
return lookup
def add_conll_utterance(
self, parsed, tokens, corefs, speaker_id, use_gold_mentions, debug=False
):
conll_lookup = self.get_conll_spacy_lookup(tokens, parsed)
self.conll_tokens.append(tokens)
self.conll_lookup.append(conll_lookup)
# Convert conll tokens coref index in spacy tokens indexes
identified_gold = [False] * len(corefs)
for coref in corefs:
missing_values = [key for key in ['label', 'start', 'end', ] if coref.get(key, None) is None]
if missing_values:
found_values = {key: coref[key] for key in ['label', 'start', 'end'] if coref.get(key, None) is not None}
raise Exception(f"Coref {self.name} with fields {found_values} has empty values for the keys {missing_values}.")
coref["start"] = conll_lookup[coref["start"]][0]
coref["end"] = conll_lookup[coref["end"]][-1]
if speaker_id not in self.speakers:
speaker_name = speaker_id.split("_")
if debug:
print("New speaker: ", speaker_id, "name: ", speaker_name)
self.speakers[speaker_id] = Speaker(speaker_id, speaker_name)
if use_gold_mentions:
for coref in | |
the dataframe ################
if 'vbat' not in df.columns.values:
raise ValueError("This function requires battery voltage in the loaded dataframe for contextual plotting")
################# Make sure the model is in eval mode ################
model.eval()
################### Take the specific action rnage! #####################
# going to need to set a specific range of actions that we are looking at.
print("Looking around the action of: ", action, "\n for a range of: ", act_range)
# grab unique actions
pwms_vals = np.unique(df[['m1pwm_0', 'm2pwm_0', 'm3pwm_0', 'm4pwm_0']].values)
# grabs the actions within the range for each motor
pwms_vals_range1 = pwms_vals[(pwms_vals < action[0]+act_range) & (pwms_vals > action[0]-act_range)]
pwms_vals_range2 = pwms_vals[(pwms_vals < action[1]+act_range) & (pwms_vals > action[1]-act_range)]
pwms_vals_range3 = pwms_vals[(pwms_vals < action[2]+act_range) & (pwms_vals > action[2]-act_range)]
pwms_vals_range4 = pwms_vals[(pwms_vals < action[3]+act_range) & (pwms_vals > action[3]-act_range)]
# filters the dataframe by these new conditions
df_action_filtered = df.loc[(df['m1pwm_0'].isin(pwms_vals_range1) &
df['m2pwm_0'].isin(pwms_vals_range2) &
df['m3pwm_0'].isin(pwms_vals_range3) &
df['m4pwm_0'].isin(pwms_vals_range4))]
if len(df_action_filtered) == 0:
raise ValueError("Given action not present in dataset")
if len(df_action_filtered) < 10:
print("WARNING: Low data for this action (<10 points)")
print("Number of datapoints found is: ", len(df_action_filtered))
######################## batch data by rounding voltages ################
df = df_action_filtered.sort_values('vbat')
# df = df_action_filtered
num_pts = len(df)
# spacing = np.linspace(0,num_pts,num_ranges+1, dtype=np.int)
# parameters can be changed if desired
state_list, input_list, change_list = model.get_training_lists()
# For this function append vbat if not in
v_in_flag = True
if 'vbat' not in input_list:
v_in_flag = False
input_list.append('vbat')
data_params = {
# Note the order of these matters. that is the order your array will be in
'states' : state_list,
'inputs' : input_list,
'targets' : change_list,
'battery' : True # Need to include battery here too
}
# this will hold predictions and the current state for ease of plotting
predictions = np.zeros((num_pts, 2*9+1))
X, U, dX = df_to_training(df, data_params)
# gather predictions
rmse = np.zeros((9))
for n, (x, u, dx) in enumerate(zip(X, U, dX)):
# predictions[i, n, 9:] = x[:9]+model.predict(x,u)
if ground_truth:
predictions[n, 9:-1] = dx
else:
            # hacky solution to comparing models trained with and without battery
if v_in_flag:
predictions[n, 9:-1] = model.predict(x,u)
else:
predictions[n, 9:-1] = model.predict(x,u[:-1])
# calculate root mean squared error for predictions
rmse += (predictions[n, 9:-1] - dx)**2
predictions[n, :9] = x[:9] # stores for easily separating generations from plotting
predictions[n, -1] = u[-1]
    rmse /= (n + 1)  # average over the number of points (n is the last zero-based index)
rmse = np.sqrt(rmse)
print(rmse)
# if normalize, normalizes both the raw states and the change in states by
# the scalars stored in the model
if normalize:
scalarX, scalarU, scalardX = model.getNormScalers()
prediction_holder = np.concatenate((predictions[:,:9],np.zeros((num_pts, (np.shape(X)[1]-9)))),axis=1)
predictions[:,:9] = scalarX.transform(prediction_holder)[:,:9]
predictions[:,9:-1] = scalardX.transform(predictions[:,9:-1])
############################################################################
############################################################################
######################### plot this dataset on Euler angles ################
    # this will be a subplot with a collection of points showing the next state
    # that originates from an initial state. The different battery voltages will
    # be different colors. They could be lines, but it is easier to think about
    # in an (x,y) case without sorting
# plot properties
font = {'size' : 14}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=2.5)
############## PLOT ALL POINTS ON 3 EULER ANGLES ###################
if False:
with sns.axes_style("whitegrid"):
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.5
fig1, axes = plt.subplots(nrows=1, ncols=3, sharey=True)
ax1, ax2, ax3 = axes[:]
if ground_truth:
plt.suptitle("Measured State Transitions Battery Voltage Context - Action: {0}".format(action))
if normalize:
ax1.set_ylabel("Measured Normalized Change in State")
else:
ax1.set_ylabel("Measured Change in state (Degrees)")
else:
plt.suptitle("Predicted State Transitions Battery Voltage Context - Action: {0}".format(action))
if normalize:
ax1.set_ylabel("Predicted Normalized Change in State")
else:
ax1.set_ylabel("Predicted Change in state (Degrees)")
ax1.set_title("Pitch")
ax2.set_title("Roll")
ax3.set_title("Yaw")
if normalize:
ax1.set_xlabel("Normalized Pitch")
ax2.set_xlabel("Normalized Roll")
ax3.set_xlabel("Normalized Yaw")
# ax1.set_xlim([-4,4])
# ax2.set_xlim([-4,4])
# ax3.set_xlim([-2,2])
# ax1.set_xlim([-1,1])
# ax2.set_xlim([-1,1])
# ax3.set_xlim([-2,2])
ax1.set_ylim([-1,1])
ax2.set_ylim([-1,1])
ax3.set_ylim([-1,1])
else:
ax1.set_xlabel("Global Pitch")
ax2.set_xlabel("Global Roll")
ax3.set_xlabel("Global Yaw")
ax1.set_xlim([-45,45])
ax2.set_xlim([-45,45])
ax3.set_xlim([-180,180])
fig1.subplots_adjust(right=0.8)
cbar_ax1 = fig1.add_axes([0.85, 0.15, 0.02, 0.7])
# ax1 = plt.subplot(131)
# ax2 = plt.subplot(132)
# ax3 = plt.subplot(133)
# normalize batteris between 0 and 1
# TODO: Figure out the coloring
# predictions[:,:,-1] = (predictions[:,:,-1] - np.min(predictions[:,:,-1]))/(np.max(predictions[:,:,-1])-np.min(predictions[:,:,-1]))
# print(predictions[:,:,-1])
base = 50
prec = 0
vbats = np.around(base * np.around(predictions[:, -1]/base),prec)
# vbats = predicitons[:,-1]
hm = ax1.scatter(predictions[:,3], predictions[:,3+9], c=vbats, alpha = .7, s=3)
ax2.scatter(predictions[:,4], predictions[:,4+9], c=vbats, alpha = .7, s=3)
ax3.scatter(predictions[:,5], predictions[:,5+9], c=vbats, alpha = .7, s=3)
cbar = fig1.colorbar(hm, cax=cbar_ax1)
cbar.ax.set_ylabel('Battery Voltage (mV)')
plt.show()
###############################################################
############## PLOT Pitch for battery cutoff ###################
if False:
battery_cutoff = 3800
battery_cutoff = int(np.mean(predictions[:, -1]))
battery_cutoff = int(np.median(predictions[:, -1]))
print("Plotting Pitch Dynamics for Above and Below {0} mV".format(battery_cutoff))
with sns.axes_style("darkgrid"):
fig2, axes2 = plt.subplots(nrows=1, ncols=2, sharey=True)
ax21, ax22 = axes2[:]
cmap = matplotlib.cm.viridis
norm = matplotlib.colors.Normalize(vmin=np.min(predictions[:, -1]), vmax=np.max(predictions[:, -1]))
if ground_truth:
plt.suptitle("Measured Pitch Transitions Above and Below Mean Vbat: {0}".format(battery_cutoff))
if normalize:
ax21.set_ylabel("Normalized Measured Change in State")
else:
ax21.set_ylabel("Measured Change in state (Degrees)")
else:
plt.suptitle("Predicted Pitch Transitions Above and Below Mean Vbat: {0}".format(battery_cutoff))
if normalize:
ax21.set_ylabel("Normalized Predicted Change in State")
else:
ax21.set_ylabel("Predicted Change in state (Degrees)")
ax21.set_title("Pitch, Vbat > {0}".format(battery_cutoff))
ax22.set_title("Pitch, Vbat < {0}".format(battery_cutoff))
if normalize:
ax21.set_xlabel("Normalized Pitch")
ax22.set_xlabel("Normalized Pitch")
# ax21.set_xlim([-4,4])
# ax22.set_xlim([-4,4])
ax21.set_ylim([-1,1])
ax22.set_ylim([-1,1])
else:
ax21.set_xlabel("Global Pitch")
ax22.set_xlabel("Global Pitch")
ax21.set_xlim([-45,45])
ax22.set_xlim([-45,45])
fig2.subplots_adjust(right=0.8)
cbar_ax = fig2.add_axes([0.85, 0.15, 0.02, 0.7])
dim = 3
base = 50
prec = 1
vbats = np.around(base * np.around(predictions[:, -1]/base),prec)
flag = vbats > battery_cutoff
notflag = np.invert(flag)
# hm2 = plt.scatter(predictions[:,3], predictions[:,3+9], c=predictions[:, -1], alpha = .7, s=3)
# plt.clf()
ax21.scatter(predictions[flag, dim], predictions[flag, dim+9], cmap=cmap, norm=norm, c=vbats[flag], alpha = .7, s=3)
ax22.scatter(predictions[notflag, dim], predictions[notflag, dim+9], cmap=cmap, norm=norm, c=vbats[notflag], alpha = .7, s=3)
cbar = fig2.colorbar(hm, cax=cbar_ax)
cbar.ax.set_ylabel('Battery Voltage (mV)')
plt.show()
###############################################################
if False:
num_subplots = 9
vbats = predictions[:, -1]
# generate battery ranges for the plot
pts = len(vbats)
        pts_breaks = np.linspace(0, pts - 1, num_subplots + 1, dtype=int)
bat_ranges = vbats[pts_breaks]
# bat_ranges = np.linspace(np.min(vbats), np.max(vbats),num_subplots+1)
with sns.axes_style("darkgrid"):
fig3, axes3 = plt.subplots(nrows=3, ncols=3, sharey=True, sharex=True)
# ax31, ax32, ax33, ax34, ax35, ax36 = axes3[:,:]
cmap = matplotlib.cm.viridis
norm = matplotlib.colors.Normalize(vmin=bat_ranges[0], vmax=bat_ranges[-1])
if ground_truth:
plt.suptitle("Measured Pitch Transitions For Varying Battery Voltage")
if normalize:
fig3.text(0.5, 0.04, 'Normalize Global State', ha='center')
fig3.text(0.04, 0.5, 'Normalized Measured Change in State', va='center', rotation='vertical')
else:
fig3.text(0.5, 0.04, 'Global State', ha='center')
fig3.text(0.04, 0.5, 'Measured Change in State', va='center', rotation='vertical')
else:
plt.suptitle("Predicted Pitch Transitions For Varying Battery Voltage")
if normalize:
fig3.text(0.5, 0.04, 'Normalize Global State', ha='center')
fig3.text(0.04, 0.5, 'Normalized Predicted Change in State', va='center', rotation='vertical')
else:
fig3.text(0.5, 0.04, 'Global State', ha='center')
fig3.text(0.04, 0.5, 'Predicted Change in State', va='center', rotation='vertical')
for i, ax in enumerate(axes3.flatten()):
# get range values
low = bat_ranges[i]
high = bat_ranges[i+1]
ax.set_title("Voltage [{0},{1}]".format(int(low), int(high)))
if normalize:
# ax.set_xlabel("Normalized Pitch")
ax.set_ylim([-1,1])
else:
# ax.set_xlabel("Global Pitch")
ax.set_xlim([-45,45])
dim = 4
flag = (vbats > low) & (vbats < high)
hm = ax.scatter(predictions[flag, dim], predictions[flag, dim+9], cmap = cmap, norm = norm, c=vbats[flag], alpha = .7, s=3)
if normalize:
ax.set_ylim([-1,1])
else:
ax.set_ylim([-3,3])
fig3.subplots_adjust(right=0.8)
cbar_ax1 = fig3.add_axes([0.85, 0.15, 0.02, 0.7])
cbar = fig3.colorbar(hm, cax=cbar_ax1)
cbar.ax.set_ylabel('Battery Voltage (mV)')
plt.show()
###############################################################
############## PLOT single angle for ground truth, with battery, without battery ###################
if True:
# gather predictions for second model
# this will hold predictions and the current state for ease of plotting
predictions_nobat = np.zeros((num_pts, 2*9+1))
pred_ground_truth = np.zeros((num_pts, 2*9+1))
# gather predictions
rmse = np.zeros((9))
for n, (x, u, dx) in enumerate(zip(X, U, dX)):
# predictions[i, n, 9:] = x[:9]+model.predict(x,u)
pred_ground_truth[n, 9:-1] = dx
predictions_nobat[n, 9:-1] = model_nobat.predict(x, u[:-1])
# calculate root mean squared error for predictions
rmse += (predictions_nobat[n, 9:-1] - dx)**2
# stores for easily separating generations from plotting
predictions_nobat[n, :9] = x[:9]
predictions_nobat[n, -1] = u[-1]
# rmse /= n
# rmse = np.sqrt(rmse)
# print(rmse)
if normalize:
scalarX, scalarU, scalardX = model.getNormScalers()
pred_ground_truth_holder = np.concatenate(
(pred_ground_truth[:, :9], np.zeros((num_pts, (np.shape(X)[1]-9)))), axis=1)
pred_ground_truth[:, :9] = scalarX.transform(
pred_ground_truth_holder)[:, :9]
pred_ground_truth[:, 9:-
1] = scalardX.transform(pred_ground_truth[:, 9:-1])
prediction_nobat_holder = np.concatenate(
(predictions_nobat[:, :9], np.zeros((num_pts, (np.shape(X)[1]-9)))), axis=1)
predictions_nobat[:, :9] = scalarX.transform(
prediction_nobat_holder)[:, :9]
predictions_nobat[:, 9:-
1] = scalardX.transform(predictions_nobat[:, 9:-1])
# Plot here, will be a 3x5 plot of voltage context
n_row = 3
num_subplots = 5
vbats = predictions[:, -1]
# generate battery | |
# easyawscli.py
#!/usr/bin/python3
from os import system, name
import boto3
from botocore.exceptions import ClientError
import getpass
import requests
# from SecureString import clearmem
source_code_url = "https://github.com/navilg/easyawscli"
def main_menu():
print("\n\n---Main Menu---\n")
print("0. Logout\n1. Start EC2 instance\n2. Stop EC2 instance\n3. Tag an instance\n4. Add inbound rule in Security Group")
print("5. Remove an inbound rule from Security Group\n6. Autoscaling Group suspend process")
main_menu_choice = int(input("Choose from above (0 to 6) >> "))
return main_menu_choice
def submenu(action_name=""):
print("\n\n---Sub Menu---\n")
print("\n0. Logout\n1. Repeat '" + str(action_name) + "'\n2. Main Menu")
subchoice = int(input("Choose from above (0 to 2) >> "))
return subchoice
def login():
print("\n\n---Login---\n")
region = set_region()
if region == "":
exit(0)
key = str(input("Enter AWS access key >> "))
try:
secret = getpass.getpass(prompt='Enter AWS secret (Input text will NOT be visible) >> ')
except Exception as error:
print('ERROR', error)
exit(1)
try:
session = boto3.Session(aws_access_key_id=key, aws_secret_access_key=secret, region_name=region)
# clearmem(secret)
except ClientError as e:
print(e.response['Error']['Message'])
# clearmem(secret)
exit(1)
print("Validating your credentials...")
try:
sts = session.client('sts')
sts.get_caller_identity()
except ClientError as e:
print(e.response['Error']['Message'])
print("Login failed")
exit(1)
except Exception as e:
print(e, ":", type(e).__name__)
if type(e).__name__ == "EndpointConnectionError":
print("'"+region+"'","may not be a valid AWS region.")
print("Login Failed")
exit(1)
return region,session
def set_region():
print("Choose one region from list below.")
print("------------------------------------------------------------------------------------------------------------"
"-------------------------------------")
print("| 1. us-east-1 (N. Virginia)\t\t2. us-east-2 (Ohio)\t\t3. us-west-1 (N. California)\t\t4. us-west-2 ("
"Oregon)\t\t|")
print("| 5. af-south-1 (Cape Town)\t\t6. ap-east-1 (Hong Kong)\t7. ap-south-1 (Mumbai)\t\t\t8. "
"ap-northeast-3 ( "
"Osaka)\t|")
print("| 9. ap-northeast-2 (Seoul)\t\t10. ap-southeast-1 (Singapore)\t11. ap-southeast-2 (Sydney)\t\t12. "
"ap-northeast-1 (Tokyo)\t|")
print("| 13. ca-central-1 (Canada/Central)\t14. eu-central-1 (Frankfurt)\t15. eu-west-1 (Ireland)\t\t\t16. "
"eu-west-2 (London)\t\t|")
print("| 17. eu-south-1 (Milan)\t\t18. eu-west-3 (Paris)\t\t19. eu-north-1 (Stockholm)\t\t20. me-south-1 ("
"Bahrain)\t|")
print("| 21. sa-east-1 (Sao Paulo)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t|")
print("------------------------------------------------------------------------------------------------------------"
"-------------------------------------\n")
while True:
try:
region_name = int(input("Choose a region from above (1 to 21). Type 0 to exit: "))
except ValueError as e:
print('Invalid choice')
continue
except Exception as e:
print("Error:", e)
return ""
if region_name == 1:
return "us-east-1"
elif region_name == 2:
return "us-east-2"
elif region_name == 3:
return "us-west-1"
elif region_name == 4:
return "us-west-2"
elif region_name == 5:
return "af-south-1"
elif region_name == 6:
return "ap-east-1"
elif region_name == 7:
return "ap-south-1"
elif region_name == 8:
return "ap-northeast-3"
elif region_name == 9:
return "ap-northeast-2"
elif region_name == 10:
return "ap-southeast-1"
elif region_name == 11:
return "ap-southeast-2"
elif region_name == 12:
return "ap-northeast-1"
elif region_name == 13:
return "ca-central-1"
elif region_name == 14:
return "eu-central-1"
elif region_name == 15:
return "eu-west-1"
elif region_name == 16:
return "eu-west-2"
elif region_name == 17:
return "eu-south-1"
elif region_name == 18:
return "eu-west-3"
elif region_name == 19:
return "eu-north-1"
elif region_name == 20:
return "me-south-1"
elif region_name == 21:
return "sa-east-1"
elif region_name == 0:
return ""
else:
print("Invalid choice.")
def get_ec2(region,state,tagname,tagvalue,session):
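    # Return (reservations, ec2_client) for EC2 instances in `region` whose tag
    # `tagname` equals `tagvalue`; state='all' skips the instance-state filter.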
print("Searching instances with tag","'"+tagname+":", tagvalue+"'","in region", region)
reservations = []
ec2_obj = session.client('ec2',region_name=region)
if state == "all":
filters = [{'Name': 'tag:' + tagname, 'Values': [tagvalue]}]
else:
filters = [{'Name': 'instance-state-name','Values': [state]},{'Name': 'tag:'+tagname,'Values': [tagvalue]}]
reservations = ec2_obj.describe_instances(Filters=filters).get("Reservations")
return reservations,ec2_obj
def startEC2(region,session):
print("\n\n---Start EC2 Instance---\n")
print("Active region:",region)
tagname = str(input("Enter tag name >> "))
tagvalue = str(input("Enter tag value >> "))
reservations,ec2_obj = get_ec2(region,'stopped',tagname,tagvalue,session)
# If list is empty
if not reservations:
print("No stopped instances to start.")
return [],0
print("\nBelow instances found with tag '" + tagname + "':'" + tagvalue + "'")
print("No.\tInstance_ID\t\t" + tagname + "\t\tPrivate_IP_Address\t\tLaunch_Time")
i = 0
for reservation in reservations:
for instance in reservation["Instances"]:
i += 1
instance_id = instance["InstanceId"]
private_ip = instance["PrivateIpAddress"]
launch_time = instance["LaunchTime"]
print(str(i)+"\t"+str(instance_id)+"\t"+tagvalue+"\t\t"+str(private_ip)+"\t\t"+str(launch_time))
print("\nChoose instances to start from above list (0 to "+str(i)+"). Separate them by commas. Just type 0 to return to submenu.")
start_choice = input("Example: 1,3,4 >> ")
if start_choice == '0':
return [],0
choice_list = start_choice.rstrip().split(",")
print("Instances to start:", choice_list)
confirm = str(input("Do you confirm ? (Type 'confirm') >> "))
if confirm != 'confirm':
print("You seem to be confused.")
return [],0
print("Starting Instances...")
instance_state_changed = 0
instances_started = []
i = 0
for reservation in reservations:
for instance in reservation["Instances"]:
i += 1
if str(i) in choice_list:
try:
print("Starting instance ", instance["InstanceId"])
ec2_obj.start_instances(InstanceIds=[instance["InstanceId"]])
instances_started.append(instance["InstanceId"])
instance_state_changed += 1
except ClientError as e:
print(e.response['Error']['Message'])
return instances_started,instance_state_changed
def stopEC2(region,session):
print("\n\n---Stop EC2 Instance---\n")
print("Active region:",region)
tagname = str(input("Enter tag name >> "))
tagvalue = str(input("Enter tag value >> "))
reservations,ec2_obj = get_ec2(region,'running',tagname,tagvalue,session)
# If list is empty
if not reservations:
print("No running instances to stop.")
return [],0
print("\nBelow instances found with tag '" + tagname + "':'" + tagvalue + "'")
print("No.\tInstance_ID\t\t" + tagname + "\t\tPrivate_IP_Address\t\tLaunch_Time")
i = 0
for reservation in reservations:
for instance in reservation["Instances"]:
i += 1
instance_id = instance["InstanceId"]
private_ip = instance["PrivateIpAddress"]
launch_time = instance["LaunchTime"]
print(str(i) + "\t" + str(instance_id) + "\t" + tagvalue + "\t\t" + str(private_ip) + "\t\t" + str(launch_time))
print("\nChoose instances to stop from above list (0 to " + str(i) + "). Separate them by commas. Just type 0 to return to submenu.")
stop_choice = input("Example: 1,3,4 >> ")
if stop_choice == '0':
return [],0
choice_list = stop_choice.rstrip().split(",")
print("Instances to stop:", choice_list)
confirm = str(input("Do you confirm ? (Type 'confirm') >> "))
if confirm != 'confirm':
print("You seem to be confused.")
return [], 0
print("Stopping Instances...")
instance_state_changed = 0
instances_stopped = []
i = 0
for reservation in reservations:
for instance in reservation["Instances"]:
i += 1
if str(i) in choice_list:
try:
print("Stopping instance ", instance["InstanceId"])
ec2_obj.stop_instances(InstanceIds=[instance["InstanceId"]])
instances_stopped.append(instance["InstanceId"])
instance_state_changed += 1
except ClientError as e:
print(e.response['Error']['Message'])
return instances_stopped,instance_state_changed
def tagInstance():
print("Tag Instance. Coming soon...")
def add_inbound_rule_in_sg(region,session):
print("\n\n---Add inbound rule in Security Group---\n")
print("Active region:", region)
print("Enter tag name and its value to filter the EC2 instances.")
tagname = str(input("Enter tag name >> "))
tagvalue = str(input("Enter tag value >> "))
reservations, ec2_obj = get_ec2(region, 'all', tagname, tagvalue, session)
# If list is empty
if not reservations:
print("No instances found.")
return ""
print("\nBelow instances found with tag '" + tagname + "':'" + tagvalue + "'")
print("No.\tInstance_ID\t\t" + tagname + "\t\tVPC_ID\t\tPrivate_IP_Address\t\tLaunch_Time")
i = 0
for reservation in reservations:
for instance in reservation["Instances"]:
i += 1
instance_id = instance["InstanceId"]
private_ip = instance["PrivateIpAddress"]
launch_time = instance["LaunchTime"]
vpc_id = instance["VpcId"]
print(str(i) + "\t" + str(instance_id) + "\t" + tagvalue + "\t\t" + str(vpc_id) + "\t\t" + str(private_ip) + "\t\t" + str(launch_time))
print("\nChoose one instances from above list (0 to " + str(i) + "). 0 to return to submenu")
instance_choice = int(input("Example: 3 >> "))
if instance_choice == 0:
return ""
print("Below Security Groups are attached to chosen instance.")
print("No.\tSecurity_Group_ID\t\tSecurity_Group_Name")
i = 0
security_group_id_list = []
security_group_name_list = []
for reservation in reservations:
for instance in reservation["Instances"]:
i += 1
if i == instance_choice:
security_groups = instance["SecurityGroups"]
j = 0
for security_group in security_groups:
j += 1
security_group_name = security_group["GroupName"]
security_group_id = security_group["GroupId"]
security_group_name_list.append(security_group_name)
security_group_id_list.append(security_group_id)
print(str(j) + "\t" + str(security_group_id) + "\t\t" + security_group_name)
break
print("\nChoose one security group from above (0 to", j,"). Type 0 to return to submenu.")
security_group_choice = int(input(">> "))
if security_group_choice == 0:
return ""
security_group_details = ec2_obj.describe_security_groups(GroupIds=[security_group_id_list[security_group_choice - 1]])
print("Below inbound rules are currently authorized to security group", security_group_id_list[security_group_choice - 1])
print("No.\tPorts\t\tIP_Protocol\tSource\t\t\t\tDescription")
i = 0
for ip_permission in security_group_details['SecurityGroups'][0]['IpPermissions']:
i += 1
current_ip_protocol = ip_permission['IpProtocol']
current_ip_ranges = ip_permission['IpRanges']
current_cidrs = []
current_description = []
for current_ip_range in current_ip_ranges:
current_cidrs.append(current_ip_range['CidrIp'])
if 'Description' in current_ip_range:
current_description.append(current_ip_range['Description'])
else:
current_description.append('')
current_to_port = ip_permission['ToPort']
print(str(i) + "\t" + str(current_to_port) + "\t\t" + str(current_ip_protocol) + "\t\t" + str(current_cidrs) + "\t\t\t\t" + str(current_description))
if i == 0:
print("No inbound rule authorized to security group", security_group_id_list[security_group_choice - 1])
print("\nEnter below details for new inbound rule.")
ip_protocol = str(input("Enter IP Protocol (tcp/udp) >> ")).lower()
to_port = int(input("Enter port number >> "))
your_public_ip = str(requests.get('http://ipinfo.io/json').json()['ip']) + "/32"
ip_range = str(input("Enter CIDR IP (default: {}) >> ".format(your_public_ip)))
description = str(input("Enter description (default: '') >> "))
if ip_range == "":
ip_range = your_public_ip
ip_perm = [{'IpProtocol': ip_protocol, 'ToPort': to_port, 'FromPort': to_port, 'IpRanges': [{'CidrIp': ip_range, 'Description': description}]}]
print(ip_perm)
confirm = str(input("Do you confirm ? (Type 'confirm') >> "))
if confirm != 'confirm':
print("You seem to be confused.")
return ""
print("Adding inbound | |
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ClaimResponseItemAdjudication(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Adjudication details.
The adjudication results.
"""
resource_type = Field("ClaimResponseItemAdjudication", const=True)
amount: fhirtypes.MoneyType = Field(
None,
alias="amount",
title="Monetary amount",
description="Monetary amount associated with the code.",
# if property is element of this resource.
element_property=True,
)
category: fhirtypes.CodeableConceptType = Field(
...,
alias="category",
title="Adjudication category such as co-pay, eligible, benefit, etc.",
description="Code indicating: Co-Pay, deductible, eligible, benefit, tax, etc.",
# if property is element of this resource.
element_property=True,
)
reason: fhirtypes.CodeableConceptType = Field(
None,
alias="reason",
title="Explanation of Adjudication outcome",
description="Adjudication reason such as limit reached.",
# if property is element of this resource.
element_property=True,
)
value: fhirtypes.Decimal = Field(
None,
alias="value",
title="Non-monetary value",
description=(
"A non-monetary value for example a percentage. Mutually exclusive to "
"the amount element above."
),
# if property is element of this resource.
element_property=True,
)
value__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_value", title="Extension field for ``value``."
)
class ClaimResponseItemDetail(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Detail line items.
The second tier service adjudications for submitted services.
"""
resource_type = Field("ClaimResponseItemDetail", const=True)
adjudication: typing.List[fhirtypes.ClaimResponseItemAdjudicationType] = Field(
None,
alias="adjudication",
title="Detail level adjudication details",
description="The adjudications results.",
# if property is element of this resource.
element_property=True,
)
noteNumber: typing.List[fhirtypes.PositiveInt] = Field(
None,
alias="noteNumber",
title="List of note numbers which apply",
description="A list of note references to the notes provided below.",
# if property is element of this resource.
element_property=True,
)
noteNumber__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_noteNumber", title="Extension field for ``noteNumber``.")
sequenceLinkId: fhirtypes.PositiveInt = Field(
None,
alias="sequenceLinkId",
title="Service instance",
description="A service line number.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
sequenceLinkId__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_sequenceLinkId", title="Extension field for ``sequenceLinkId``."
)
subDetail: typing.List[fhirtypes.ClaimResponseItemDetailSubDetailType] = Field(
None,
alias="subDetail",
title="Subdetail line items",
description="The third tier service adjudications for submitted services.",
# if property is element of this resource.
element_property=True,
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2502(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("sequenceLinkId", "sequenceLinkId__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ClaimResponseItemDetailSubDetail(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Subdetail line items.
The third tier service adjudications for submitted services.
"""
resource_type = Field("ClaimResponseItemDetailSubDetail", const=True)
adjudication: typing.List[fhirtypes.ClaimResponseItemAdjudicationType] = Field(
None,
alias="adjudication",
title="Subdetail level adjudication details",
description="The adjudications results.",
# if property is element of this resource.
element_property=True,
)
noteNumber: typing.List[fhirtypes.PositiveInt] = Field(
None,
alias="noteNumber",
title="List of note numbers which apply",
description="A list of note references to the notes provided below.",
# if property is element of this resource.
element_property=True,
)
noteNumber__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_noteNumber", title="Extension field for ``noteNumber``.")
sequenceLinkId: fhirtypes.PositiveInt = Field(
None,
alias="sequenceLinkId",
title="Service instance",
description="A service line number.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
sequenceLinkId__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_sequenceLinkId", title="Extension field for ``sequenceLinkId``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3395(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("sequenceLinkId", "sequenceLinkId__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class ClaimResponsePayment(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Payment details, if paid.
Payment details for the claim if the claim has been paid.
"""
resource_type = Field("ClaimResponsePayment", const=True)
adjustment: fhirtypes.MoneyType = Field(
None,
alias="adjustment",
title="Payment adjustment for non-Claim issues",
description=(
"Adjustment to the payment of this transaction which is not related to "
"adjudication of this transaction."
),
# if property is element of this resource.
element_property=True,
)
adjustmentReason: fhirtypes.CodeableConceptType = Field(
None,
alias="adjustmentReason",
title="Explanation for the non-claim adjustment",
description="Reason for the payment adjustment.",
# if property is element of this resource.
element_property=True,
)
amount: fhirtypes.MoneyType = Field(
None,
alias="amount",
title="Payable amount after adjustment",
description="Payable less any payment adjustment.",
# if property is element of this resource.
element_property=True,
)
date: fhirtypes.Date = Field(
None,
alias="date",
title="Expected data of Payment",
description="Estimated payment data.",
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Identifier of the payment instrument",
description="Payment identifier.",
# if property is element of this resource.
element_property=True,
)
type: fhirtypes.CodeableConceptType = Field(
None,
alias="type",
title="Partial or Complete",
description="Whether this represents partial or complete payment of the claim.",
# if property is element of this resource.
element_property=True,
)
class ClaimResponseProcessNote(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Processing notes.
Note text.
"""
resource_type = Field("ClaimResponseProcessNote", const=True)
language: fhirtypes.CodeableConceptType = Field(
None,
alias="language",
title="Language | |
= scale_data_1[loc_mask]
if self.scale_mode == 'linear':
scale_space -= min_scale ## smallest value is 0
scale_space *= 1.0/( max_scale - min_scale ) ## largest value is 1
else: # log
scale_space *= 1.0/min_scale ## smallest value is 1 (becomes 0 after the log below)
np.log(scale_space, out=scale_space)
scale_space *= 1.0/np.log( max_scale/min_scale ) ## largest is 1
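# In both branches scale_space ends up normalised to the 0-1 range:
#   linear: (x - min_scale) / (max_scale - min_scale)
#   log:    log(x / min_scale) / log(max_scale / min_scale)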
time_scale = self.time_scale_space[:len(plotX)]
time_scale[:] = scale_space
space_scale = scale_space
time_scale *= self.time_dot_scale
if self.pol_mode == 'line':
space_scale *= self.space_line_scale
else: ## none
space_scale *= self.space_dot_scale
N_before = len(plotX)
# if self.max_num_points > 0 and self.max_num_points < N_before:
# decimation_factor = self.max_num_points / float(N_before)
#
# data = self.decimation_TMP[:N_before]
# mask = self.decimation_mask[:N_before]
#
# data[:] = decimation_factor
# np.cumsum(data, out=data)
# np.floor(data, out=data)
# np.greater(data[1:], data[:-1], out=mask[1:])
# mask[0] = 0
#
# plotX = plotX[mask]
# plotY = plotY[mask]
# plotZ = plotZ[mask]
# plotZt = plotZt[mask]
# plotT = plotT[mask]
#
# try:
# B4 = color[mask]
# color = B4 ## hope this works?
# except:
# pass
print(self.name, "plotting", len(plotX), '/', len(self.X_array) )#, "have:", N_before)
try:
if not self._ignore_time:
self.AltVsT_paths = AltVsT_axes.scatter(x=plotT, y=plotZt, c=color, marker='o',
s=time_scale,
cmap=self.cmap, vmin=color_min, vmax=color_max)
if self.pol_mode == 'none':
self.AltVsEw_paths = AltVsEw_axes.scatter(x=plotX, y=plotZ, c=color, marker='o', s=space_scale,
cmap=self.cmap, vmin=color_min, vmax=color_max)
self.NsVsEw_paths = NsVsEw_axes.scatter(x=plotX, y=plotY, c=color, marker='o', s=space_scale,
cmap=self.cmap, vmin=color_min, vmax=color_max)
self.NsVsAlt_paths = NsVsAlt_axes.scatter(x=plotZ, y=plotY, c=color, marker='o', s=space_scale,
cmap=self.cmap, vmin=color_min, vmax=color_max)
else:
print('a')
dirX_tmp = self.dirX_memory[:len(plotX)]
dirY_tmp = self.dirY_memory[:len(plotX)]
dirZ_tmp = self.dirZ_memory[:len(plotX)]
dirX_tmp[:] = self.dirX_array[self.total_mask][loc_mask]
dirY_tmp[:] = self.dirY_array[self.total_mask][loc_mask]
dirZ_tmp[:] = self.dirZ_array[self.total_mask][loc_mask]
dirX_tmp *= 0.5
dirY_tmp *= 0.5
dirZ_tmp *= 0.5
dirX_tmp *= space_scale
dirY_tmp *= space_scale
dirZ_tmp *= space_scale
# dirX_tmp *= self.pol_scale*0.5
# dirY_tmp *= self.pol_scale*0.5
# dirZ_tmp *= self.pol_scale*0.5
#
# if self.pol_mode=='intensity' or self.pol_mode=='log_intensity':
# intensity_tmp = np.array( self.intensity_array[self.total_mask][loc_mask] )
#
# if self.pol_mode=='log_intensity':
# np.log(intensity_tmp, out=intensity_tmp)
#
# intensity_tmp /= np.max( intensity_tmp )
#
# dirX_tmp *= intensity_tmp
# dirY_tmp *= intensity_tmp
# dirZ_tmp *= intensity_tmp
X_low = np.array(plotX)
X_high = np.array(plotX)
X_low -= dirX_tmp
X_high += dirX_tmp
Y_low = np.array(plotY)
Y_high = np.array(plotY)
Y_low -= dirY_tmp
Y_high += dirY_tmp
Z_low = np.array(plotZ)
Z_high = np.array(plotZ)
Z_low -= dirZ_tmp
Z_high += dirZ_tmp
for i in range(len(X_low)):
X = [ X_low[i], X_high[i] ]
Y = [ Y_low[i], Y_high[i] ]
Z = [ Z_low[i], Z_high[i] ]
c = None
if (color_min is not None) and (color_max is not None):
c = color[i]
c -= color_min
c /= color_max-color_min
c = self.cmap( c )
else:
c = color
AltVsEw_axes.plot(X,Z,'-', lw=self.line_width, c=c)
# cmap=self.cmap, vmin=color_min, vmax=color_max)
NsVsEw_axes.plot(X,Y,'-', lw=self.line_width, c=c)
# cmap=self.cmap, vmin=color_min, vmax=color_max)
NsVsAlt_axes.plot(Z,Y,'-', lw=self.line_width, c=c)
# cmap=self.cmap, vmin=color_min, vmax=color_max)
except Exception as e:
print(e)
# def get_viewed_events(self):
# print("get viewed events not implemented")
# return []
# return [PSE for PSE in self.PSE_list if
# (self.loc_filter(PSE.PolE_loc) and PSE.PolE_RMS<self.max_RMS and PSE.num_even_antennas>self.min_numAntennas)
# or (self.loc_filter(PSE.PolO_loc) and PSE.PolO_RMS<self.max_RMS and PSE.num_odd_antennas>self.min_numAntennas) ]
def clear(self):
pass
def use_ancillary_axes(self):
return False
def toggle_on(self):
self.display = True
def toggle_off(self):
self.display = False
self.clear()
## the rest of this is wrong
def ignore_time(self, ignore=None):
if ignore is not None:
self._ignore_time = ignore
return self._ignore_time
def print_info(self, coordinate_system):
self.set_total_mask()
N = np.sum(self.total_mask)
#### random book keeping ####
self.clear()
# if self.color_mode in self.color_options:
# color = self.color_options[self.color_mode][self.total_mask] ## should fix this to not make memory
#### set cuts and transforms
self.X_TMP[:] = self.X_array
self.Y_TMP[:] = self.Y_array
self.Z_TMP[:] = self.Z_array
self.T_TMP[:] = self.T_array
Xtmp = self.X_TMP[:N]
Ytmp = self.Y_TMP[:N]
Ztmp = self.Z_TMP[:N]
Ttmp = self.T_TMP[:N]
np.compress(self.total_mask, self.X_TMP, out=Xtmp)
np.compress(self.total_mask, self.Y_TMP, out=Ytmp)
np.compress(self.total_mask, self.Z_TMP, out=Ztmp)
np.compress(self.total_mask, self.T_TMP, out=Ttmp)
Xtmp += self.X_offset
Ytmp += self.Y_offset
Ztmp += self.Z_offset
Ttmp += self.T_offset
if self.transform_memory is None:
self.transform_memory = coordinate_system.make_workingMemory(len(self.X_array))
coordinate_system.set_workingMemory(self.transform_memory)
loc_mask = np.empty(N, dtype=bool)
plotX, plotY, plotZ, plotZt, plotT = coordinate_system.transform_and_filter(
Xtmp, Ytmp, Ztmp, Ttmp,
make_copy=False, ignore_T=self._ignore_time, bool_workspace=loc_mask)
print( "dataset:", self.name )
IDS = self.source_IDs[self.total_mask][loc_mask]
inX = self.X_array[self.total_mask][loc_mask]
inY = self.Y_array[self.total_mask][loc_mask]
inZ = self.Z_array[self.total_mask][loc_mask]
inT = self.T_array[self.total_mask][loc_mask]
infoplot = { key:data[self.total_mask][loc_mask] for key,data in self.extra_info.items() }
for i in range(len(IDS)):
print(inT[i], ', #', inX[i], inY[i], inZ[i])
# print('source', IDS[i])
# print(' plot at', plotX[i], plotY[i], plotZ[i], plotZt[i] )
# print(' plot t:', plotT[i] )
# print(' at', inX[i], inY[i], inZ[i])
# print(' T', inT[i])
# for key, d in infoplot.items():
# print(' ', key, d[i])
# print()
def LMAHeader_to_StationLocs(LMAheader, groupLOFARStations=False, center='LOFAR', name='stations', color=None, marker='s', textsize=None):
station_names = []
stationX = []
stationY = []
for station in LMAheader.antenna_info_list:
station_name = station.name
use = True
if groupLOFARStations:
LOFAR_SID = int(station_name[:3])
station_name = SId_to_Sname[ LOFAR_SID ]
if station_name in station_names:
use = False
if use:
station_names.append( station_name )
stationXYZ = station.get_XYZ(center=center)
stationX.append( stationXYZ[0] )
stationY.append( stationXYZ[1] )
return station_locations_DataSet( stationX, stationY, station_names, name=name, color=color, marker=marker, textsize=textsize )
def IterMapper_StationLocs(header, name='stations', color=None, marker='s', textsize=None):
station_names = []
stationX = []
stationY = []
for sname, antList in zip(header.station_names, header.antenna_info):
station_names.append( sname )
ant = antList[0]
stationX.append( ant.location[0] )
stationY.append( ant.location[1] )
return station_locations_DataSet( stationX, stationY, station_names, name=name, color=color, marker=marker, textsize=textsize )
class station_locations_DataSet( DataSet_Type ):
def __init__(self, station_X_array, station_Y_array, station_names, name, color, marker, textsize ):
self.marker = marker
self.marker_size = 10
self.color = color
self.name = name
self.textsize = textsize
self.display= True
self.X_array = station_X_array
self.Y_array = station_Y_array
self.station_names = station_names
self.X_TMP = np.empty(len(self.X_array), dtype=np.double)
self.Y_TMP = np.empty(len(self.X_array), dtype=np.double)
self.Z_TMP = np.empty(len(self.X_array), dtype=np.double)
self.T_TMP = np.empty(len(self.X_array), dtype=np.double)
self.transform_memory = None
self.show_text = True
def set_show_all(self, coordinate_system, do_set_limits=True):
if do_set_limits:
self.X_TMP[:] = self.X_array
self.Y_TMP[:] = self.Y_array
self.Z_TMP[:] = 0.0
self.T_TMP[:] = 0.0
if self.transform_memory is None:
self.transform_memory = coordinate_system.make_workingMemory( len(self.X_array) )
coordinate_system.set_workingMemory( self.transform_memory )
plotX, plotY, plotZ, plotZt, plotT = coordinate_system.transform(
self.X_TMP, self.Y_TMP, self.Z_TMP, self.T_TMP,
make_copy=False)
if len(plotX) > 0:
coordinate_system.set_plotX( np.min(plotX), np.max(plotX) )
coordinate_system.set_plotY( np.min(plotY), np.max(plotY) )
def bounding_box(self, coordinate_system):
self.X_TMP[:] = self.X_array
self.Y_TMP[:] = self.Y_array
self.Z_TMP[:] = 0.0
self.T_TMP[:] = 0.0
if self.transform_memory is None:
self.transform_memory = coordinate_system.make_workingMemory( len(self.X_array) )
coordinate_system.set_workingMemory( self.transform_memory )
## transform
plotX, plotY, plotZ, plotZt, plotT = coordinate_system.transform(
self.X_TMP, self.Y_TMP, self.Z_TMP, self.T_TMP,
make_copy=False)
## return actual bounds
if len(plotX) > 0:
Xbounds = [np.min(plotX), np.max(plotX)]
Ybounds = [np.min(plotY), np.max(plotY)]
else:
Xbounds = [0,1]
Ybounds = [0,1]
Zbounds = [np.nan,np.nan]
Ztbounds = [np.nan,np.nan]
Tbounds = [np.nan,np.nan]
return Xbounds, Ybounds, Zbounds, Ztbounds, Tbounds
def T_bounds(self, coordinate_system):
return [np.nan, np.nan]
def get_all_properties(self):
ret = {"marker size":str(self.marker_size), 'name':self.name, 'marker':self.marker,
"color":self.color, "textsize":str(self.textsize), "show text": str(int(self.show_text))}
return ret
def set_property(self, name, str_value):
try:
if name == "marker size":
self.marker_size = int(str_value)
elif name == "color":
self.color = str_value
elif name == 'name':
self.name = str_value
elif name == 'marker':
self.marker = str_value
elif name == 'textsize':
self.textsize = int(str_value)
elif name == 'show text':
self.show_text = bool(int(str_value))
else:
print("do not have property:", name)
except:
print("error in setting property", name, str_value)
pass
def clear(self):
pass
def use_ancillary_axes(self):
return False
def toggle_on(self):
self.display = True
def toggle_off(self):
self.display = False
self.clear()
def plot(self, AltVsT_axes, AltVsEw_axes, NsVsEw_axes, NsVsAlt_axes, ancillary_axes, coordinate_system):
self.X_TMP[:] = self.X_array
self.Y_TMP[:] = self.Y_array
self.Z_TMP[:] = 0.0
self.T_TMP[:] = 0.0
if self.transform_memory is None:
self.transform_memory = coordinate_system.make_workingMemory( len(self.X_array) )
coordinate_system.set_workingMemory( self.transform_memory )
## transform
plotX, plotY, plotZ, plotZt, plotT = coordinate_system.transform(
self.X_TMP, self.Y_TMP, self.Z_TMP, self.T_TMP,
make_copy=False)
if (not self.display) or len(plotX)==0:
print(self.name, "not display.")
return
print(self.name, "plotting")
try:
self.NsVsEw_paths = NsVsEw_axes.scatter(x=plotX, y=plotY, c=self.color, marker=self.marker, s=self.marker_size)
if self.show_text:
for sname, X, Y in zip(self.station_names, plotX, plotY):
NsVsEw_axes.annotate(sname, (X,Y), size=self.textsize)
except Exception as e: print(e)
class linearSpline_DataSet(DataSet_Type):
### NOTE: this whole thing only works in plot-coordinates. Thus, it is only functional if consistently used with a cartesian coordinate system
def __init__(self, name, color, linewidth=None, initial_points=None):
self.name = name
self.display = True
self.color = color
self.LW = linewidth
if
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: oracle_asmdg
short_description: Manage diskgroups in an Oracle database
description:
- Manage diskgroups in an Oracle database
version_added: "2.1.0.0"
options:
name:
description:
- The name of the diskgroup
required: true
default: None
aliases: ['diskgroup','dg']
state:
description:
- The intended state of the diskgroup. 'status' will just show the status of the diskgroup
default: present
choices: ['present','absent','status']
disks:
description:
- A list of disks that should be part of the diskgroup. Only the listed disks will be part of the DG, meaning that if a disk is removed from the list it will also be removed from the DG
default: None
redundancy:
description:
- The redundancy configuration for the diskgroup. It does not yet support putting disks in specific failure groups
default: external
choices: ['external','normal','high']
attribute_name:
description:
- The attribute name (e.g. compatible.rdbms)
default: None
aliases: ['an']
attribute_value:
description:
- The attribute value (e.g. 172.16.31.10)
default: None
aliases: ['av']
username:
description:
- The ASM username
required: false
default: sys
aliases: ['un']
password:
description:
- The password for the ASM user
required: false
default: None
aliases: ['pw']
service_name:
description:
- The service name to use when connecting to the ASM instance.
required: false
default: +ASM
aliases: ['sn']
hostname:
description:
- The host of the ASM instance
required: false
default: localhost
aliases: ['host']
port:
description:
- The listener port used to connect to the ASM instance
required: false
default: 1521
oracle_home:
description:
- The GI ORACLE_HOME
required: false
default: None
aliases: ['oh']
notes:
- cx_Oracle needs to be installed
requirements: [ "cx_Oracle" ]
author: <NAME>, <EMAIL>, @oravirt
'''
EXAMPLES = '''
# Create a diskgroup
oracle_asmdg:
name: MYDG1
disks:
- ORCL:MYDG1
- ORCL:MYDG2
attribute_name: compatible.asm
attribute_value: 172.16.31.10
redundancy: external
state: present
un: sys
pw: oracle123
sn: '+ASM'
host: localhost
oh: /u01/app/oracle/192.168.3.11/grid
oracle_asmdg:
name: DATA
disks:
- /dev/oracle/data1
- /dev/oracle/data2
attributes:
- {name: compatible.asm, value: 12.2.0.1.0 }
- {name: compatible.rdbms, value: 12.2.0.1.0 }
redundancy: external
state: present
un: sys
pw: oracle123
sn: '+ASM'
host: localhost
oh: /u01/app/oracle/192.168.127.12/grid
'''
import os
try:
import cx_Oracle
except ImportError:
cx_oracle_exists = False
else:
cx_oracle_exists = True
# Check if the diskgroup exists
def check_diskgroup_exists(cursor, module, msg, name):
sql = 'select count(*) from gv$asm_diskgroup where lower (name) = \'%s\'' % (name.lower())
result = execute_sql_get(module, msg, cursor, sql)
#msg = 'Normal Result is: %s, [0] is: %s, [0][0] is: %s, len is: %s, type is: %s' % (result,result[0],result[0][0],len(result), type(result))
#module.exit_json(msg=msg)
if result[0][0] > 0:
return True
else:
return False
def create_diskgroup(cursor, module, msg, oracle_home, name, disks, redundancy, attribute_name, attribute_value):
add_attr = False
if not any(x == 'None' for x in attribute_name):
add_attr = True
if not any(x is None for x in attribute_name):
add_attr = True
if add_attr:
attributes =','.join(['\''+str(n)+'\'' +'='+'\''+ str(v) + '\'' for n,v in zip(attribute_name,attribute_value)])
disklist = "','".join(disks)
sql = 'create diskgroup %s ' % (name)
sql += '%s redundancy ' % (redundancy)
sql += 'disk \'%s\' ' % (disklist)
if add_attr:
sql += ' attribute %s' % (attributes.lower())
if execute_sql(module, msg, cursor, sql):
if rac:
command = '%s/bin/srvctl start diskgroup -g %s' % (oracle_home, name.lower())
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
if 'CRS-5702' in stdout: #'Edge-case', where there is only one instance in the cluster. The diskgroup is already running after create statement so this command errors
return True
else:
msg = 'Error, couldn\'t mount the dg on all nodes. stdout: %s, stderr: %s, command is %s' % (stdout, stderr, command)
module.fail_json(msg=msg, changed=False)
else:
return True
else:
return True
else:
msg = 'error in exec sql create'
module.fail_json(msg=msg, changed=False)
return False
def remove_diskgroup(cursor, module, msg, oracle_home, name):
mountsql = 'alter diskgroup %s mount' % (name.lower())
dropsql = 'drop diskgroup %s' % (name.lower())
# If in a rac config, we need to unmount the dg on all nodes, then mount
if rac:
command = '%s/bin/srvctl stop diskgroup -g %s' % (oracle_home, name.lower())
(rc, stdout, stderr) = module.run_command(command)
if rc != 0:
msg = 'Error, couldn\'t unmount the dg. stdout: %s, stderr: %s, command is %s' % (stdout, stderr, command)
return False
if execute_sql(module, msg, cursor, mountsql):
if execute_sql(module, msg, cursor, dropsql):
return True
else:
return False
else:
return False
else:
if execute_sql(module, msg, cursor, dropsql):
return True
else:
return False
def ensure_diskgroup_state(cursor, module, msg, name, state, disks, attribute_name, attribute_value):
total_sql = []
#disk_sql = []
disk_sql = 'alter diskgroup %s ' % (name.upper())
change_attr = False
change_disk = False
get_ro_attr_sql = 'select distinct(name) from v$asm_attribute where read_only = \'Y\''
read_only_attributes = []
# Deal with attribute differences
if (attribute_name and attribute_value):
# Get all read only attributes
get_ro_attr = execute_sql_get(module, msg, cursor, get_ro_attr_sql)
for a in get_ro_attr:
read_only_attributes.append(a[0])
# Make sure properties are lower case
attribute_name = [x.lower() for x in attribute_name]
attribute_value = [y.lower() for y in attribute_value]
wanted_attributes = zip(attribute_name,attribute_value)
# Make sure we don't try to modify read only attributes. Removing them from the wanted_attributes list
for a in wanted_attributes:
if a[0] in read_only_attributes:
wanted_attributes.remove(a)
# Check the current attributes
attribute_names_ =','.join(['\''+str(n[0])+'\'' for n in (wanted_attributes)])
# Only get current attributes if we still have attributes in the wanted list
if len(attribute_names_) != 0:
current_properties = get_current_properties (cursor, module, msg, name, attribute_names_)
# Convert to dict and compare current with wanted
if cmp(dict(current_properties), dict(wanted_attributes)) != 0:
change_attr = True
for i in wanted_attributes:
total_sql.append("alter diskgroup %s set attribute '%s'='%s'" % (name, i[0], i[1]))
list_current_name = []
list_current_path = []
list_wanted = [x.upper() if ':' in x else x for x in disks]
list_current = get_current_disks(cursor, module, msg, name)
for p,n in list_current:
list_current_name.append(n)
list_current_path.append(p)
# List of disks to add
list_add=set(list_wanted).difference(list_current_path)
# List of disks to remove
list_remove=set(list_current_path).difference(list_wanted)
# Pick out the v$asm_disk.name from the diskgroup
remove_disks = [a[1] for a in list_current if a[0] in list_remove ]
add_disk = "','".join(list_add)
remove_disk= "','".join(remove_disks)
if sorted(list_current_path) == sorted(list_wanted) and change_attr == False:
msg = "Diskgroup %s is in the intended state" % (name)
module.exit_json(msg=msg, changed=False)
if len(list_add)>= 1:
change_disk = True
disk_sql += ' add disk '
disk_sql += "'%s'" % add_disk
if len(list_remove) >= 1:
#disk_sql = 'alter diskgroup %s ' % (name.upper())
change_disk = True
disk_sql += ' drop disk '
disk_sql += "'%s'" % remove_disk
if change_disk:
total_sql.append(disk_sql)
if ensure_diskgroup_state_sql(module,msg,cursor,total_sql):
msg = 'Diskgroup %s has been put in the intended state' % (name)
module.exit_json(msg=msg, changed=True)
else:
return False
def ensure_diskgroup_state_sql(module,msg,cursor,total_sql):
for a in total_sql:
execute_sql(module, msg, cursor, a)
return True
def get_current_disks(cursor, module, msg, name):
sql = 'select d.path,d.name from v$asm_disk d, v$asm_diskgroup dg '
sql += 'where dg.group_number = d.group_number '
sql += 'and upper(dg.name) = \'%s\'' % (name.upper())
result = execute_sql_get(module, msg, cursor, sql)
return result
def get_current_properties(cursor, module, msg, name,attribute_names_):
sql = 'select lower(a.name),lower(a.value) from v$asm_attribute a, v$asm_diskgroup dg '
sql += 'where dg.group_number = a.group_number '
sql += 'and upper(dg.name) = \'%s\' ' % (name.upper())
sql += 'and a.name in (%s) ' % (attribute_names_.lower())
result = execute_sql_get(module, msg, cursor, sql)
return result
def execute_sql_get(module, msg, cursor, sql):
#module.exit_json(msg="In execute_sql_get", changed=False)
try:
cursor.execute(sql)
result = (cursor.fetchall())
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Something went wrong while executing sql_get - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return False
return result
def execute_sql(module, msg, cursor, sql):
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Something went wrong while executing sql - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return False
return True
def main():
msg = ['']
cursor = None
mode = 'sysasm'
global rac
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases = ['diskgroup','dg']),
disks = dict(required=False, type='list'),
redundancy = dict(default="external", choices = ["external","normal","high","flex"]),
attribute_name = dict(required=False, type='list', aliases=['an']),
attribute_value = dict(required=False, type='list', aliases=['av']),
state = dict(default="present", choices = ["present", "absent", "status"]),
user = dict(required=False, aliases = ['un','username']),
password = dict(required=False, no_log=True, aliases = ['pw']),
hostname = dict(required=False, default = 'localhost', aliases = ['host']),
port = dict(required=False, default = 1521),
service_name = dict(required=False, default = '+ASM', aliases = ['sn']),
oracle_home = dict(required=False, aliases = ['oh']),
),
)
name = module.params["name"]
disks = module.params["disks"]
redundancy = module.params["redundancy"]
attribute_name = module.params["attribute_name"]
attribute_value = module.params["attribute_value"]
state = module.params["state"]
user = module.params["user"]
password = module.params["password"]
hostname = module.params["hostname"]
port = module.params["port"]
service_name = module.params["service_name"]
oracle_home = module.params["oracle_home"]
if not cx_oracle_exists:
msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set"
module.fail_json(msg=msg)
wallet_connect = '/@%s' % service_name
try:
if (not user and not password) : # If neither user nor password is supplied, the use of an
for scope in cmds_formatted:
permissions = {}
new_cmds = cmds_formatted[scope]
existing_cmds = await self.req.get_all_commands(guild_id=scope)
existing_by_name = {}
to_send = []
changed = False
for cmd in existing_cmds:
existing_by_name[cmd["name"]] = model.CommandData(**cmd)
if len(new_cmds) != len(existing_cmds):
changed = True
for command in new_cmds:
cmd_name = command["name"]
permissions[cmd_name] = command.pop("permissions")
if cmd_name in existing_by_name:
cmd_data = model.CommandData(**command)
existing_cmd = existing_by_name[cmd_name]
if cmd_data != existing_cmd:
changed = True
to_send.append(command)
else:
command_with_id = command
command_with_id["id"] = existing_cmd.id
to_send.append(command_with_id)
else:
changed = True
to_send.append(command)
if changed:
self.logger.debug(
f"Detected changes on {scope if scope is not None else 'global'}, updating them"
)
try:
existing_cmds = await self.req.put_slash_commands(
slash_commands=to_send, guild_id=scope
)
except discord.HTTPException as ex:
if ex.status == 400:
# catch bad requests
cmd_nums = set(
re.findall(r"In\s(\d).", ex.args[0])
) # find all discords references to commands
error_string = ex.args[0]
for num in cmd_nums:
error_command = to_send[int(num)]
error_string = error_string.replace(
f"In {num}",
f"'{error_command.get('name')}'",
)
ex.args = (error_string,)
raise ex
else:
self.logger.debug(
f"Detected no changes on {scope if scope is not None else 'global'}, skipping"
)
id_name_map = {}
for cmd in existing_cmds:
id_name_map[cmd["name"]] = cmd["id"]
for cmd_name in permissions:
cmd_permissions = permissions[cmd_name]
cmd_id = id_name_map[cmd_name]
for applicable_guild in cmd_permissions:
if applicable_guild not in permissions_map:
permissions_map[applicable_guild] = []
permission = {
"id": cmd_id,
"guild_id": applicable_guild,
"permissions": cmd_permissions[applicable_guild],
}
permissions_map[applicable_guild].append(permission)
self.logger.info("Syncing permissions...")
self.logger.debug(f"Commands permission data are {permissions_map}")
for scope in permissions_map:
existing_perms = await self.req.get_all_guild_commands_permissions(scope)
new_perms = permissions_map[scope]
changed = False
if len(existing_perms) != len(new_perms):
changed = True
else:
existing_perms_model = {}
for existing_perm in existing_perms:
existing_perms_model[existing_perm["id"]] = model.GuildPermissionsData(
**existing_perm
)
for new_perm in new_perms:
if new_perm["id"] not in existing_perms_model:
changed = True
break
if existing_perms_model[new_perm["id"]] != model.GuildPermissionsData(
**new_perm
):
changed = True
break
if changed:
self.logger.debug(f"Detected permissions changes on {scope}, updating them")
await self.req.update_guild_commands_permissions(scope, new_perms)
else:
self.logger.debug(f"Detected no permissions changes on {scope}, skipping")
if delete_from_unused_guilds:
self.logger.info("Deleting unused guild commands...")
other_guilds = [
guild.id for guild in self._discord.guilds if guild.id not in cmds["guild"]
]
# This is an extremely bad way to do this, because slash cmds can be in guilds the bot isn't in.
# But it's the only way until discord makes an endpoint to request all the guilds with cmds registered.
for guild in other_guilds:
with suppress(discord.Forbidden):
existing = await self.req.get_all_commands(guild_id=guild)
if len(existing) != 0:
self.logger.debug(f"Deleting commands from {guild}")
await self.req.put_slash_commands(slash_commands=[], guild_id=guild)
if delete_perms_from_unused_guilds:
self.logger.info("Deleting unused guild permissions...")
other_guilds = [
guild.id for guild in self._discord.guilds if guild.id not in permissions_map.keys()
]
for guild in other_guilds:
with suppress(discord.Forbidden):
self.logger.debug(f"Deleting permissions from {guild}")
existing_perms = await self.req.get_all_guild_commands_permissions(guild)
if len(existing_perms) != 0:
await self.req.update_guild_commands_permissions(guild, [])
self.logger.info("Completed syncing all commands!")
def add_slash_command(
self,
cmd,
name: str = None,
description: str = None,
guild_ids: typing.List[int] = None,
options: list = None,
default_permission: bool = True,
permissions: typing.Dict[int, list] = None,
connector: dict = None,
has_subcommands: bool = False,
):
"""
Registers slash command to SlashCommand.
.. warning::
Just using this won't register the slash command with the Discord API.
To register it, check :meth:`.utils.manage_commands.add_slash_command` or simply enable `sync_commands`.
:param cmd: Command Coroutine.
:type cmd: Coroutine
:param name: Name of the slash command. Default name of the coroutine.
:type name: str
:param description: Description of the slash command. Defaults to command docstring or ``None``.
:type description: str
:param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.
:type guild_ids: List[int]
:param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
:type options: list
:param default_permission: Sets if users have permission to run slash command by default, when no permissions are set. Default ``True``.
:type default_permission: bool
:param permissions: Dictionary of permissions of the slash command. Key being target guild_id and value being a list of permissions to apply. Default ``None``.
:type permissions: dict
:param connector: Kwargs connector for the command. Default ``None``.
:type connector: dict
:param has_subcommands: Whether it has subcommand. Default ``False``.
:type has_subcommands: bool
"""
name = name or cmd.__name__
name = name.lower()
guild_ids = guild_ids if guild_ids else []
if not all(isinstance(item, int) for item in guild_ids):
raise error.IncorrectGuildIDType(
f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name}' will be deactivated and broken until fixed."
)
if name in self.commands:
tgt = self.commands[name]
if not tgt.has_subcommands:
raise error.DuplicateCommand(name)
has_subcommands = tgt.has_subcommands
for x in tgt.allowed_guild_ids:
if x not in guild_ids:
guild_ids.append(x)
description = description or getdoc(cmd)
if options is None:
options = manage_commands.generate_options(cmd, description, connector)
_cmd = {
"func": cmd,
"description": description,
"guild_ids": guild_ids,
"api_options": options,
"default_permission": default_permission,
"api_permissions": permissions,
"connector": connector or {},
"has_subcommands": has_subcommands,
}
obj = model.BaseCommandObject(name, _cmd)
self.commands[name] = obj
self.logger.debug(f"Added command `{name}`")
return obj
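# Minimal usage sketch (hypothetical coroutine and guild ID; normally the
# ``@slash.slash(...)`` decorator defined below is used instead of calling
# this method directly):
#   async def _ping(ctx):
#       await ctx.send("pong")
#   slash.add_slash_command(_ping, name="ping",
#                           description="Replies with pong",
#                           guild_ids=[123456789012345678])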
def add_subcommand(
self,
cmd,
base,
subcommand_group=None,
name=None,
description: str = None,
base_description: str = None,
base_default_permission: bool = True,
base_permissions: typing.Dict[int, list] = None,
subcommand_group_description: str = None,
guild_ids: typing.List[int] = None,
options: list = None,
connector: dict = None,
):
"""
Registers subcommand to SlashCommand.
:param cmd: Subcommand Coroutine.
:type cmd: Coroutine
:param base: Name of the base command.
:type base: str
:param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.
:type subcommand_group: str
:param name: Name of the subcommand. Default name of the coroutine.
:type name: str
:param description: Description of the subcommand. Defaults to command docstring or ``None``.
:type description: str
:param base_description: Description of the base command. Default ``None``.
:type base_description: str
:param base_default_permission: Sets if users have permission to run the base command by default, when no permissions are set. Default ``True``.
:type base_default_permission: bool
:param base_permissions: Dictionary of permissions of the slash command. Key being target guild_id and value being a list of permissions to apply. Default ``None``.
:type base_permissions: dict
:param subcommand_group_description: Description of the subcommand_group. Default ``None``.
:type subcommand_group_description: str
:param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.
:type guild_ids: List[int]
:param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.
:type options: list
:param connector: Kwargs connector for the command. Default ``None``.
:type connector: dict
"""
base = base.lower()
subcommand_group = subcommand_group.lower() if subcommand_group else subcommand_group
name = name or cmd.__name__
name = name.lower()
description = description or getdoc(cmd)
guild_ids = guild_ids if guild_ids else []
if not all(isinstance(item, int) for item in guild_ids):
raise error.IncorrectGuildIDType(
f"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name}' will be deactivated and broken until fixed."
)
if base in self.commands:
for x in guild_ids:
if x not in self.commands[base].allowed_guild_ids:
self.commands[base].allowed_guild_ids.append(x)
if options is None:
options = manage_commands.generate_options(cmd, description, connector)
_cmd = {
"func": None,
"description": base_description,
"guild_ids": guild_ids.copy(),
"api_options": [],
"default_permission": base_default_permission,
"api_permissions": base_permissions,
"connector": {},
"has_subcommands": True,
}
_sub = {
"func": cmd,
"name": name,
"description": description,
"base_desc": base_description,
"sub_group_desc": subcommand_group_description,
"guild_ids": guild_ids,
"api_options": options,
"connector": connector or {},
}
if base not in self.commands:
self.commands[base] = model.BaseCommandObject(base, _cmd)
else:
base_command = self.commands[base]
base_command.has_subcommands = True
if base_permissions:
for applicable_guild in base_permissions:
if applicable_guild not in base_command.permissions:
base_command.permissions[applicable_guild] = []
base_command.permissions[applicable_guild].extend(
base_permissions[applicable_guild]
)
if base_command.description:
_cmd["description"] = base_command.description
if base not in self.subcommands:
self.subcommands[base] = {}
if subcommand_group:
if subcommand_group not in self.subcommands[base]:
self.subcommands[base][subcommand_group] = {}
if name in self.subcommands[base][subcommand_group]:
raise error.DuplicateCommand(f"{base} {subcommand_group} {name}")
obj = model.SubcommandObject(_sub, base, name, subcommand_group)
self.subcommands[base][subcommand_group][name] = obj
else:
if name in self.subcommands[base]:
raise error.DuplicateCommand(f"{base} {name}")
obj = model.SubcommandObject(_sub, base, name)
self.subcommands[base][name] = obj
self.logger.debug(
f"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`"
)
return obj
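# Usage sketch (hypothetical names), registering ``/group create`` under the
# base command ``group``:
#   async def _create(ctx, label):
#       await ctx.send(f"created {label}")
#   slash.add_subcommand(_create, base="group", name="create",
#                        description="Create something")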
def slash(
self,
*,
name: str = None,
description: str = None,
guild_ids: typing.List[int] = None,
options: typing.List[dict] = None,
default_permission: bool = True,
permissions: dict =
= parser.get_metadata('ascii')
return parser
def parse(self):
line_no = 0
section_line_no = 0
for line in self.voxfile:
section_line_no += 1
line_no += 1
debug().current_line_num = line_no
line = line.strip()
if line.startswith('//'):
continue
if line.startswith('#'):
token_state = self.State.from_token(line.split('#')[1])
if token_state is None:
continue
if type(token_state) is tuple:
self.state = token_state[0]
self.state_track = int(token_state[1])
else:
self.state = token_state
section_line_no = 0
elif line.startswith('define\t'):
splitted = line.split('\t')
if len(splitted) != 3:
debug().record(Debug.Level.WARNING, 'fx_define', f'define line "{line}" does not have 3 operands')
continue
self.vox_defines[splitted[1]] = int(splitted[2])
if int(splitted[2]) != 0:
self.effect_defines[int(splitted[2])] = KshEffectDefine.from_pre_v4_vox_sound_id(int(splitted[2]))
elif self.state is not None:
self.process_state(line, section_line_no)
self.finalized = True
def process_state(self, line, section_line_num):
splitted = line.split('\t')
if line == '':
return
now = Timing.from_time_str(splitted[0])
if now is not None:
self.timing_point(now)
if self.state == self.State.FORMAT_VERSION:
self.vox_version = int(line)
elif self.state == self.State.BEAT_INFO:
timesig = TimeSignature(int(splitted[1]), int(splitted[2]))
self.events[now][EventKind.TIMESIG] = timesig
elif self.state == self.State.BPM:
now = Timing(1, 1, 0)
self.timing_point(now)
try:
self.events[now][EventKind.BPM] = float(line)
except ValueError:
# Jomanda adv seems to have the string "BAROFF" at one point.
debug().record_last_exception(Debug.Level.ABNORMALITY, tag='bpm_parse')
elif self.state == self.State.BPM_INFO:
if splitted[2].endswith('-'):
# There is a stop.
self.stop_point = StopEvent()
self.stop_point.moment = now
last_timesig = None
for t, e in self.time_iter(Timing(1, 1, 0), self.events[Timing(1, 1, 0)][EventKind.TIMESIG]):
if t.measure > MAX_MEASURES:
# When parsing a Stop event, the end of the chart may not yet be parsed, so we make an
# assumption for how long a chart could possibly be.
break
if e is not None and EventKind.TIMESIG in e:
last_timesig = e[EventKind.TIMESIG]
if e is not None and t == now:
self.stop_point.timesig = last_timesig
if self.stop_point.timesig is None:
raise VoxParseError('bpm_info', 'unable to find end for stop event')
else:
if self.stop_point is not None:
self.events[self.stop_point.moment][EventKind.STOP] = now.diff(
self.stop_point.moment, self.stop_point.timesig)
self.stop_point = None
if splitted[2] != '4' and splitted[2] != '4-':
debug().record(Debug.Level.ABNORMALITY, 'bpm_info', f'non-4 beat division in bpm info: {splitted[2]}')
self.events[now][EventKind.BPM] = float(splitted[1])
elif self.state == self.State.TILT_INFO:
try:
self.events[now][EventKind.TILTMODE] = TiltMode.from_vox_id(int(splitted[1]))
except ValueError:
debug().record_last_exception(level=Debug.Level.WARNING)
elif self.state == self.State.END_POSITION:
self.end = now
elif self.state == self.State.SOUND_ID:
# The `define` handler takes care of this outside of this loop.
debug().record(Debug.Level.WARNING,
'vox_parse',
f'({self.state}) line other than a #define was encountered in SOUND ID')
elif self.state == self.State.TAB_EFFECT:
# TODO Tab effects
if TabEffectInfo.line_is_abnormal(section_line_num, line):
debug().record(Debug.Level.ABNORMALITY, 'tab_effect', f'tab effect info abnormal: {line}')
elif self.state == self.State.FXBUTTON_EFFECT:
if self.vox_version < 6:
# Below v6, the defines come one after another with no spacing between other than the newline.
try:
self.effect_defines[section_line_num - 1] = KshEffectDefine.from_effect_info_line(line)
except ValueError:
self.effect_defines[section_line_num - 1] = KshEffectDefine.default_effect()
debug().record_last_exception(tag='fx_load')
else:
if (section_line_num - 1) % 3 < 2:
# The < 2 condition will allow the second line to override the first.
if line.isspace():
debug().record(Debug.Level.WARNING, 'fx_load', 'fx effect info line is blank')
elif splitted[0] != '0,':
index = int(section_line_num / 3)
try:
self.effect_defines[index] = KshEffectDefine.from_effect_info_line(line)
except ValueError:
self.effect_defines[index] = KshEffectDefine.default_effect()
debug().record_last_exception(level=Debug.Level.WARNING, tag='fx_load')
elif self.state == self.State.TAB_PARAM_ASSIGN:
if TabParamAssignInfo.line_is_abnormal(line):
debug().record(Debug.Level.ABNORMALITY, 'tab_param_assign', f'tab param assign info abnormal: {line}')
elif self.state == self.State.SPCONTROLLER:
try:
param = SpcParam.from_vox_name(splitted[1])
except ValueError:
debug().record_last_exception(tag='spcontroller_load')
return
if param is not None:
try:
self.events[now][(EventKind.SPCONTROLLER, param)] = CameraNode(
float(splitted[4]), float(splitted[5]), int(splitted[3]))
except ValueError:
# Just record it as an abnormality.
pass
if SpcParam.line_is_abnormal(param, splitted):
debug().record(Debug.Level.ABNORMALITY, 'spcontroller_load', 'spcontroller line is abnormal')
elif self.state == self.state.TRACK:
if self.state_track == 1 or self.state_track == 8:
laser_node = LaserNode.Builder()
laser_node.side = LaserSide.LEFT if self.state_track == 1 else LaserSide.RIGHT
laser_node.position = int(splitted[1])
laser_node.node_type = LaserCont(int(splitted[2]))
try:
laser_node.roll_kind = next(iter([r for r in RollKind if r.value == int(splitted[3])]))
except StopIteration:
if splitted[3] != '0':
debug().record(Debug.Level.ABNORMALITY, 'roll_parse', f'roll type: {splitted[3]}')
if len(splitted) > 4:
try:
laser_node.filter = KshFilter.from_vox_filter_id(int(splitted[4]))
except ValueError:
debug().record_last_exception(tag='laser_load')
if len(splitted) > 5:
laser_node.range = int(splitted[5])
laser_node = LaserNode(laser_node)
# Check if it's a slam.
if (EventKind.TRACK, self.state_track) in self.events[now]:
self.new_laser = False
else:
self.new_laser = True
if not (EventKind.TRACK, self.state_track) in self.events[now]:
self.events[now][(EventKind.TRACK, self.state_track)] = laser_node
else:
slam_start = self.events[now][(EventKind.TRACK, self.state_track)]
if type(slam_start) is LaserSlam:
# A few charts have three laser nodes at the same time point for some reason.
slam_start = slam_start.end
try:
slam = LaserSlam(slam_start, laser_node)
except ValueError:
debug().record_last_exception(Debug.Level.WARNING, tag='slam_parse')
return
self.events[now][(EventKind.TRACK, self.state_track)] = slam
else:
try:
button = Button.from_track_num(self.state_track)
except ValueError:
debug().record_last_exception(tag='button_load')
return
if button is None:
# Ignore track 9 buttons.
return
fx_data = None
duration = int(splitted[1])
if button.is_fx():
# Process effect assignment.
if duration > 0:
# Fx hold.
if self.vox_version < 4:
fx_data = int(splitted[3]) if splitted[3].isdigit() else int(self.vox_defines[splitted[3]])
else:
if 2 <= int(splitted[2]) <= 13:
# It's a regular effect.
fx_data = int(splitted[2]) - 2
elif int(splitted[2]) == 254:
debug().record(Debug.Level.WARNING,
'button_fx',
'reverb effect is unimplemented, using fallback')
fx_data = -1
else:
debug().record(Debug.Level.WARNING,
'button_fx',
'out of bounds fx index for FX hold, using fallback')
fx_data = -1
else:
# Fx chip, check for sound.
if self.vox_version >= 9:
sound_id = int(splitted[2])
if sound_id != -1 and sound_id != 255 and (sound_id >= FX_CHIP_SOUND_COUNT or sound_id < 0):
debug().record(Debug.Level.WARNING,
'chip_sound_parse',
f'unhandled chip sound id {sound_id}')
elif 1 <= sound_id < FX_CHIP_SOUND_COUNT:
fx_data = sound_id
self.required_chip_sounds.add(sound_id)
self.events[now][(EventKind.TRACK, self.state_track)] = ButtonPress(button, int(splitted[1]), fx_data)
def write_to_ksh(self, jacket_idx=None, using_difficulty_audio=None, file=sys.stdout):
global args
track_basename = f'track_{self.difficulty.to_abbreviation()}{AUDIO_EXTENSION}' if using_difficulty_audio else \
f'track{AUDIO_EXTENSION}'
jacket_basename = '' if jacket_idx is None else f'jacket_{jacket_idx}.png'
track_bg = Background.from_vox_id(int(self.get_metadata('bg_no')))
header = f'''// Source: {self.source_file_name}
// Created by vox2ksh-{os.popen('git rev-parse HEAD').read()[:8].strip()}.
title={self.get_metadata('title_name')}
artist={self.get_metadata('artist_name')}
effect={self.get_metadata('effected_by', True)}
sorttitle={self.get_metadata('title_yomigana')}
sortartist={self.get_metadata('artist_yomigana')}
jacket={jacket_basename}
illustrator={self.get_metadata('illustrator', True)}
difficulty={self.difficulty.to_ksh_name()}
level={self.get_metadata('difnum', True)}
t={self.bpm_string()}
m={track_basename}
mvol={self.get_metadata('volume')}
o=0
bg={track_bg}
layer={track_bg}
po={int(config['Audio']['hidden_preview_position']) * 1000}
plength=11000
pfiltergain={KSH_DEFAULT_FILTER_GAIN}
filtertype=peak
chokkakuautovol=0
chokkakuvol={KSH_DEFAULT_SLAM_VOL}
ver=167'''
print(header, file=file)
print('--', file=file)
class SpControllerCountdown(dataobject):
event: CameraNode
time_left: int
# Below begins the main printing loop.
# We iterate through each tick of the song and print a KSH line. If there are events, we put stuff on that line.
# The currently active BT holds.
holds = {}
# The currently active SpController nodes.
ongoing_spcontroller_events = {p: None for p in SpcParam}
# Whether there is an ongoing laser on either side.
lasers = {s: None for s in LaserSide}
slam_status = {}
last_laser_timing = {s: None for s in LaserSide}
last_filter = KshFilter.PEAK
current_timesig = self.events[Timing(1, 1, 0)][EventKind.TIMESIG]
debug().current_line_num = len(header.split('\n')) + 1
measure_iter = range(self.end.measure)
for m in measure_iter:
measure = m + 1
now = Timing(measure, 1, 0)
# Laser range resets every measure in ksh.
laser_range = {LaserSide.LEFT: 1, LaserSide.RIGHT: 1}
if now in self.events and EventKind.TIMESIG in self.events[now]:
current_timesig = self.events[now][EventKind.TIMESIG]
print(f'beat={current_timesig.top}/{current_timesig.bottom}', file=file)
for b in range(current_timesig.top):
# Vox beats are also 1-indexed.
beat = b + 1
print(f'// #{measure},{beat}', file=file)
for o in range(int(float(TICKS_PER_BEAT) * (4 / current_timesig.bottom))):
# However, vox offsets are 0-indexed.
now = Timing(measure, beat, o)
buffer = KshLineBuf()
if now in self.events:
for kind, event in self.events[now].items():
if kind == EventKind.TIMESIG and (beat != 1 or o != 0):
raise KshConvertError('time signature change in the middle of a measure')
elif kind == EventKind.BPM:
event: float
buffer.meta.append(f't={str(event).rstrip("0").rstrip(".").strip()}')
elif kind == EventKind.STOP:
event: int
buffer.meta.append(f'stop={event}')
elif type(kind) is tuple and kind[0] == EventKind.SPCONTROLLER:
event: CameraNode
cam_param: SpcParam = kind[1]
if cam_param.to_ksh_value() is not None:
if ongoing_spcontroller_events[cam_param] is not None and ongoing_spcontroller_events[cam_param].time_left != 0:
debug().record(Debug.Level.WARNING, 'spnode_output', f'spcontroller node at {now} interrupts another of same kind ({cam_param})')
ongoing_spcontroller_events[cam_param] = SpControllerCountdown(event=event, time_left=event.duration)
buffer.meta.append(f'{cam_param.to_ksh_name()}={cam_param.to_ksh_value(event.start_param)}')
elif cam_param.is_state():
buffer.meta.append(f'{cam_param.to_ksh_name()}={event.duration}')
elif kind == EventKind.TILTMODE:
event: TiltMode
buffer.meta.append(f'tilt={event.to_ksh_name()}')
elif type(kind) is tuple and kind[0] == EventKind.TRACK:
if kind[1] == 1 or kind[1] == 8:
# Laser
if type(event) is LaserSlam:
event: LaserSlam
# TODO Laser countdown for different timesigs
laser = event.start
if event.side in map(lambda x: x.side(), slam_status):
raise KshConvertError('new laser node spawn while trying to resolve slam')
slam_status[event] = SLAM_TICKS
if laser.roll_kind is not None:
if buffer.spin != '':
debug().record(Debug.Level.WARNING, 'ksh_laser', 'spin on both lasers')
if laser.roll_kind.value <= 3:
buffer.spin = '@'
if event.direction() == LaserSlam.Direction.LEFT:
buffer.spin += '('
else:
buffer.spin += ')'
# My assumption right now is that the MEASURE kind will always take one
# measure's worth of ticks. Likewise for the other ones.
if laser.roll_kind == RollKind.MEASURE:
buffer.spin += str(int(current_timesig.top * current_timesig.ticks_per_beat() * 0.85))
elif laser.roll_kind == RollKind.HALF_MEASURE:
buffer.spin += str(int((current_timesig.top * current_timesig.ticks_per_beat()) / 2.95))
elif laser.roll_kind == | |
<reponame>gmr/dynamodb-backup
#!/usr/bin/env python3
"""
Backup DynamoDB to CSV or Avro
"""
import argparse
import base64
import csv
import datetime
import gzip
import io
import json
import logging
import multiprocessing
import pathlib
import queue
import sys
import tempfile
import time
import typing
from urllib import parse
import boto3
from botocore import exceptions
import coloredlogs
import fastavro
from fastavro import validation
import requests
try:
import snappy
except ImportError:
snappy = None
from dynamodb_backup import version
LOGGER = logging.getLogger(__name__)
LOGGERS = {
'boto3',
'botocore',
'botocore.vendored.requests.packages.urllib3.connectionpool',
'requests',
's3transfer',
'urllib3'}
CODEC_CHOICES = {'none', 'deflate'}
if snappy:
CODEC_CHOICES.add('snappy')
LOGGING_FORMAT = '%(asctime)s %(levelname) -10s %(name) -10s %(message)s'
LOGGING_FIELD_STYLES = {'hostname': {'color': 'magenta'},
'programname': {'color': 'cyan'},
'name': {'color': 'blue'},
'levelname': {'color': 'white', 'bold': True},
'asctime': {'color': 'white'}}
MAX_QUEUE_DEPTH = 100000
class Process(multiprocessing.Process):
"""Child process for paginating from DynamoDB and putting rows in the work
queue.
"""
def __init__(self, group=None, target=None, name=None,
args=None, kwargs=None):
super().__init__(group, target, name, args or (), kwargs or {})
self.cli_args = kwargs['cli_args']
self.error_exit = kwargs['error_exit']
self.finished = kwargs['finished']
self.queued = kwargs['queued']
self.processed = kwargs['processed']
self.segment = kwargs['segment']
self.units = kwargs['unit_count']
self.work_queue = kwargs['work_queue']
def run(self) -> typing.NoReturn:
"""Executes on start, paginate through the table"""
_configure_logging(self.cli_args)
client = boto3.client('dynamodb')
paginator = client.get_paginator('scan')
LOGGER.debug('Starting to page through %s for segment %s of %s',
self.cli_args.table, self.segment + 1,
self.cli_args.processes)
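        # DynamoDB parallel Scan: this worker reads only its own Segment out of
        # TotalSegments, and ReturnConsumedCapacity='TOTAL' lets the parent sum
        # the read units consumed across all segments.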
kwargs = {'TableName': self.cli_args.table,
'TotalSegments': self.cli_args.processes,
'ConsistentRead': True,
'PaginationConfig': {'PageSize': 500},
'ReturnConsumedCapacity': 'TOTAL',
'Segment': self.segment}
if self.cli_args.expression:
self.cli_args.expression.seek(0)
kwargs.update(json.load(self.cli_args.expression))
queued = 0
for page in paginator.paginate(**kwargs):
if self.error_exit.is_set():
break
for row in [_unmarshall(i) for i in page.get('Items', [])]:
self.work_queue.put(row)
with self.queued.get_lock():
self.queued.value += 1
queued += 1
if self.error_exit.is_set():
break
with self.units.get_lock():
self.units.value += page['ConsumedCapacity']['CapacityUnits']
while self._queue_size(queued) > 0:
if self.error_exit.is_set():
break
LOGGER.debug('Waiting for %i items in queue',
self._queue_size(queued))
try:
time.sleep(0.25)
except KeyboardInterrupt:
self.error_exit.set()
break
LOGGER.debug('Finished paginating in segment %i', self.segment + 1)
with self.finished.get_lock():
self.finished.value += 1
if self.finished.value == self.cli_args.processes:
LOGGER.debug('Closing the queue')
self.work_queue.close()
LOGGER.debug('Exiting process %i', self.segment + 1)
def _queue_size(self, queued: int) -> int:
try:
return self.work_queue.qsize()
except NotImplementedError:
return queued - self.processed.value
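# A minimal sketch of `_unmarshall` (used in Process.run above; its definition
# falls outside this excerpt), assuming boto3's public TypeDeserializer. The
# project's real helper may differ in name and behaviour.
def _unmarshall_sketch(item: dict) -> dict:
    """Convert a low-level DynamoDB item, e.g. {'name': {'S': 'abc'}}, to plain Python values."""
    from boto3.dynamodb.types import TypeDeserializer
    deserializer = TypeDeserializer()
    return {key: deserializer.deserialize(value) for key, value in item.items()}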
class QueueIterator:
"""Generator to return the records from the DynamoDB table"""
def __init__(self,
error_exit: multiprocessing.Event,
finished: multiprocessing.Value,
processed: multiprocessing.Value,
queued: multiprocessing.Value,
segments: int,
status_interval: int,
units: multiprocessing.Value,
work_queue: multiprocessing.Queue):
self.error_exit = error_exit
self.finished = finished
self.processed = processed
self.queued = queued
self.records = 0
self.segments = segments
self.status_interval = status_interval or 10000
self.units = units
self.work_queue = work_queue
def __iter__(self):
return self
def __next__(self) -> dict:
if self._should_exit:
LOGGER.debug('%i items in queue, %i processed',
self._queue_size, self.processed.value)
LOGGER.debug('Exiting iterator on __next__')
raise StopIteration
while not self._should_exit:
try:
return self._get_record()
except queue.Empty:
LOGGER.debug('Queue get timeout %r', self._should_exit)
if self._should_exit:
LOGGER.debug('Exiting iterator on __next__ queue.Empty')
raise StopIteration
LOGGER.debug('%i items in queue, %i processed',
self._queue_size, self.processed.value)
raise StopIteration
def _get_record(self) -> dict:
record = self.work_queue.get(True, 3)
self.records += 1
if self.records % self.status_interval == 0:
LOGGER.debug(
'Wrote %i records, %.2f units consumed, %i records queued',
self.records, self.units.value, self._queue_size)
return record
@property
def _queue_size(self) -> int:
try:
return self.work_queue.qsize()
except NotImplementedError:
return self.records - self.processed.value
@property
def _should_exit(self) -> bool:
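        # Stop iterating once an error has been signalled, or when every segment
        # reader has finished and everything queued has also been processed.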
return (self.error_exit.is_set() or
(self.finished.value == self.segments and
self.queued.value == self.processed.value))
class Writer:
EXTENSION = 'txt'
def __init__(self,
args: argparse.Namespace,
iterator: QueueIterator,
error_exit: multiprocessing.Event,
processed: multiprocessing.Value):
self.args = args
self.chunk = 0
self.dirty = False
self.error_exit = error_exit
self.iterator = iterator
self.processed = processed
self.s3uri = self.args.s3.rstrip('/') if self.args.s3 else None
self.file = self._get_file()
def close(self) -> typing.NoReturn:
if self.s3uri and not self.error_exit.is_set() and self.dirty:
LOGGER.debug('Uploading to S3 on close')
self._upload_to_s3()
if not self.file.closed:
self.file.close()
if not self.dirty:
LOGGER.debug('Removing empty final file')
self._file_path.unlink()
@property
def destination(self) -> pathlib.Path:
return self.s3uri if self.s3uri else pathlib.Path(self.args.directory)
@property
def extension(self) -> str:
return self.EXTENSION
@property
def _file_path(self) -> pathlib.Path:
if self.args.chunk:
return self.destination / '{}-{:03d}.{}'.format(
self.args.table, self.chunk, self.extension)
return self.destination / '{}.{}'.format(
self.args.table, self.extension)
def _get_file(self) -> typing.Union[tempfile.NamedTemporaryFile,
typing.BinaryIO]:
self.dirty = False
if self.args.s3:
return tempfile.NamedTemporaryFile('xb+')
return self._file_path.open('wb+')
def _s3_key(self):
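        # Object keys are date-partitioned: <prefix>/YYYY/MM/DD/<table>[-NNN].<extension>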
parsed = parse.urlparse(self.s3uri, 's3')
filename = '{}.{}'.format(self.args.table, self.extension)
if self.args.chunk:
filename = '{}-{:03d}.{}'.format(
self.args.table, self.chunk, self.extension)
return datetime.date.today().strftime(
'{}/%Y/%m/%d/{}'.format(parsed.path.strip('/'), filename))
def _upload_to_s3(self):
"""Create the backup as a file on AmazonS3"""
self.file.flush()
self.file.seek(0)
parsed = parse.urlparse(self.s3uri, 's3')
key = self._s3_key()
LOGGER.info('Uploading to s3://%s/%s', parsed.netloc, key)
try:
boto3.client('s3').upload_file(self.file.name, parsed.netloc, key)
except exceptions.ClientError as error:
LOGGER.error('Error uploading: %s', error)
self.error_exit.set()
class AvroWriter(Writer):
EXTENSION = 'avro'
def __init__(self,
args: argparse.Namespace,
iterator: QueueIterator,
error_exit: multiprocessing.Event,
processed: multiprocessing.Value,
schema: dict):
super().__init__(args, iterator, error_exit, processed)
self.schema = schema
def write_from_queue(self) -> typing.NoReturn:
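        # Stream records from the shared queue into the Avro container. With
        # chunking enabled, a new file (and optional S3 upload) starts every
        # chunk_size rows; otherwise the whole iterator is written in one pass.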
if self.args.chunk:
rows = []
for row in self._iterator:
rows.append(row)
if len(rows) == self.args.chunk_size:
fastavro.writer(
self.file, self.schema, rows, self.args.codec)
if self.s3uri:
self._upload_to_s3()
self.file.close()
self.chunk += 1
self.file = self._get_file()
rows = []
# Write the remaining rows
if rows:
self.dirty = True
fastavro.writer(
self.file, self.schema, rows, self.args.codec)
else:
self.dirty = True
fastavro.writer(
self.file, self.schema, self._iterator, self.args.codec)
LOGGER.debug('Exiting writer')
@property
def _iterator(self):
for record in self.iterator:
self.processed.value += 1
if self._validate(record):
yield record
def _validate(self, record: dict) -> bool:
try:
validation.validate(record, self.schema)
except validation.ValidationError as error:
LOGGER.warning('Record failed to validate: %s',
str(error).replace('\n', ''))
LOGGER.debug('Invalid record: %r', record)
return False
return True
class CSVWriter(Writer):
EXTENSION = 'csv'
def __init__(self,
args: argparse.Namespace,
iterator: QueueIterator,
error_exit: multiprocessing.Event,
processed: multiprocessing.Value):
super().__init__(args, iterator, error_exit, processed)
self.fields = self._get_fields()
def close(self) -> typing.NoReturn:
super(CSVWriter, self).close()
if not self.dirty and not self.s3uri:
self._file_path.unlink()
@property
def extension(self) -> str:
if self.args.compress:
return '{}.gz'.format(self.EXTENSION)
return self.EXTENSION
def write_from_queue(self) -> typing.NoReturn:
handle, writer = self._get_writer()
for record in self.iterator:
self.dirty = True
writer.writerow(record)
self.processed.value += 1
if self.args.chunk and \
self.processed.value % self.args.chunk_size == 0:
self.chunk += 1
self._finish_file(handle)
handle, writer = self._get_writer()
self._finish_file(handle, False)
def _finish_file(self, handle: typing.TextIO,
open_new: bool = True) -> typing.NoReturn:
handle.seek(0)
if self.args.compress:
self.file.write(gzip.compress(handle.read().encode('UTF-8')))
else:
self.file.write(handle.read().encode('UTF-8'))
if self.s3uri:
self._upload_to_s3()
self.file.close()
if open_new:
self.file = self._get_file()
def _get_fields(self) -> list:
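        # Derive the CSV header from a single sampled item; this assumes every
        # item in the table carries the same attribute set.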
client = boto3.client('dynamodb')
result = client.scan(TableName=self.args.table, Limit=1,
Select='ALL_ATTRIBUTES')
return sorted(result['Items'][0].keys())
def _get_writer(self) -> (typing.TextIO, csv.DictWriter):
handle = io.StringIO()
writer = csv.DictWriter(handle, self.fields)
if self.args.write_header:
writer.writeheader()
return handle, writer
def main() -> typing.NoReturn:
"""Setup and run the backup."""
args = _parse_cli_args()
_configure_logging(args)
_execute(args)
def _configure_logging(args: argparse.Namespace) -> typing.NoReturn:
level = logging.DEBUG if args.verbose else logging.INFO
coloredlogs.install(
level=level, fmt=LOGGING_FORMAT, field_styles=LOGGING_FIELD_STYLES)
for logger in LOGGERS:
logging.getLogger(logger).setLevel(logging.WARNING)
def _execute(args: argparse.Namespace) -> typing.NoReturn:
"""What executes when your application is run. It will spawn the
specified number of threads, reading in the data from the specified file,
adding them to the shared queue. The worker threads will work the queue,
and the application will exit when the queue is empty.
"""
error_exit = multiprocessing.Event()
finished = multiprocessing.Value('f', 0, lock=True)
processed = multiprocessing.Value('f', 0, lock=True)
queued = multiprocessing.Value('f', 0, lock=True)
units = multiprocessing.Value('f', 0, lock=True)
work_queue = multiprocessing.Queue()
iterator = QueueIterator(
error_exit, finished, processed, queued, args.processes,
args.chunk_size, units, work_queue)
if args.format == 'csv':
writer = CSVWriter(args, iterator, error_exit, processed)
else:
writer = AvroWriter(
args, iterator, error_exit, processed, _schema(args.schema))
processes = []
LOGGER.debug('Creating %i processes', args.processes)
for index in range(0, args.processes):
proc = Process(kwargs={'cli_args': args,
'error_exit': error_exit,
'finished': finished,
'processed': processed,
'queued': queued,
'segment': index,
'unit_count': units,
'work_queue': work_queue})
proc.daemon = True
proc.start()
processes.append(proc)
LOGGER.info('Preparing to backup %s from DynamoDB to %s',
args.table, writer.destination)
start_time = time.time()
try:
writer.write_from_queue()
except KeyboardInterrupt:
LOGGER.error('CTRL-C caught, aborting backup')
error_exit.set()
if error_exit.is_set():
LOGGER.error('Exiting in error')
sys.exit(1)
for proc in processes:
if proc.is_alive():
proc.terminate()
# Wait for processes to close out
while sum(1 if p.is_alive() else 0 for p in processes):
alive = sum(1 if p.is_alive() else 0 for p in processes)
LOGGER.debug('Waiting for %i processes to finish', alive)
try:
time.sleep(0.5)
except KeyboardInterrupt:
error_exit.set()
break
writer.close()
LOGGER.info(
'Queued %i records and wrote %i records, using %i DynamoDB '
'units in %.2f seconds',
queued.value, writer.iterator.records, units.value,
time.time() - start_time)
def _parse_cli_args() -> argparse.Namespace:
"""Construct the CLI argument parser and return the parsed the arguments"""
parser = argparse.ArgumentParser(
description='Backup a DynamoDB table',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
dest = parser.add_mutually_exclusive_group(required=True)
dest.add_argument('-d', '--directory',
help='Write the output to the specified directory')
dest.add_argument('--s3', help='S3 URI to upload output to')
f_parser = parser.add_subparsers(title='Output Format', dest='format')
csv_parser = f_parser.add_parser('csv', help='Output to CSV')
csv_parser.add_argument('-c', '--compress', action='store_true',
help='GZip compress CSV output')
csv_parser.add_argument('-w', '--write-header', action='store_true',
help='Write the CSV header line')
avro_parser = f_parser.add_parser('avro', help='Output to Avro Container')
avro_parser.add_argument('-c', '--codec', choices=CODEC_CHOICES,
default='deflate', help='Compression Codec')
avro_parser.add_argument('-s', '--schema', required=True,
help='Avro Schema to use')
f_parser = parser.add_argument_group('Filter Expression Options')
| |
same length as the paths), fscan will assign a different integration
time to each acquisition point.
-If integ_time is positive, it specifies seconds and if negative, specifies
monitor counts.
IMPORTANT Notes:
-no spaces are allowed in the indepvar string.
-all funcs must evaluate to the same number of points
>>> fscan x=[1,3,5,7,9],y=arange(5) 0.1 motor1 x**2 motor2 sqrt(y*x+3)
>>> fscan x=[1,3,5,7,9],y=arange(5) [0.1,0.2,0.3,0.4,0.5] motor1 x**2 \
motor2 sqrt(y*x+3)
"""
# ['integ_time', Type.String, None, 'Integration time']
hints = {'scan': 'fscan',
'allowsHooks': ('pre-scan', 'pre-move', 'post-move', 'pre-acq',
'post-acq', 'post-step', 'post-scan')}
env = ('ActiveMntGrp',)
param_def = [
['indepvars', Type.String, None, 'Independent Variables'],
['integ_time', Type.String, None, 'Integration time'],
['motor_funcs',
ParamRepeat(['motor', Type.Moveable, None, 'motor'],
['func', Type.String, None, 'curve defining path']),
None, 'List of motor and path curves']
]
def prepare(self, *args, **opts):
if args[0].lower() in ["!", "*", "none", None]:
indepvars = {}
else:
indepvars = SafeEvaluator({'dict': dict}).eval(
'dict(%s)' % args[0]) # create a dict containing the indepvars
self.motors = [item[0] for item in args[2]]
self.funcstrings = [item[1] for item in args[2]]
globals_lst = [dict(list(zip(indepvars, values)))
for values in zip(*list(indepvars.values()))]
self.paths = [[SafeEvaluator(globals).eval(
func) for globals in globals_lst] for func in self.funcstrings]
self._integ_time = numpy.array(eval(args[1]), dtype='d')
self.opts = opts
if len(self.motors) == len(self.paths) > 0:
self.N = len(self.motors)
else:
raise ValueError(
'Moveable and func lists must be non-empty and same length')
npoints = len(self.paths[0])
try:
# if everything is OK, the following lines should return a 2D array
# n which each motor path is a row.
# Typical failure is due to shape mismatch due to inconsistent
# input
self.paths = numpy.array(self.paths, dtype='d')
self.paths.reshape((self.N, npoints))
except Exception: # shape mismatch?
# try to give a meaningful description of the error
for p, fs in zip(self.paths, self.funcstrings):
if len(p) != npoints:
raise ValueError('"%s" and "%s" yield different number '
'of points (%i vs %i)' %
(self.funcstrings[0], fs, npoints,
len(p)))
raise # the problem wasn't a shape mismatch
self._nb_points = npoints
if self._integ_time.size == 1:
self._integ_time = self._integ_time * \
numpy.ones(self._nb_points) # extend integ_time
elif self._integ_time.size != self._nb_points:
            raise ValueError('integ_time must either be a scalar or '
                             'length=npoints (%i)' % self._nb_points)
self.name = opts.get('name', 'fscan')
generator = self._generator
moveables = self.motors
env = opts.get('env', {})
constrains = [getCallable(cns) for cns in opts.get(
'constrains', [UNCONSTRAINED])]
# Hooks are not always set at this point. We will call getHooks
# later on in the scan_loop
# self.pre_scan_hooks = self.getHooks('pre-scan')
# self.post_scan_hooks = self.getHooks('post-scan'
self._gScan = SScan(self, generator, moveables, env, constrains)
# _data is the default member where the Macro class stores the data.
# Assign the date produced by GScan (or its subclasses) to it so all
# the Macro infrastructure related to the data works e.g. getter,
# property, etc.
self.setData(self._gScan.data)
def _generator(self):
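        # Each yielded step carries the per-point contract consumed by the scan
        # engine: 'positions' (one target per motor), 'integ_time', 'point_id',
        # plus the hook lists and 'check_func' set up below.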
step = {}
step["pre-move-hooks"] = self.getHooks('pre-move')
step["post-move-hooks"] = self.getHooks('post-move')
step["pre-acq-hooks"] = self.getHooks('pre-acq')
step["post-acq-hooks"] = (self.getHooks('post-acq') +
self.getHooks('_NOHINTS_'))
step["post-step-hooks"] = self.getHooks('post-step')
step["check_func"] = []
for i in range(self._nb_points):
step["positions"] = self.paths[:, i]
step["integ_time"] = self._integ_time[i]
step["point_id"] = i
yield step
def run(self, *args):
for step in self._gScan.step_scan():
yield step
def _get_nr_points(self):
msg = ("nr_points is deprecated since version Jan20. "
"Use nb_points instead.")
self.warning(msg)
return self.nb_points
nr_points = property(_get_nr_points)
class ascanh(aNscan, Macro):
"""Do an absolute scan of the specified motor.
ascan scans one motor, as specified by motor. The motor starts at the
position given by start_pos and ends at the position given by final_pos.
The step size is (start_pos-final_pos)/nr_interv. The number of data
points collected will be nr_interv+1. Count time is given by time which
if positive, specifies seconds and if negative, specifies monitor
counts. """
param_def = [
['motor', Type.Moveable, None, 'Moveable to move'],
['start_pos', Type.Float, None, 'Scan start position'],
['final_pos', Type.Float, None, 'Scan final position'],
['nr_interv', Type.Integer, None, 'Number of scan intervals'],
['integ_time', Type.Float, None, 'Integration time']
]
def prepare(self, motor, start_pos, final_pos, nr_interv, integ_time,
**opts):
self._prepare([motor], [start_pos], [final_pos], nr_interv, integ_time,
mode=HybridMode, **opts)
class scanhist(Macro):
"""Shows scan history information. Give optional parameter scan number to
display details about a specific scan"""
param_def = [
['scan number', Type.Integer, -1,
'scan number. [default=-1 meaning show all scans]'],
]
def run(self, scan_number):
try:
hist = self.getEnv("ScanHistory")
except UnknownEnv:
print("No scan recorded in history")
return
if scan_number < 0:
self.show_all(hist)
else:
self.show_one(hist, scan_number)
def show_one(self, hist, scan_number):
item = None
for h in hist:
if h['serialno'] == scan_number:
item = h
break
if item is None:
self.warning("Could not find scan number %s", scan_number)
return
serialno, title = h['serialno'], h['title']
start = datetime.datetime.fromtimestamp(h['startts'])
end = datetime.datetime.fromtimestamp(h['endts'])
total_time = end - start
start, end, total_time = start.ctime(), end.ctime(), str(total_time)
scan_dir, scan_file = h['ScanDir'], h['ScanFile']
deadtime = '%.1f%%' % h['deadtime']
user = h['user']
store = "Not stored!"
if scan_dir is not None and scan_file is not None:
if isinstance(scan_file, str):
store = os.path.join(scan_dir, scan_file)
else:
store = scan_dir + os.path.sep + str(scan_file)
channels = ", ".join(h['channels'])
cols = ["#", "Title", "Start time", "End time", "Took", "Dead time",
"User", "Stored", "Channels"]
data = [serialno, title, start, end, total_time, deadtime, user, store,
channels]
table = Table([data], row_head_str=cols, row_head_fmt='%*s',
elem_fmt=['%-*s'],
col_sep=' : ')
for line in table.genOutput():
self.output(line)
def show_all(self, hist):
cols = "#", "Title", "Start time", "End time", "Stored"
width = -1, -1, -1, -1, -1
out = List(cols, max_col_width=width)
today = datetime.datetime.today().date()
for h in hist:
start = datetime.datetime.fromtimestamp(h['startts'])
if start.date() == today:
start = start.time().strftime("%H:%M:%S")
else:
start = start.strftime("%Y-%m-%d %H:%M:%S")
end = datetime.datetime.fromtimestamp(h['endts'])
if end.date() == today:
end = end.time().strftime("%H:%M:%S")
else:
end = end.strftime("%Y-%m-%d %H:%M:%S")
scan_file = h['ScanFile']
store = "Not stored!"
if scan_file is not None:
store = ", ".join(scan_file)
row = h['serialno'], h['title'], start, end, store
out.appendRow(row)
for line in out.genOutput():
self.output(line)
class ascanc(aNscan, Macro):
"""Do an absolute continuous scan of the specified motor.
ascanc scans one motor, as specified by motor."""
param_def = [
['motor', Type.Moveable, None, 'Moveable to move'],
['start_pos', Type.Float, None, 'Scan start position'],
['final_pos', Type.Float, None, 'Scan final position'],
['integ_time', Type.Float, None, 'Integration time'],
['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],
]
def prepare(self, motor, start_pos, final_pos, integ_time, slow_down,
**opts):
self._prepare([motor], [start_pos], [final_pos], slow_down,
integ_time, mode=ContinuousMode, **opts)
class a2scanc(aNscan, Macro):
"""two-motor continuous scan"""
param_def = [
['motor1', Type.Moveable, None, 'Moveable 1 to move'],
['start_pos1', Type.Float, None, 'Scan start position 1'],
['final_pos1', Type.Float, None, 'Scan final position 1'],
['motor2', Type.Moveable, None, 'Moveable 2 to move'],
['start_pos2', Type.Float, None, 'Scan start position 2'],
['final_pos2', Type.Float, None, 'Scan final position 2'],
['integ_time', Type.Float, None, 'Integration time'],
['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],
]
def prepare(self, motor1, start_pos1, final_pos1, motor2, start_pos2,
final_pos2, integ_time, slow_down, **opts):
self._prepare([motor1, motor2], [start_pos1, start_pos2],
[final_pos1, final_pos2], slow_down, integ_time,
mode=ContinuousMode, **opts)
class a3scanc(aNscan, Macro):
"""three-motor continuous scan"""
param_def = [
['motor1', Type.Moveable, None, 'Moveable 1 to move'],
['start_pos1', Type.Float, None, 'Scan start position 1'],
['final_pos1', Type.Float, None, 'Scan final position 1'],
['motor2', Type.Moveable, None, 'Moveable 2 to move'],
['start_pos2', Type.Float, None, 'Scan start position 2'],
['final_pos2', Type.Float, None, 'Scan final position 2'],
['motor3', Type.Moveable, None, 'Moveable 3 to move'],
['start_pos3', Type.Float, None, 'Scan start position 3'],
['final_pos3', Type.Float, None, 'Scan final position 3'],
['integ_time', Type.Float, None, 'Integration time'],
['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],
]
def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, integ_time,
slow_down, **opts):
self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3], slow_down,
integ_time, mode=ContinuousMode, **opts)
class a4scanc(aNscan, Macro):
"""four-motor continuous scan"""
param_def = [
['motor1', Type.Moveable, None, 'Moveable 1 to move'],
['start_pos1', Type.Float, None, 'Scan start position 1'],
['final_pos1', Type.Float, None, 'Scan final position 1'],
['motor2', Type.Moveable, None, 'Moveable 2 to move'],
['start_pos2', Type.Float, None, 'Scan start position 2'],
['final_pos2', Type.Float, None, 'Scan final position 2'],
['motor3', Type.Moveable, None, 'Moveable 3 to move'],
['start_pos3', Type.Float, None, 'Scan start position 3'],
['final_pos3', Type.Float, None, 'Scan final position 3'],
        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],
        ['start_pos4', Type.Float, None, 'Scan start position 4'],
        ['final_pos4', Type.Float, None, 'Scan final position 4'],
['integ_time', Type.Float, None, 'Integration time'],
['slow_down', Type.Float, 1, | |
# File: Assistant.py
import pyttsx3  # pip install pyttsx3
import speech_recognition as sr  # pip install speechRecognition
import datetime
import wikipedia  # pip install wikipedia
import webbrowser
import os
import smtplib
import requests
import json
import wolframalpha
import subprocess
import tkinter
import random
import operator
import winshell
import pyjokes
import feedparser
import ctypes
import time
import shutil
from twilio.rest import Client
from clint.textui import progress
from bs4 import BeautifulSoup
import win32com.client as wincl
from urllib.request import urlopen
print("Donut Assistant 12.4.0")
print("Assistant for PCs")
print("Donut Corp @ 2022")
assistanttype = input("Please Select any one: \n \t 1.Donut Assistant Mike \n \t 2.Donut Assistant Annie \n \t 3.Donut Assistant Chloe \n : ")
default_engine = input("Select a default search engine: \n \t Google \n \t Bing \n")
if assistanttype == "1":
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
print("Good Morning!")
speak("Good Morning!")
elif hour>=12 and hour<14:
print("Good Afternoon!")
speak("Good Afternoon!")
else:
print("Good Evening!")
speak("Good Evening!")
print("I am <NAME>. Please tell me how may I help you")
speak("I am <NAME>. Ask me anything!!!")
def takeCommand():
#It takes microphone input from the user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
# print(e)
print("Say that again please...")
            speak("I didn't hear anything; if you said something, please speak loud and clear")
return "None"
return query
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
email = input("Enter your gmail username: ")
        psswrd = input("Enter your gmail password: ")
server.login(email, psswrd)
server.sendmail(email, to, content)
server.close()
if __name__ == "__main__":
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
elif query == "tell me some jokes" or query == "tell some jokes" or query == "tell a joke" or query == "joke" or query == "jokes":
My_joke = pyjokes.get_joke(language="en", category="neutral")
print(My_joke)
speak(My_joke)
            elif 'search' in query:
                speak('Searching ' + default_engine)
                search_terms = query.replace("search", "").strip()
                if default_engine.strip().lower() == 'bing':
                    webbrowser.open("https://www.bing.com/search?q=" + search_terms)
                else:
                    webbrowser.open("https://www.google.com/search?q=" + search_terms)
                speak("Here is what " + default_engine + " found for " + search_terms)
elif 'question' in query:
                speak('I can answer computational and geographical questions. What would you like to ask?')
                question = takeCommand()
client = wolframalpha.Client('R2K75H-7ELALHR35X')
res = client.query(question)
answer = next(res.results).text
speak(answer)
print(answer)
elif 'weather' in query:
from bs4 import BeautifulSoup
import requests
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win32; x32) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
def weather(city):
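                    # Scrapes Google's weather widget off the results page; the
                    # '#wob_*' element ids are Google-internal and may change,
                    # which would break these selectors.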
city = city.replace(" ", "+")
res = requests.get(
f'https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid=chrome&ie=UTF-8', headers=headers)
print("Searching...\n")
soup = BeautifulSoup(res.text, 'html.parser')
location = soup.select('#wob_loc')[0].getText().strip()
time = soup.select('#wob_dts')[0].getText().strip()
info = soup.select('#wob_dc')[0].getText().strip()
weather = soup.select('#wob_tm')[0].getText().strip()
print(location)
speak(location)
print(time)
speak(time)
print(info)
speak(info)
print(weather+"°C")
speak(weather+"°C")
city = input("Enter the Name of City -> ")
city = city+" weather"
weather(city)
print("Have a Nice Day:)")
elif "calculate" in query:
app_id = "Wolframalpha api id"
client = wolframalpha.Client(app_id)
indx = query.lower().split().index('calculate')
query = query.split()[indx + 1:]
res = client.query(' '.join(query))
answer = next(res.results).text
print("The answer is " + answer)
speak("The answer is " + answer)
elif 'open youtube' in query:
speak('OK, I will open YouTube in your default browser')
webbrowser.open("youtube.com")
            elif 'open browser' in query:
                # Open whichever engine was selected at startup.
                webbrowser.open("bing.com" if default_engine.strip().lower() == 'bing' else "google.com")
elif 'open bing' in query:
speak('Opening bing in your default browser')
webbrowser.open("bing.com")
elif 'send feedback' in query:
speak('This will open Donut Support Website in your default browser, you can give feedback there!')
webbrowser.open("Donutsupport.simdif.com")
elif 'open google' in query:
speak('Opening google in your default browser')
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
speak('Opening StackOverflow in your default browser')
webbrowser.open("stackoverflow.com")
elif 'play music' in query:
try:
musidir = input("Enter directory address: ")
music_dir = musidir
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
except:
speak("Sorry Friend!! I couldn't find the directory specified")
elif 'time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
print(strTime)
speak(f"Friend, the time is {strTime}")
elif 'text to speech' in query:
text = input("Type: ")
speak(text)
elif 'when is your birthday' in query:
print("1st March 2022")
speak('I made my debut on 1st March 2022')
elif 'your developers name' in query:
print("<NAME>")
speak("<NAME>")
elif 'open code' in query:
codePath = "C:\\Users\\gauth\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'what is your name' in query:
speak('As I told you in the beginning, my name is <NAME>')
print("I am Don<NAME>")
elif 'who made you' in query:
speak('Who made me??? <NAME>')
speak('He is super genius')
elif 'what do you eat' in query:
speak("I dont't eat the food that humans eat, but i like to have bits and bytes")
elif 'where do you live' in query:
speak("I live in your computer")
elif 'can you sing a song' in query:
                speak("I'm not good at singing, since I am a bot")
                speak('But since you asked me, I will sing it for you')
speak("I will sing my favourite song")
speak("The song is <NAME>'s Smooth Criminal")
speak('''As he came into the window!!!!
Was the sound of a crescendo!!!
He came into her apartment!!!
He left the bloodstains on the carpet!!!
She ran underneath the table!!!
He could see she was unable!!!
So she ran into the bedroom!!!
She was struck down, it was her doom!!!
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
Will you tell us that you're okay?
There's a sound at the window
Then he struck you, a crescendo Annie
He came into your apartment
He left the bloodstains on the carpet
And then you ran into the bedroom
You were struck down
It was your doom
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
You've been hit by-
You've been hit by-
A smooth criminal
So they came in to the outway
It was Sunday, what a black day
Mouth-to-mouth resuscitation
Sounding heartbeats, intimidation
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
So, Annie, are you okay? Are you okay, Annie?
Annie, are you okay?
Will you tell us that you're okay?
There's a sound at the window
That he struck you a crescendo Annie
He came into your apartment
He left the bloodstains on the carpet
Then you ran into the bedroom
You were struck down
It was your doom
Annie, are you okay? So, Annie, are you okay?
Are you okay, Annie?
You've been hit by-
You've been struck by-
A smooth criminal
Okay, I want everybody to clear the area right now
Annie, are you okay? (I don't know)
Will you tell us, that you're okay? (I don't know)
There's a sound at the window (I don't know)
Then he struck you, a crescendo Annie (I don't know)
He came into your apartment (I don't know)
Left bloodstains on the carpet (I don't know why, baby)
And then you ran into the bedroom (help me)
You were struck down
It was your doom, Annie (dag gone it)
Annie, are you okay? (Dag gone it-baby)
Will you tell us that you're okay? (Dag gone it-baby)
There's a sound at the window (dag gone it-baby)
Then he struck you, a crescendo Annie
He came into your apartment (dag gone it)
Left bloodstains on the carpet (hoo, hoo, hoo)
And then you ran into the bedroom
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetTest(sql_dataset_test_base.SqlDatasetTestBase,
parameterized.TestCase):
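  # `_createSqlDataset` comes from sql_dataset_test_base.SqlDatasetTestBase and
  # builds a tf.data SqlDataset over the SQLite fixture (the `students` and
  # `people` tables queried throughout these tests).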
# Test that SqlDataset can read from a database table.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSet(self):
for _ in range(2): # Run twice to verify statelessness of db operations.
dataset = self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string),
num_repeats=2)
self.assertDatasetProduces(
dataset,
expected_output=[(b"John", b"Doe", b"Hi!"),
(b"Jane", b"Moe", b"Hi again!")] * 2,
num_test_iterations=2)
# Test that SqlDataset works on a join query.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetJoinQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT students.first_name, state, motto FROM students "
"INNER JOIN people "
"ON students.first_name = people.first_name "
"AND students.last_name = people.last_name",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"California", b"Hi!"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset can read a database entry with a null-terminator
# in the middle of the text and place the entry in a `string` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetNullTerminator(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, favorite_nonsense_word "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"n\0nsense"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"nonsense\0"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset works when used on two different queries.
# Because the output types of the dataset must be determined at graph-creation
# time, the two queries must have the same number and types of columns.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetReuseSqlDataset(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, state FROM people "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"California"),
self.evaluate(get_next()))
self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an `OutOfRangeError` is raised on the first call to
# `get_next_str_only` if result set is empty.
@combinations.generate(test_base.default_test_combinations())
def testReadEmptyResultSet(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"WHERE first_name = 'Nonexistent'",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an error is raised when `driver_name` is invalid.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithInvalidDriverName(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = self._createSqlDataset(
driver_name="sqlfake",
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string))
self.assertDatasetProduces(dataset, expected_output=[])
# Test that an error is raised when a column name in `query` is nonexistent
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithInvalidColumnName(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, fake_column FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when there is a syntax error in `query`.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetOfQueryWithSyntaxError(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELEmispellECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when the number of columns in `query`
  # does not match the length of `output_types`.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that no results are returned when `query` is an insert query rather
# than a select query. In particular, the error refers to the number of
# output types passed to the op not matching the number of columns in the
# result set of the query (namely, 0 for an insert statement.)
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetOfInsertQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="INSERT INTO students (first_name, last_name, motto) "
"VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt8NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8, dtypes.int8)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt8MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT desk_number, favorite_negative_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.int8, dtypes.int8)))
self.assertEqual((9, -2), self.evaluate(get_next()))
# Max and min values of int8
self.assertEqual((127, -128), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt16NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16, dtypes.int16)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt16MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
# Max value of int16
self.assertEqual((b"John", 32767), self.evaluate(get_next()))
# Min value of int16
self.assertEqual((b"Jane", -32768), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
# Max value of int32
self.assertEqual((b"John", 2147483647), self.evaluate(get_next()))
# Min value of int32
self.assertEqual((b"Jane", -2147483648), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a numeric `varchar` from a SQLite database
# table and place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32VarCharColumnAsInt(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, school_id FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 123), self.evaluate(get_next()))
self.assertEqual((b"Jane", 1000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in an `int64` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a | |
" + outdirfile + " -h " + size + " -D -b '#ffffff'"
fileonweb = '/get?image=' + outfile
if format == 'shapefile':
thisfilter = year + '_' + code + '_'
infile = year + '_' + code + '_' + 'tmp.json'
outfile = year + '_' + code + '_' + 'tmp.shp'
indirfile = imagepathloc + '/' + infile
outdirfile = imagepathloc + '/' + outfile
webapicmd = website + "/api/maps?format=geojson&year=" + year
if province:
webapicmd = webapicmd + "&province=" + province
cmd = "/usr/bin/wget \"" + webapicmd +"\" -O " + indirfile
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
result = p.communicate()[0]
cmd = "/usr/bin/ogr2ogr -f \"ESRI Shapefile\" " + outdirfile + " " + indirfile
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
result = p.communicate()[0]
if outdirfile:
cmd = "cd " + imagepathloc + ";tar -cf " + thisfilter + ".tar *" + thisfilter + "*;gzip " + thisfilter + ".tar;rm -rf *" + thisfilter + "*tmp*"
shapefile = '/get?nlgis=' + thisfilter + ".tar.gz"
if format == 'pdf':
outfile = year + '_' + code + '_' + 'map.PDF'
outdirfile = imagepathloc + '/' + outfile
cmd = "/usr/bin/inkscape " + filesvg + " --export-pdf=" + outdirfile + " -D -b '#ffffff'"
fileonweb = ''
pdffile = '/get?pdf=' + outfile
if cmd:
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
result = p.communicate()[0]
image = outfile
if shapefile:
return "<a href=\"" + shapefile + "\">Download ShapeFile</a>"
resp = make_response(render_template('download.html', image=fileonweb, svgfile=svgfileout, pdffile=pdffile))
return resp
@app.route('/treemap')
def treemap(settings=''):
(years, ctrlist) = ([], '')
showpanel = 'yes'
config = configuration()
if config['error']:
return config['error']
(historical, handle, handles, thisyear) = ('', '', [], '')
if request.args.get('face'):
facehandle = request.args.get('face')
if facehandle not in handles:
handles.append(facehandle)
handle = facehandle
if request.args.get('handle'):
handledataset = request.args.get('handle')
try:
(pids, pidslist) = pidfrompanel(handledataset)
handle = pids[0]
handles.append(handle)
except:
handles.append(handledataset)
handle = handledataset
nopanel = 'yes'
if request.args.get('historical'):
historical = request.args.get('historical')
if request.args.get('year'):
thisyear = request.args.get('year')
if request.args.get('hist'):
historical = request.args.get('hist')
if request.args.get('ctrlist'):
ctrlist = request.args.get('ctrlist')
if ctrlist == config['ctrlist']:
ctrlist = ''
mainlink = '&handle=' + str(handle)
try:
(title, units, years) = dpemetadata(config, handle)
except:
(title, units, years) = ('Panel Data', '', [])
if historical:
mainlink = str(mainlink) + '&historical=on'
if thisyear:
mainlink = str(mainlink) + '&year=' + str(thisyear)
if ctrlist:
mainlink = str(mainlink) + '&ctrlist=' + str(ctrlist)
links = graphlinks(mainlink)
apitreemap = config['apiroot'] + "/api/treemap?action=showyears&handle=" + str(handles[0]) + "&ctrlist=" + str(ctrlist)
years = load_api_data(apitreemap, 1)
total = len(years)
lastyear = years[-1]
resp = make_response(render_template('treemap.html', handle=handle, chartlib=links['chartlib'], barlib=links['barlib'], panellib=links['panellib'], treemaplib=links['treemaplib'], q=handle, showpanel=showpanel, historical=historical, title=title, thisyear=thisyear, years=years, total=total, lastyear=lastyear, ctrlist=ctrlist))
return resp
# Visualize
@app.route('/visualize')
def visualize():
config = configuration()
resp = 'visualize'
view = 'panel'
if request.args.get('view'):
view = request.args.get('view')
if config['error']:
return config['error']
if view == 'panel':
resp = panel()
elif view == 'time-series':
resp = chartlib()
elif view == 'treemap':
resp = treemap()
return resp
@app.route('/panel')
def panel(settings=''):
showpanel = ''
handle = ''
handler = ''
config = configuration()
if config['error']:
return config['error']
f = request.args
for q in f:
value = f[q]
if value:
handle = str(handle) + '&' + str(q) + '=' + str(f[q])
    # Default countries
try:
if not f['yearmin']:
handle = str(handle) + '&yearmin=1500&yearmax=2013'
except:
handle = str(handle) + '&yearmin=1500&yearmax=2013'
try:
if not f['ctrlist']:
handle = str(handle) + '&ctrlist=' + config['ctrlist']
except:
handle = str(handle) + '&ctrlist=' + config['ctrlist']
try:
if f['print']:
showpanel = ''
except:
showpanel = 'yes'
try:
(title, units, years) = dpemetadata(config, handle)
except:
(title, units, years) = ('Panel Data', '', [])
links = graphlinks(handle)
resp = make_response(render_template('panel.html', handle=handle, chartlib=links['chartlib'], barlib=links['barlib'], panellib=links['panellib'], treemaplib=links['treemaplib'], q=handle, showpanel=showpanel, title=title))
return resp
@app.route('/chartlib')
def chartlib():
(thismapurl, apilink, ctrlist, title, units, switch, hist) = ('', '', '', 'Title', 'Units', 'modern', '')
handleface = []
config = configuration()
ctrlist = config['ctrlist']
if config['error']:
return config['error']
urlmatch = re.search(r'(.+)\&face', request.url)
try:
if urlmatch.group(0):
thismapurl = urlmatch.group(1)
except:
thismapurl = request.url
if 'sandbox' not in thismapurl:
thismapurl = thismapurl.replace('http://', 'https://')
handles = []
showpanel = 'yes'
try:
if request.args.get('print'):
showpanel = ''
except:
showpanel = 'yes'
f = request.args
handle = ''
for q in f:
value = f[q]
if value:
handle = str(handle) + '&' + str(q) + '=' + str(f[q])
if request.args.get('ctrlist'):
ctrlist = request.args.get('ctrlist')
if request.args.get('hist'):
switch = 'historical'
hist = request.args.get('hist')
if request.args.get('handle'):
handledataset = request.args.get('handle')
try:
(pids, pidslist) = pidfrompanel(handledataset)
handles.append(pids[0])
except:
handles.append(handledataset)
nopanel = 'yes'
if pids[0]:
apilink = "/api/tabledata?handle=" + str(pids[0])
if ctrlist:
apilink = apilink + '&ctrlist=' + ctrlist
if request.args.get('hist'):
apilink = apilink + '&hist=' + hist
if request.args.get('face'):
handles = []
handleface = request.args.get('face')
handles.append(handleface)
if handleface:
apilink = "/api/tabledata?handle=" + str(handleface)
if ctrlist:
apilink = apilink + '&ctrlist=' + ctrlist
if request.args.get('hist'):
apilink = apilink + '&hist=' + hist
try:
pids.remove(handleface)
except:
nothing = 1
if set(pids) == set(handles):
pids[:] = []
links = graphlinks('&face=' + str(handles[0]) + '&hist=' + hist)
(geocoder, geolist, oecd2webmapper, modern, historical) = request_geocoder(config, '')
# vty hist
(origdata, maindata, metadata) = request_datasets(config, switch, modern, historical, handles, geolist)
try:
title = metadata[handles[0]]['title']
units = metadata[handles[0]]['units']
except:
skip = 0
handledict = {}
if handles:
handle = handles[0]
if pids:
try:
if handles[1]:
pids.remove(handles[0])
except:
skip = 1
hquery = formdatasetquery(pids,'')
d = readdatasets('datasets', json.loads(hquery))
for x in d:
thishandle = x['handle']
if thishandle != handle:
handledict[thishandle] = x['title']
resp = make_response(render_template('chartlib.html', thismapurl=thismapurl, indicators=handledict, apilink=apilink, title=title, units=units, showpanel=showpanel, handle=handle, chartlib=links['chartlib'], barlib=links['barlib'], panellib=links['panellib'], treemaplib=links['treemaplib']))
return resp
@app.route('/graphlib')
def graphlib(settings=''):
showpanel = 'yes'
try:
if request.args.get('print'):
showpanel = ''
except:
showpanel = 'yes'
f = request.args
handle = ''
for q in f:
value = f[q]
if value:
handle = str(handle) + '&' + str(q) + '=' + str(f[q])
links = graphlinks(handle)
resp = make_response(render_template('graphlib.html', handle=handle, chartlib=links['chartlib'], barlib=links['barlib'], panellib=links['panellib'], treemaplib=links['treemaplib'], q=handle, showpanel=showpanel))
return resp
@app.route('/datasetspace')
def datasetspace(settings=''):
(where, query, datasets, metadata, s, permissions) = ({}, '', [], [], {}, 'yes')
where = {'collab': '', 'iish': '', 'harvard': ''}
pagetitle = "Public datasets"
config = configuration()
if config['error']:
return config['error']
root = config['apiroot']
dataversename = 'global'
if request.args.get('dv'):
dataversename = request.args.get('dv')
if request.args.get('q'):
query = request.args.get('q')
if request.args.get('permissions'):
permissions = request.args.get('permissions')
if request.args.get('where'):
where[request.args.get('where')] = 'checked="checked"'
settings = Configuration()
sconnection = ExtrasearchAPI(settings.config['dataverseroot'], dataversename)
if where['harvard']:
# Extract host for Dataverse connection
findhost = re.search('(http\:\/\/|https\:\/\/)(.+)', settings.config['harvarddataverse'])
if findhost:
settings.config['dataversehostname'] = findhost.group(2)
connection = Connection(settings.config['dataversehostname'], settings.config['harvardkey'])
else:
try:
connection = Connection(config['hostname'], settings.config['key'])
except:
return 'Error: no connection to Dataverse. Please try later...'
handlestr = ''
if query:
s['q'] = query
metadata = search_by_keyword(connection, s)
else:
try:
dataverse = connection.get_dataverse(dataversename)
item = dataverse.get_contents()
active = 'yes'
except:
active = None
if active:
try:
for item in dataverse.get_contents():
handlestr+= item['identifier'] + ' '
active = 'yes'
except:
active = None
if not active:
handlestr = sconnection.read_all_datasets()
if handlestr:
s['q'] = handlestr
s['per_page'] = 100
metadata = search_by_keyword(connection, s)
#return str(metadata['items'])
for dataset in metadata['items']:
active = ''
# Private datasets
if permissions == 'closed':
pagetitle = "Restricted datasets"
try:
if (sconnection.has_restricted_data(dataset['global_id'])):
active = 'yes'
except:
active = ''
# Public data
else:
try:
if not (sconnection.has_restricted_data(dataset['global_id'])):
active = 'yes'
except:
active = ''
if active:
try:
                    dataset['author'] = ''
                    for author in dataset['authors']:
                        dataset['author'] += str(author) + ', '
                    dataset['author'] = dataset['author'][:-2]
except:
dataset['author'] = str(dataset['description'])
datasets.append(dataset)
if where['harvard']:
datasets.append(dataset)
(username, projectname) = ('','')
fields = {}
resp = make_response(render_template('search.html', projectname=projectname, username=username, datasets=datasets, searchq=query, pagetitle=pagetitle, where=where, fields=fields))
return resp
@app.route('/')
def start(settings=''):
activepage = 'Home'
config = configuration()
if config['error']:
return config['error']
path = config['path']
pages = getindex(activepage)
perlbin = "/usr/bin/perl "
project = "frontpage"
base = ''
varproject = request.args.get('project')
varbase = request.args.get('base')
if varproject:
project = varproject
if varbase:
base = varbase
cmd = perlbin + path + "/../../bin/collab2api.pl " + project + " '' " + " " + base
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
maincontent = p.communicate()[0]
resp = make_response(render_template('clioinfra.html', maincontent=maincontent))
return resp
@app.route('/benford')
def benford(cmddataset='', cmdyear='', settings=''):
varbase = request.args.get('base')
dataset = request.args.get('dataset')
if cmddataset:
dataset = cmddataset
if cmdyear:
year = cmdyear
m = re.search(r'(\S+)\/(\S+)\:(\d+)\:(\d+)', dataset)
handle = m.group(2) + m.group(3) + '_' + m.group(4)
if not handle:
return 'no dataset'
year = request.args.get('year')
action = request.args.get('action')
apiurl = "http://localhost/api/data?db=strikes_test&handle=" + handle + "&categories=8&datarange=calculate"
if year:
varyear = year
else:
varyear = '1890'
| |
# -*- coding:utf-8 -*-
import os
import time
import six
import eventlet
import cPickle
import contextlib
import mysql
import mysql.connector
from simpleutil.config import cfg
from simpleutil.log import log as logging
from simpleutil.utils.systemutils import ExitBySIG
from simpleutil.utils.systemutils import UnExceptExit
from simpleservice.ormdb.tools.backup import mysqldump
from simpleservice.ormdb.tools.backup import mysqlload
from simpleflow.utils.storage_utils import build_session
from simpleflow.api import load
from simpleflow.task import Task
from simpleflow.types import failure
from simpleflow.patterns import linear_flow as lf
from simpleflow.patterns import unordered_flow as uf
from goperation.manager.rpc.agent import sqlite
from simpleflow.storage.middleware import LogBook
from simpleflow.storage import Connection
from simpleflow.engines.engine import ParallelActionEngine
from goperation.utils import safe_fork
from goperation.manager import common as manager_common
from gogamechen3 import common
from gogamechen3.api import exceptions
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SWALLOW = 'SWALLOW'
DUMPING = 'DUMPING'
SWALLOWED = 'SWALLOWED'
INSERT = 'INSERT'
FINISHED = 'FINISHED'
def sqlfile(entity):
return '%s-db-%d.sql' % (common.GAMESERVER, entity)
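# Illustrative note (not part of the original file): sqlfile() only builds the
# per-entity dump filename, e.g. sqlfile(3) would yield 'gamesvr-db-3.sql' if
# common.GAMESERVER were 'gamesvr' (an illustrative value only).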
@contextlib.contextmanager
def dbconnect(host, port, user, passwd, schema,
raise_on_warnings=True):
if not schema:
raise ValueError('Schema is none?')
kwargs = dict(user=user, passwd=passwd,
host=host, port=port,
database=schema,
raise_on_warnings=raise_on_warnings)
conn = mysql.connector.connect(**kwargs)
try:
yield conn
finally:
conn.close()
def cleandb(host, port, user, passwd, schema):
"""drop 所有表"""
with dbconnect(host=host, port=port,
                   user=user, passwd=passwd,
schema=schema) as conn:
cursor = conn.cursor()
cursor.execute('show tables')
tables = cursor.fetchall()
for table in tables:
cursor.execute('drop table %s' % table[0])
# cursor.fetchall()
cursor.close()
class Swallow(Task):
def __init__(self, uuid, steps, entity, endpoint):
self.endpoint = endpoint
self.entity = entity
self.stpes = steps
self.uuid = uuid
super(Swallow, self).__init__(name='swallow_%d' % entity, provides='db_%d' % entity)
def execute(self, entity, timeout):
step = self.stpes[self.entity]
if step in (DUMPING, SWALLOW):
with self.endpoint.mlock:
result = self.endpoint.client.swallow_entity(self.entity, self.uuid, entity)
if result.get('resultcode') != manager_common.RESULT_SUCCESS or not result.get('data'):
LOG.error('Swallow success, but can not find database from result')
return None
data = result.get('data')
databases = data[0].get('databases')
if not databases:
LOG.error('Swallow success, databases is empty')
return None
self.stpes[self.entity] = DUMPING
return databases
return None
class DumpData(Task):
NODUMPTABLES = [
'battlefield_log_lowfight',
'limit_level',
'mining_area',
'pay_censoring',
'player_censoring',
'quick_report',
'pvp_arena_pet_rank',
'var_world',
'pvp_cupmatch_fight_log',
'oper_record_plot',
'timer_boss',
'pvp_arena_rank',
'pve_campaign_log',
]
DUMPONLYONE = [
'var_world'
]
def __init__(self, uuid, steps, entity,
endpoint=None,
skip_only_one=True):
self.entity = entity
self.stpes = steps
self.uuid = uuid
self.endpoint = endpoint
self.skip_only_one = skip_only_one
super(DumpData, self).__init__(name='dump_%d' % entity,
rebind=['mergeroot', 'dtimeout', 'db_%d' % entity])
def _ext_args(self, schema):
extargs = ['-t', '-c']
nodumps = (self.NODUMPTABLES + self.DUMPONLYONE) if self.skip_only_one else self.NODUMPTABLES
for table in nodumps:
extargs.append('--ignore-table=%s.%s' % (schema, table))
return extargs
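    # Illustrative sketch (not part of the original file; the schema name is a
    # made-up placeholder): for schema 'gamedata_1' with skip_only_one=True,
    # _ext_args() returns roughly
    #     ['-t', '-c',
    #      '--ignore-table=gamedata_1.battlefield_log_lowfight',
    #      ...,
    #      '--ignore-table=gamedata_1.var_world']
    # i.e. dump table data only (-t) as complete INSERT statements (-c), while
    # skipping every table that should not be carried into the merged database.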
@staticmethod
def _prepare_database(databases):
return databases[common.DATADB]
def execute(self, root, timeout, databases):
"""
导出需要合并的实体数据库
如果init.sql文件不存在,导出一份init.sql文件
"""
step = self.stpes[self.entity]
if step == DUMPING:
_file = os.path.join(root, sqlfile(self.entity))
if os.path.exists(_file):
return
database = DumpData._prepare_database(databases)
try:
mysqldump(_file,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None,
extargs=self._ext_args(database.get('schema')),
logfile=None, callable=safe_fork,
timeout=timeout)
except (ExitBySIG, UnExceptExit):
LOG.error('Dump database of entity %d fail' % self.entity)
if os.path.exists(_file):
try:
os.remove(_file)
                    except OSError:
                        LOG.error('Try remove file %s fail!' % _file)
raise exceptions.MergeException('Remove error file %s fail' % _file)
else:
self.stpes[self.entity] = SWALLOWED
# create init file
initfile = os.path.join(root, 'init.sql')
if not os.path.exists(initfile):
try:
with self.endpoint.mlock:
if not os.path.exists(initfile):
LOG.info('Dump init sql from entity %d, schema %s' % (self.entity, database.get('schema')))
mysqldump(initfile,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None, extargs=['-R', '-d'],
logfile=None, callable=safe_fork,
timeout=timeout)
except (ExitBySIG, UnExceptExit):
if os.path.exists(initfile):
try:
os.remove(initfile)
                            except OSError:
LOG.error('Try remove init sql file fail!')
class Swallowed(Task):
def __init__(self, uuid, steps, entity, endpoint):
self.endpoint = endpoint
self.entity = entity
self.stpes = steps
self.uuid = uuid
super(Swallowed, self).__init__(name='swallowed_%d' % entity)
def execute(self, entity, timeout):
step = self.stpes[self.entity]
if step == SWALLOWED:
with self.endpoint.mlock:
result = self.endpoint.client.swallowed_entity(self.entity, self.uuid, entity)
try:
if result.get('resultcode') != manager_common.RESULT_SUCCESS or not result.get('data'):
LOG.error('Swallowed success, but can not find areas from result')
return None
data = result.get('data')
areas = data[0].get('areas')
if not areas:
raise KeyError('Not areas found')
except KeyError as e:
LOG.error('Get areas fail %s' % e.message)
else:
self.stpes[self.entity] = INSERT
for i in range(5):
if entity not in self.endpoint.konwn_appentitys:
eventlet.sleep(3)
try:
self.endpoint.konwn_appentitys[entity]['areas'].extend(areas)
except KeyError:
raise exceptions.MergeException('Target entity %d not in konwn appentitys' % entity)
LOG.debug('Extend new areas of konwn appentitys success')
class SafeCleanDb(Task):
def __init__(self):
super(SafeCleanDb, self).__init__(name='cleandb')
def execute(self, root, database):
"""清空前备份数据库,正常情况下备份内容为空"""
LOG.debug('Try backup database before clean')
safebak = os.path.join(root, 'safebak.%d.gz' % time.time())
# back up database
mysqldump(safebak,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None, extargs=['-R'],
logfile=None, callable=safe_fork,
timeout=15)
LOG.debug('Backup database before clean success, try clean it')
# drop all table
cleandb(host=database.get('host'), port=database.get('port'),
                user=database.get('user'), passwd=database.get('passwd'),
schema=database.get('schema'))
class InitDb(Task):
def __init__(self):
super(InitDb, self).__init__(name='initdb')
@staticmethod
def _predo(root, database):
"""对原始数据库做特殊处理"""
prefile = os.path.join(root, 'pre.sql')
if os.path.exists(prefile):
mysqlload(prefile,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None, extargs=None,
logfile=None, callable=safe_fork,
timeout=30)
def execute(self, timeline, root, database):
LOG.debug('Try init databases')
initfile = os.path.join(root, 'init.sql')
logfile = os.path.join(root, 'initdb.err.%d.log' % timeline)
mysqlload(initfile,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None, extargs=None,
logfile=logfile, callable=safe_fork,
timeout=15)
LOG.debug('Init databases success, try call pre.sql')
os.remove(logfile)
self._predo(root, database)
class InserDb(Task):
"""插入各个实体的数据库"""
def __init__(self, entity, stoper):
self.entity = entity
self.stoper = stoper
super(InserDb, self).__init__(name='insert-%d' % entity)
def execute(self, timeline, root, database, timeout):
if self.stoper[0]:
raise exceptions.MergeException('Stop mark is true')
_file = os.path.join(root, sqlfile(self.entity))
logfile = os.path.join(root, 'insert-%d.err.%d.log' % (self.entity, timeline))
LOG.info('Insert database of entity %d, sql file %s' % (self.entity, _file))
mysqlload(_file,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None, extargs=None,
logfile=logfile, callable=safe_fork,
timeout=timeout)
LOG.info('Insert database of entity %d success' % self.entity)
os.remove(logfile)
def revert(self, result, database, **kwargs):
"""插入失败清空数据库"""
if isinstance(result, failure.Failure):
if not self.stoper[0]:
LOG.warning('Insert database of entity %d fail' % self.entity)
self.stoper[0] = 1
else:
LOG.warning('Insert database of entity %d get stop mark' % self.entity)
class PostDo(Task):
def __init__(self, uuid, endpoint):
self.uuid = uuid
self.endpoint = endpoint
super(PostDo, self).__init__(name='postdo')
@staticmethod
def _postdo(root, database):
"""合并完成后特殊处理"""
postfile = os.path.join(root, 'post.sql')
if not os.path.exists(postfile):
with open(postfile, 'w') as f:
f.write('delete from var_player where `key` = 100;\n')
f.write('update guilds set is_change_name = 0;\n')
if os.path.exists(postfile):
mysqlload(postfile,
database.get('host'), database.get('port'),
database.get('user'), database.get('passwd'),
database.get('schema'),
character_set=None, extargs=None,
logfile=None, callable=safe_fork,
timeout=30)
def execute(self, root, database):
"""post execute"""
try:
self._postdo(root, database)
except Exception:
            LOG.exception('Post database execute fail')
raise
def create_merge(appendpoint, uuid, entitys, middleware, opentime, chiefs):
mergepath = 'merge-%s' % uuid
mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
if not os.path.exists(mergeroot):
os.makedirs(mergeroot)
stepsfile = os.path.join(mergeroot, 'steps.dat')
if os.path.exists(stepsfile):
raise exceptions.MergeException('Steps file exist, can not merge')
data = {}
steps = {}
for _entity in entitys:
steps[_entity] = SWALLOW
data['opentime'] = opentime
data['chiefs'] = chiefs
data['steps'] = steps
with open(stepsfile, 'wb') as f:
cPickle.dump(data, f)
merge_entitys(appendpoint, uuid, middleware.entity, middleware.databases)
def merge_entitys(appendpoint, uuid, entity, databases):
datadb = databases[common.DATADB]
mergepath = 'merge-%s' % uuid
mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
stepsfile = os.path.join(mergeroot, 'steps.dat')
initfile = os.path.join(mergeroot, 'init.sql')
if not os.path.exists(stepsfile):
raise exceptions.MergeException('Steps file not exist')
with open(stepsfile, 'rb') as f:
data = cPickle.load(f)
steps = data['steps']
prepares = []
for _entity, step in six.iteritems(steps):
        # If a post.sql execution error has no impact on the overall result,
        # the step can be changed to FINISHED directly to avoid repeating the merge steps.
if step == FINISHED:
for _step in six.itervalues(steps):
if _step != FINISHED:
raise exceptions.MergeException('Steps is finish?')
appendpoint.client.finish_merge(uuid)
appendpoint.flush_config(entity, databases,
opentime=data['opentime'],
chiefs=data['chiefs'])
return
if step != INSERT:
prepares.append(_entity)
    mini_entity = min(prepares) if prepares else None
if prepares:
name = 'prepare-merge-at-%d' % int(time.time())
book = LogBook(name=name)
store = dict(timeout=5, dtimeout=600, mergeroot=mergeroot, entity=entity)
taskflow_session = build_session('sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
connection = Connection(taskflow_session)
prepare_uflow = uf.Flow(name)
for _entity in prepares:
entity_flow = lf.Flow('prepare-%d' % _entity)
entity_flow.add(Swallow(uuid, steps, _entity, appendpoint))
entity_flow.add(DumpData(uuid, steps, _entity, appendpoint, _entity != mini_entity))
entity_flow.add(Swallowed(uuid, steps, _entity, appendpoint))
prepare_uflow.add(entity_flow)
engine = load(connection, prepare_uflow, store=store,
book=book, engine_cls=ParallelActionEngine,
max_workers=4)
try:
engine.run()
except Exception as e:
if LOG.isEnabledFor(logging.DEBUG):
LOG.exception('Prepare merge task execute fail')
raise exceptions.MergeException('Prepare merge task execute fail, %s %s' % (e.__class__.__name__, str(e)))
finally:
connection.session = None
taskflow_session.close()
with open(stepsfile, 'wb') as f:
cPickle.dump(data, f)
for _entity, step in six.iteritems(steps):
if step != INSERT:
raise exceptions.MergeException('Some step not on %s' % INSERT)
if not os.path.exists(os.path.join(mergeroot, sqlfile(_entity))):
raise exceptions.MergeException('Entity %d sql file not exist' % _entity)
if not os.path.exists(initfile):
LOG.error('Init database file not exist')
raise exceptions.MergeException('Init database file not exist')
LOG.info('Prepare merge success, try merge database')
now = int(time.time())
name = 'merge-at-%d' % now
book = LogBook(name=name)
store = dict(timeout=1800, root=mergeroot, database=datadb, timeline=now)
taskflow_session = build_session('sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
connection = Connection(taskflow_session)
merge_flow = lf.Flow('merge-to')
merge_flow.add(SafeCleanDb())
merge_flow.add(InitDb())
insert_lflow = lf.Flow('insert-db')
stoper = [0]
for _entity in steps:
insert_lflow.add(InserDb(_entity, stoper))
merge_flow.add(insert_lflow)
merge_flow.add(PostDo(uuid, appendpoint))
engine = load(connection, merge_flow, store=store,
book=book, engine_cls=ParallelActionEngine,
max_workers=4)
try:
engine.run()
except Exception as e:
if LOG.isEnabledFor(logging.DEBUG):
LOG.exception('Merge database task execute fail')
raise exceptions.MergeException('Merge database task execute fail, %s %s' % (e.__class__.__name__, str(e)))
else:
for _entity in steps:
steps[_entity] = FINISHED
with open(stepsfile, 'wb') as f:
cPickle.dump(data, f)
appendpoint.client.finish_merge(uuid)
appendpoint.flush_config(entity, databases,
opentime=data['opentime'],
chiefs=data['chiefs'])
LOG.info('Merge | |
'''
Original repository:
https://github.com/eriklindernoren/PyTorch-YOLOv3
https://github.com/junsukchoe/ADL
'''
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from .model_utils import build_targets, to_cpu, non_max_suppression
__all__ = ['ADL', 'Upsample', 'EmptyLayer', 'YOLOLayer']
class ADL(nn.Module):
    '''
    Attention-based dropout layer (ADL).
    Commonly used settings: adl_drop_rate=0.75, adl_drop_threshold=0.8
    (the constructor defaults below are 0.5 / 0.5).
    '''
def __init__(self, adl_drop_rate=0.5, adl_drop_threshold=0.5):
super(ADL, self).__init__()
if not (0 <= adl_drop_rate <= 1):
raise ValueError("Drop rate must be in range [0, 1].")
if not (0 <= adl_drop_threshold <= 1):
raise ValueError("Drop threshold must be in range [0, 1].")
self.adl_drop_rate = adl_drop_rate
self.adl_drop_threshold = adl_drop_threshold
self.attention = None
self.drop_mask = None
def forward(self, input_):
if self.training:
attention = torch.mean(input_, dim=1, keepdim=True)
importance_map = torch.sigmoid(attention)
drop_mask = self._drop_mask(attention)
selected_map = self._select_map(importance_map, drop_mask)
return input_.mul(selected_map)
else:
return input_
def _select_map(self, importance_map, drop_mask):
random_tensor = torch.rand([], dtype=torch.float32) + self.adl_drop_rate
binary_tensor = random_tensor.floor()
return (1. - binary_tensor) * importance_map + binary_tensor * drop_mask
def _drop_mask(self, attention):
b_size = attention.size(0)
max_val, _ = torch.max(attention.view(b_size, -1), dim=1, keepdim=True)
thr_val = max_val * self.adl_drop_threshold
thr_val = thr_val.view(b_size, 1, 1, 1)
return (attention < thr_val).float()
def extra_repr(self):
return 'adl_drop_rate={}, adl_drop_threshold={}'.format(
self.adl_drop_rate, self.adl_drop_threshold)
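# Hypothetical usage sketch (not part of the original file): ADL is a plain
# nn.Module, so it can be appended after any convolutional block. The shapes
# and parameter values below are illustrative only.
#
#   adl = ADL(adl_drop_rate=0.75, adl_drop_threshold=0.8)
#   feat = torch.randn(2, 256, 16, 16)   # (batch, channels, H, W)
#   adl.train()
#   out = adl(feat)     # multiplies feat by either the importance map or the drop mask
#   adl.eval()
#   same = adl(feat)    # identity pass-through at inference time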
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, img_dim=512):
# def __init__(self, anchors, num_classes, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
# self.num_classes = num_classes
self.num_classes = 0
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim1 = img_dim
self.img_dim2 = img_dim
self.img_dim = (self.img_dim1,self.img_dim2)
self.grid_size1 = -1 # grid size
self.grid_size2 = -1 # grid size
self.grid_size = (self.grid_size1,self.grid_size2)
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size1, self.grid_size2 = self.grid_size = grid_size
g1, g2 = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride1 = self.img_dim1 / self.grid_size1
self.stride2 = self.img_dim2 / self.grid_size2
self.stride = (self.stride1,self.stride2)
# Calculate offsets for each grid
self.grid_x = torch.arange(g2).repeat(g1, 1).view([1, 1, g1, g2]).type(FloatTensor)
self.grid_y = torch.arange(g1).repeat(g2, 1).t().view([1, 1, g1, g2]).type(FloatTensor)
self.scaled_anchors = FloatTensor([(a_w / self.stride2, a_h / self.stride1) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, img_dim=None, scene_conf=None):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
self.img_dim1 = img_dim[0]
self.img_dim2 = img_dim[1]
self.img_dim = (self.img_dim1,self.img_dim2)
num_samples = x.size(0)
grid_size1 = x.size(2)
grid_size2 = x.size(3)
grid_size = (grid_size1,grid_size2)
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size1, grid_size2)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
# pred_cls = torch.sigmoid(prediction[..., 5:]) # No Cls pred.
# If grid size does not match current we compute new offsets
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = (x.data + self.grid_x) * self.stride1
pred_boxes[..., 1] = (y.data + self.grid_y) * self.stride2
pred_boxes[..., 2] = (torch.exp(w.data) * self.anchor_w) * self.stride1
pred_boxes[..., 3] = (torch.exp(h.data) * self.anchor_h)* self.stride2
# print('image :', self.img_dim)
# print('grid : ',self.grid_size)
# print('stride: ',self.stride)
if scene_conf is not None:
# scene_align = F.interpolate(scene_conf,scale_factor=grid_size/scene_conf.size(2),mode='nearest')
scene_align = F.interpolate(scene_conf,scale_factor=grid_size1/scene_conf.size(2),mode='area')
pred_conf = pred_conf * scene_align
output = torch.cat(
(
pred_boxes.view(num_samples, -1, 4),
# pred_boxes.view(num_samples, -1, 4) * self.stride,
pred_conf.view(num_samples, -1, 1),
# pred_cls.view(num_samples, -1, self.num_classes),
),
-1,
)
if targets is None:
return output, 0
else:
# iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
iou_scores, obj_mask, noobj_mask, tx, ty, tw, th, tconf = build_targets(
pred_boxes=pred_boxes,
# pred_cls=pred_cls,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
# loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
# total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf
# Metrics
# cls_acc = 100 * class_mask[obj_mask].mean()
conf_obj = pred_conf[obj_mask].mean()
conf_noobj = pred_conf[noobj_mask].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
# detected_mask = conf50 * class_mask * tconf
detected_mask = conf50 * tconf
precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size[0],
}
return output, total_loss
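# Hypothetical usage sketch (not part of the original file): the anchors, the
# feature-map size and the image size below are illustrative only. Since
# num_classes is fixed to 0, the layer expects num_anchors * 5 input channels.
#
#   layer = YOLOLayer(anchors=[(116, 90), (156, 198), (373, 326)], img_dim=512)
#   feats = torch.randn(1, 3 * 5, 16, 16)            # (batch, anchors * 5, grid_h, grid_w)
#   boxes, loss = layer(feats, targets=None, img_dim=(512, 512))
#   # boxes has shape (1, 3 * 16 * 16, 5): x, y, w, h in pixels plus objectness confidence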
class YOLOLayer_BAK(nn.Module):
"""
Detection layer: only for square images
"""
def __init__(self, anchors, img_dim=512):
# def __init__(self, anchors, num_classes, img_dim=416):
        super(YOLOLayer_BAK, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
# self.num_classes = num_classes
self.num_classes = 0
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = -1 # grid size
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
# Calculate offsets for each grid
self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor)
self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, img_dim=None, scene_conf=None):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
self.img_dim = img_dim[0]
num_samples = x.size(0)
grid_size = x.size(2)
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
# pred_cls = torch.sigmoid(prediction[..., 5:]) # No Cls pred.
# If grid size does not match current we compute new offsets
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
if scene_conf is not None:
# scene_align = F.interpolate(scene_conf,scale_factor=grid_size/scene_conf.size(2),mode='nearest')
scene_align = F.interpolate(scene_conf,scale_factor=grid_size/scene_conf.size(2),mode='area')
pred_conf = pred_conf * scene_align
output = torch.cat(
(
pred_boxes.view(num_samples, -1, 4) * self.stride,
pred_conf.view(num_samples, -1, 1),
# pred_cls.view(num_samples, -1, self.num_classes),
),
-1,
)
if targets is None:
return output, 0
else:
# iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
iou_scores, obj_mask, noobj_mask, tx, ty, tw, th, tconf = build_targets(
pred_boxes=pred_boxes,
# pred_cls=pred_cls,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
# loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
# total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf
# Metrics
# cls_acc = 100 * class_mask[obj_mask].mean()
conf_obj = pred_conf[obj_mask].mean()
conf_noobj = pred_conf[noobj_mask].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores | |
physics" and other longitudinal studies (where we ask you to come back for more than one session), we'll be collecting enough data to give you a report of your child's responses after you complete all the sessions. Because this is a fairly labor-intensive process (we generally have two undergraduates watch each video and record where your child is looking each frame, and then we do some analysis to create a summary), it may take us several weeks to produce your report.</p>
<p>Please note that none of the measures we collect are diagnostic! For instance, while we hope you'll be interested to learn that your child looked 70% of the time at videos where things fell up versus falling down, we won't be able to tell you whether this means your child is going to be especially good at physics.</p>
<p>We are generally able to provide general feedback (confirming that we got your video and will be able to use it) within a week. To see this feedback, log in, then go to "Studies" and select "Past Studies."</p>
<p>If you're interested in getting individual results right away, please see our <a href="/resources">Resources</a> section for fun at-home activities you can try with your child.</p>
</div>
</div>
</div>
</div>
</div>
<h3>Technical</h3>
<div class="panel-group" role="tablist">
<div class="panel panel-default">
<div class="panel-heading" role="tab">
<h4 class="panel-title"><a data-toggle="collapse" data-parent="#accordion" href="#collapse18">What browsers are supported?</a></h4>
</div>
<div id="collapse18" class="panel-collapse collapse">
<div class="panel-body">
<div>
<p>Lookit supports recent versions of Chrome, Firefox, and Safari. We are not currently able to support Internet Explorer.</p>
</div>
</div>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading" role="tab">
<h4 class="panel-title"><a data-toggle="collapse" data-parent="#accordion" href="#collapse19">Can we do a study on my phone or tablet?</a></h4>
</div>
<div id="collapse19" class="panel-collapse collapse">
<div class="panel-body">
<div>
<p>Not yet! Because we're measuring kids' looking patterns, we need a reasonably stable view of their eyes and a big enough screen that we can tell whether they're looking at the left or the right side of it. We're excited about the potential for touchscreen studies that allow us to observe infants and toddlers exploring, though!</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
""",
),
dict(
url="/scientists/",
title="The Scientists",
content=f"""
<div class="main">
<div class="lookit-row lookit-page-title">
<div class="container">
<h2>Meet the Scientists</h2>
</div>
</div>
<div class="lookit-row scientists-row">
<div class="container">
<div class="row">
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/kimandremy.png"></div>
<h3><NAME></h3>
<p>Kim is a graduate student in the Early Childhood Cognition Lab and Mama to six-year-old Remy. She developed Lookit partly to enable other single parents to participate in research!</p>
<p>Research interests: Origins of conscious experience--or what it's like to be a baby</p>
<p>Hobbies: Board games, aerial silks, bicycling</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/laura.jpg"></div>
<h3><NAME></h3>
<p>Laura is the PI of the Early Childhood Cognition Lab.</p>
<p>Research interests: How children arrive at a common-sense understanding of the physical and social world through exploration and instruction.</p>
<p>Hobbies: hiking, reading, and playing</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/molly.jpg"></div>
<h3><NAME></h3>
<p>Molly is a graduate student in the Lab for Developmental Studies at Harvard. She’ll be starting her own lab in July 2017 as an Assistant Professor at NYU. Molly explores the origins of abstract thought, especially in the domains of geometry and number.</p>
<p>Hobbies: ballet, tennis, speaking French</p>
</div>
</div>
<div class="row">
<h3>Alumni & Collaborators</h3>
<hr>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/jessica.jpg"></div>
<h3><NAME></h3>
<p>Undergraduate, MIT</p>
<p>Hobbies: Playing cards, eating watermelon, and hanging out with friends</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/audrey.jpg"></div>
<h3><NAME> (Summer 2016)</h3>
<p>Undergraduate, MIT</p>
<p>Hobbies: Biking, learning and exploring</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/hope.jpg"></div>
<h3>Hope Fuller-Becker (Sp 2015, Sp 2016)</h3>
<p>Undergraduate, Wellesley College</p>
<p>Hobbies: drawing, painting, reading and running</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/rianna.jpg"></div>
<h3><NAME> (IAP, Sp, Fall 2015; IAP, Sp 2016)</h3>
<p>Undergraduate, MIT</p>
<p>Hobbies: Singing, playing the piano, and karate!</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/junyi.jpg"></div>
<h3><NAME> (Summer 2015)</h3>
<p>Recent graduate, Vanderbilt University</p>
<p>Hobbies: Rock climbing, board and card games</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/joseph.jpg"></div>
<h3><NAME> (Summer 2015)</h3>
<p>Undergraduate, Skidmore College</p>
<p>Hobbies: Creating, discovering, and playing electric guitar!</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/annie.jpg"></div>
<h3><NAME> (IAP, Sp 2015)</h3>
<p>Undergraduate, MIT</p>
<p>Hobbies: Running, spending time outdoors, listening to music</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/jeanyu.jpg"></div>
<h3><NAME> (IAP, Sp 2015)</h3>
<p>Undergraduate, Wellesley College</p>
<p>Hobbies: ballet, figure skating, piano, making art, learning about art, reading about art, learning about the brain</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/daniela.jpg"></div>
<h3><NAME> (Sp 2015)</h3>
<p>Undergraduate, MIT</p>
<p>Hobbies: Crossfit athlete, swimming, boxing, painting and sketching</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/jean.jpg"></div>
<h3><NAME> (Fa 2014)</h3>
<p>Undergraduate, MIT</p>
<p>Research interests: cognitive development and learning in young children</p>
<p>Hobbies: Running, cycling, Taekwondo, art, being outdoors, chilling with her dog</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/melissa.png"></div>
<h3><NAME></h3>
<p>As a graduate student in the Early Childhood Cognition Lab, Melissa advised and designed stimuli for the "Learning New Verbs" study.</p>
<p>Hobbies: Sewing, tango, and singing.</p>
</div>
<div class="lookit-scientist col-sm-6 col-md-4 col-lg-3">
<div class="profile-img"><img src="{settings.STATIC_URL}images/rachel.png"></div>
<h3><NAME></h3>
<p>Rachel is now a graduate student at ECCL; in her two years as our lab coordinator she helped get Lookit off the ground!</p>
<p>Hobbies: Reading historical fiction and cooking</p>
</div>
</div>
</div>
</div>
</div>
""",
),
dict(
url="/resources/",
title="Resources",
content=f"""
<div class="main">
<div class="lookit-row lookit-page-title">
<div class="container">
<h2>Resources</h2>
</div>
</div>
<div class="lookit-row resources-row">
<div class="container">
<div class="resources-item">
<h3>Find a developmental lab near you</h3>
<p>Interested in participating in research in person? Find a list of labs that study child development in your state.</p>
<p>Did we miss your lab, or one you know about? Our apologies, and please let us know at <a href="mailto:<EMAIL>"><EMAIL></a></p>
<div>
<div class="row resources-local">
<div class="col-md-4">
<select id="state-select" onchange="populateLabList(this.value)">
<option></option>
<option>Alabama</option>
<option>Alaska</option>
<option>Arizona</option>
<option>Arkansas</option>
<option>California</option>
<option>Colorado</option>
<option>Connecticut</option>
<option>Delaware</option>
<option>Florida</option>
<option>Georgia</option>
<option>Hawaii</option>
<option>Idaho</option>
<option>Illinois</option>
<option>Indiana</option>
<option>Iowa</option>
<option>Kansas</option>
<option>Kentucky</option>
<option>Louisiana</option>
<option>Maine</option>
<option>Maryland</option>
<option>Massachusetts</option>
<option>Michigan</option>
<option>Minnesota</option>
<option>Mississippi</option>
<option>Missouri</option>
<option>Montana</option>
<option>Nebraska</option>
<option>Nevada</option>
<option>New Hampshire</option>
<option>New Jersey</option>
<option>New Mexico</option>
<option>New York</option>
<option>North Carolina</option>
<option>North Dakota</option>
<option>Ohio</option>
<option>Oklahoma</option>
<option>Oregon</option>
<option>Pennsylvania</option>
<option>Rhode Island</option>
<option>South Carolina</option>
<option>South Dakota</option>
<option>Tennessee</option>
<option>Texas</option>
<option>Utah</option>
<option>Vermont</option>
<option>Virginia</option>
<option>Washington</option>
<option>West Virginia</option>
<option>Wisconsin</option>
<option>Wyoming</option>
</select>
</div>
<div class="col-md-8">
<b class='selected-state'></b>
<p id="nothing-to-show"></p>
<ul id="lab-list">
</ul>
</div>
</div>
</div>
</div>
<hr>
<div class="resources-item">
<h3>Looking for other 'citizen science' projects your family can help?</h3>
<p>Check out <a href="http://scistarter.com/index.html">SciStarter</a>, which has many projects suitable for elementary school age children and up, and for the whole family to explore together!</p>
</div>
<hr>
<div class="resources-item">
<h3>Activities to try at home</h3>
<p>Want to learn more about cognitive development? Here are some activities that may give you some insight into your own child's developing mind. Instead of studies our lab is running, these are "at home labs" for parents to try on their own--but please feel free to contact us with any questions!</p>
<h4>Learning about other minds: your child's developing theory of mind</h4><iframe allowfullscreen frameborder="0" height="315" src="https://www.youtube.com/embed/uxyVYATX9-M" width="560"></iframe>
<p><strong>Age range</strong>: 2.5 to 5 years</p>
<p><strong>What you'll need</strong>: For the "Maxi and the chocolate" story, any props you'd like (you can try drawing a picture or acting out the story). For the word-learning task, two containers to hold two objects your child doesn't know a name for (weird household objects like whisks or bike parts work great).</p>
<p>In this lab, you'll see how your child thinks about what other people are thinking. Children under about four years of age tend to show a lot of trouble expressing what's going on in situations where someone else knows less than they do. They will often absolutely insist that everyone knows what they themselves know!</p>
<h4>Learning to count: measuring your child's N-knower level</h4><iframe allowfullscreen frameborder="0" height="315" src="https://www.youtube.com/embed/i0q6H8MRXo8" width="560"></iframe>
<p><strong>Age range</strong>: 1 to 5 years</p>
<p><strong>What you'll need</strong>: At least ten small objects your child can pick up, like pegs or | |
"""Variables common across modules"""
# pylint: disable=too-many-lines
import os
import ssl
import string
import sys
import platform
import re
GAM_AUTHOR = '<NAME> <<EMAIL>>'
GAM_VERSION = '6.22'
GAM_LICENSE = 'Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)'
GAM_URL = 'https://jaylee.us/gam'
GAM_INFO = (
f'GAM {GAM_VERSION} - {GAM_URL} / {GAM_AUTHOR} / '
f'Python {platform.python_version()} {sys.version_info.releaselevel} / '
f'{platform.platform()} {platform.machine()}')
GAM_RELEASES = 'https://github.com/GAM-team/GAM/releases'
GAM_WIKI = 'https://github.com/GAM-team/GAM/wiki'
GAM_ALL_RELEASES = 'https://api.github.com/repos/GAM-team/GAM/releases'
GAM_LATEST_RELEASE = GAM_ALL_RELEASES + '/latest'
GAM_PROJECT_FILEPATH = 'https://raw.githubusercontent.com/GAM-team/GAM/master/src/'
true_values = ['on', 'yes', 'enabled', 'true', '1']
false_values = ['off', 'no', 'disabled', 'false', '0']
usergroup_types = [
'user', 'users', 'group', 'group_ns', 'group_susp', 'group_inde', 'ou',
'org', 'ou_ns', 'org_ns', 'ou_susp', 'org_susp', 'ou_and_children',
'ou_and_child', 'ou_and_children_ns', 'ou_and_child_ns',
'ou_and_children_susp', 'ou_and_child_susp', 'query', 'queries', 'license',
'licenses', 'licence', 'licences', 'file', 'csv', 'csvfile', 'all', 'cros',
'cros_sn', 'crosquery', 'crosqueries', 'crosfile', 'croscsv', 'croscsvfile'
]
ERROR_PREFIX = 'ERROR: '
WARNING_PREFIX = 'WARNING: '
UTF8 = 'utf-8'
UTF8_SIG = 'utf-8-sig'
FN_ENABLEDASA_TXT = 'enabledasa.txt'
FN_EXTRA_ARGS_TXT = 'extra-args.txt'
FN_LAST_UPDATE_CHECK_TXT = 'lastupdatecheck.txt'
MY_CUSTOMER = 'my_customer'
# See https://support.google.com/drive/answer/37603
MAX_GOOGLE_SHEET_CELLS = 10000000
MAX_LOCAL_GOOGLE_TIME_OFFSET = 30
SKUS = {
'1010010001': {
'product': '101001',
'aliases': ['identity', 'cloudidentity'],
'displayName': 'Cloud Identity'
},
'1010050001': {
'product': '101005',
'aliases': ['identitypremium', 'cloudidentitypremium'],
'displayName': 'Cloud Identity Premium'
},
'1010350001': {
'product': '101035',
'aliases': ['cloudsearch'],
'displayName': 'Google Cloud Search',
},
'1010310002': {
'product': '101031',
'aliases': ['gsefe', 'e4e', 'gsuiteenterpriseeducation'],
'displayName': 'Google Workspace for Education Plus - Legacy'
},
'1010310003': {
'product': '101031',
'aliases': ['gsefes', 'e4es', 'gsuiteenterpriseeducationstudent'],
'displayName': 'Google Workspace for Education Plus - Legacy (Student)'
},
'1010310005': {
'product': '101031',
'aliases': ['gwes', 'workspaceeducationstandard'],
'displayName': 'Google Workspace for Education Standard'
},
'1010310006': {
'product': '101031',
'aliases': ['gwesstaff', 'workspaceeducationstandardstaff'],
'displayName': 'Google Workspace for Education Standard (Staff)'
},
'1010310007': {
'product': '101031',
'aliases': ['gwesstudent', 'workspaceeducationstandardstudent'],
'displayName': 'Google Workspace for Education Standard (Extra Student)'
},
'1010310008': {
'product': '101031',
'aliases': ['gwep', 'workspaceeducationplus'],
'displayName': 'Google Workspace for Education Plus'
},
'1010310009': {
'product': '101031',
'aliases': ['gwepstaff', 'workspaceeducationplusstaff'],
'displayName': 'Google Workspace for Education Plus (Staff)'
},
'1010310010': {
'product': '101031',
'aliases': ['gwepstudent', 'workspaceeducationplusstudent'],
'displayName': 'Google Workspace for Education Plus (Extra Student)'
},
'1010330003': {
'product': '101033',
'aliases': ['gvstarter', 'voicestarter', 'googlevoicestarter'],
'displayName': 'Google Voice Starter'
},
'1010330004': {
'product': '101033',
'aliases': ['gvstandard', 'voicestandard', 'googlevoicestandard'],
'displayName': 'Google Voice Standard'
},
'1010330002': {
'product': '101033',
'aliases': ['gvpremier', 'voicepremier', 'googlevoicepremier'],
'displayName': 'Google Voice Premier'
},
'1010360001': {
'product': '101036',
'aliases': ['meetdialing','googlemeetglobaldialing'],
'displayName': 'Google Meet Global Dialing'
},
'1010370001': {
'product': '101037',
'aliases': ['gwetlu', 'workspaceeducationupgrade'],
'displayName': 'Google Workspace for Education: Teaching and Learning Upgrade'
},
'Google-Apps': {
'product': 'Google-Apps',
'aliases': ['standard', 'free'],
'displayName': 'G Suite Legacy'
},
'Google-Apps-For-Business': {
'product': 'Google-Apps',
'aliases': ['gafb', 'gafw', 'basic', 'gsuitebasic'],
'displayName': 'G Suite Basic'
},
'Google-Apps-For-Government': {
'product': 'Google-Apps',
'aliases': ['gafg', 'gsuitegovernment', 'gsuitegov'],
'displayName': 'G Suite Government'
},
'Google-Apps-For-Postini': {
'product': 'Google-Apps',
'aliases': [
'gams', 'postini', 'gsuitegams', 'gsuitepostini',
'gsuitemessagesecurity'
],
'displayName': 'G Suite Message Security'
},
'Google-Apps-Lite': {
'product': 'Google-Apps',
'aliases': ['gal', 'gsl', 'lite', 'gsuitelite'],
'displayName': 'G Suite Lite'
},
'Google-Apps-Unlimited': {
'product': 'Google-Apps',
'aliases': ['gau', 'gsb', 'unlimited', 'gsuitebusiness'],
'displayName': 'G Suite Business'
},
'1010020027': {
'product': 'Google-Apps',
'aliases': ['wsbizstart', 'workspacebusinessstarter'],
'displayName': 'Workspace Business Starter'
},
'1010020028': {
'product': 'Google-Apps',
'aliases': ['wsbizstan', 'workspacebusinessstandard'],
'displayName': 'Workspace Business Standard'
},
'1010020025': {
'product': 'Google-Apps',
'aliases': ['wsbizplus', 'workspacebusinessplus'],
'displayName': 'Workspace Business Plus'
},
'1010060001': {
'product': 'Google-Apps',
'aliases': [
'gsuiteessentials', 'essentials', 'd4e', 'driveenterprise',
'drive4enterprise', 'wsess', 'workspaceesentials'
],
'displayName': 'Google Workspace Essentials'
},
'1010060003': {
'product': 'Google-Apps',
'aliases': ['wsentess', 'workspaceenterpriseessentials'],
'displayName': 'Workspace Enterprise Essentials'
},
'1010020026': {
'product': 'Google-Apps',
'aliases': ['wsentstan', 'workspaceenterprisestandard'],
'displayName': 'Workspace Enterprise Standard'
},
'1010020020': {
'product': 'Google-Apps',
'aliases': ['gae', 'gse', 'enterprise', 'gsuiteenterprise',
'wsentplus', 'workspaceenterpriseplus'],
'displayName': 'Workspace Enterprise Plus'
},
'1010020029': {
'product': 'Google-Apps',
'aliases': ['wes', 'workspaceenterprisestarter'],
'displayName': 'Workspace Enterprise Starter'
},
'1010020030': {
'product': 'Google-Apps',
'aliases': ['workspacefrontline', 'workspacefrontlineworker'],
'displayName': 'Workspace Frontline'
},
'1010340002': {
'product': '101034',
'aliases': ['gsbau', 'businessarchived', 'gsuitebusinessarchived'],
'displayName': 'G Suite Business Archived'
},
'1010340001': {
'product': '101034',
'aliases': ['gseau', 'enterprisearchived', 'gsuiteenterprisearchived'],
'displayName': 'Google Workspace Enterprise Plus Archived'
},
'Google-Drive-storage-20GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive20gb', '20gb', 'googledrivestorage20gb'],
'displayName': 'Google Drive Storage 20GB'
},
'Google-Drive-storage-50GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive50gb', '50gb', 'googledrivestorage50gb'],
'displayName': 'Google Drive Storage 50GB'
},
'Google-Drive-storage-200GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive200gb', '200gb', 'googledrivestorage200gb'],
'displayName': 'Google Drive Storage 200GB'
},
'Google-Drive-storage-400GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive400gb', '400gb', 'googledrivestorage400gb'],
'displayName': 'Google Drive Storage 400GB'
},
'Google-Drive-storage-1TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive1tb', '1tb', 'googledrivestorage1tb'],
'displayName': 'Google Drive Storage 1TB'
},
'Google-Drive-storage-2TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive2tb', '2tb', 'googledrivestorage2tb'],
'displayName': 'Google Drive Storage 2TB'
},
'Google-Drive-storage-4TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive4tb', '4tb', 'googledrivestorage4tb'],
'displayName': 'Google Drive Storage 4TB'
},
'Google-Drive-storage-8TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive8tb', '8tb', 'googledrivestorage8tb'],
'displayName': 'Google Drive Storage 8TB'
},
'Google-Drive-storage-16TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive16tb', '16tb', 'googledrivestorage16tb'],
'displayName': 'Google Drive Storage 16TB'
},
'Google-Vault': {
'product': 'Google-Vault',
'aliases': ['vault', 'googlevault'],
'displayName': 'Google Vault'
},
'Google-Vault-Former-Employee': {
'product': 'Google-Vault',
'aliases': ['vfe', 'googlevaultformeremployee'],
'displayName': 'Google Vault Former Employee'
},
'Google-Chrome-Device-Management': {
'product': 'Google-Chrome-Device-Management',
'aliases': ['chrome', 'cdm', 'googlechromedevicemanagement'],
'displayName': 'Google Chrome Device Management'
}
}
PRODUCTID_NAME_MAPPINGS = {
'101001': 'Cloud Identity Free',
'101005': 'Cloud Identity Premium',
'101031': 'G Suite Workspace for Education',
'101033': 'Google Voice',
'101034': 'G Suite Archived',
'101035': 'Cloud Search',
'101036': 'Google Meet Global Dialing',
'101037': 'G Suite Workspace for Education',
'Google-Apps': 'Google Workspace',
'Google-Chrome-Device-Management': 'Google Chrome Device Management',
'Google-Drive-storage': 'Google Drive Storage',
'Google-Vault': 'Google Vault',
}
# Legacy APIs that use v1 discovery. Newer APIs should all use v2.
V1_DISCOVERY_APIS = {
'drive',
'oauth2',
}
API_NAME_MAPPING = {
'directory': 'admin',
'directory_beta': 'admin',
'reports': 'admin',
'datatransfer': 'admin',
'drive3': 'drive',
'calendar': 'calendar-json',
'cloudidentity_beta': 'cloudidentity',
}
API_VER_MAPPING = {
'accesscontextmanager': 'v1',
'alertcenter': 'v1beta1',
'driveactivity': 'v2',
'calendar': 'v3',
'cbcm': 'v1.1beta1',
'chromemanagement': 'v1',
'chromepolicy': 'v1',
'classroom': 'v1',
'cloudidentity': 'v1',
'cloudidentity_beta': 'v1beta1',
'cloudresourcemanager': 'v3',
'contactdelegation': 'v1',
'datatransfer': 'datatransfer_v1',
'directory': 'directory_v1',
'directory_beta': 'directory_v1.1beta1',
'drive': 'v2',
'drive3': 'v3',
'gmail': 'v1',
'groupssettings': 'v1',
'iam': 'v1',
'iap': 'v1',
'licensing': 'v1',
'oauth2': 'v2',
'pubsub': 'v1',
'reports': 'reports_v1',
'reseller': 'v1',
'servicemanagement': 'v1',
'serviceusage': 'v1',
'sheets': 'v4',
'siteVerification': 'v1',
'storage': 'v1',
'vault': 'v1',
'versionhistory': 'v1',
}
USERINFO_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
API_SCOPE_MAPPING = {
'alertcenter': ['https://www.googleapis.com/auth/apps.alerts',],
'driveactivity': [
'https://www.googleapis.com/auth/drive.activity',
'https://www.googleapis.com/auth/drive',
],
'calendar': ['https://www.googleapis.com/auth/calendar',],
'cloudidentity': ['https://www.googleapis.com/auth/cloud-identity'],
'cloudidentity_beta': ['https://www.googleapis.com/auth/cloud-identity'],
'drive': ['https://www.googleapis.com/auth/drive',],
'drive3': ['https://www.googleapis.com/auth/drive',],
'gmail': [
'https://mail.google.com/',
'https://www.googleapis.com/auth/gmail.settings.basic',
'https://www.googleapis.com/auth/gmail.settings.sharing',
],
'sheets': ['https://www.googleapis.com/auth/spreadsheets',],
}
ADDRESS_FIELDS_PRINT_ORDER = [
'contactName',
'organizationName',
'addressLine1',
'addressLine2',
'addressLine3',
'locality',
'region',
'postalCode',
'countryCode',
]
ADDRESS_FIELDS_ARGUMENT_MAP = {
'contact': 'contactName',
'contactname': 'contactName',
'name': 'organizationName',
'organizationname': 'organizationName',
'address': 'addressLine1',
'address1': 'addressLine1',
'addressline1': 'addressLine1',
'address2': 'addressLine2',
'addressline2': 'addressLine2',
'address3': 'addressLine3',
'addressline3': 'addressLine3',
'city': 'locality',
'locality': 'locality',
'state': 'region',
'region': 'region',
'zipcode': 'postalCode',
'postal': 'postalCode',
'postalcode': 'postalCode',
'country': 'countryCode',
'countrycode': 'countryCode',
}
SERVICE_NAME_TO_ID_MAP = {
'Calendar': '435070579839',
'Currents': '553547912911',
'Drive and Docs': '55656082996',
'Google Data Studio': '810260081642',
}
SERVICE_NAME_CHOICES_MAP = {
'calendar': 'Calendar',
'currents': 'Currents',
'datastudio': 'Google Data Studio',
'google data studio': 'Google Data Studio',
'drive': 'Drive and Docs',
'drive and docs': 'Drive and Docs',
'googledrive': 'Drive and Docs',
'gdrive': 'Drive and Docs',
}
PRINTJOB_ASCENDINGORDER_MAP = {
'createtime': 'CREATE_TIME',
'status': 'STATUS',
'title': 'TITLE',
}
PRINTJOB_DESCENDINGORDER_MAP = {
'CREATE_TIME': 'CREATE_TIME_DESC',
'STATUS': 'STATUS_DESC',
'TITLE': 'TITLE_DESC',
}
PRINTJOBS_DEFAULT_JOB_LIMIT = 0
PRINTJOBS_DEFAULT_MAX_RESULTS = 100
CALENDAR_REMINDER_METHODS = [
'email',
'sms',
'popup',
]
CALENDAR_NOTIFICATION_METHODS = [
'email',
'sms',
]
CALENDAR_NOTIFICATION_TYPES_MAP = {
'eventcreation': 'eventCreation',
'eventchange': 'eventChange',
'eventcancellation': 'eventCancellation',
'eventresponse': 'eventResponse',
'agenda': 'agenda',
}
DEVICE_ORDERBY_CHOICES_MAP = {
'createtime': 'create_time',
'devicetype': 'device_type',
'lastsynctime': 'last_sync_time',
'model': 'model',
'osversion': 'os_version',
'serialnumber': 'serial_number'
}
DRIVEFILE_FIELDS_CHOICES_MAP = {
'alternatelink': 'alternateLink',
'appdatacontents': 'appDataContents',
'cancomment': 'canComment',
'canreadrevisions': 'canReadRevisions',
'contentrestrictions': 'contentRestrictions',
'copyable': 'copyable',
'copyrequireswriterpermission': 'copyRequiresWriterPermission',
'createddate': 'createdDate',
'createdtime': 'createdDate',
'description': 'description',
'driveid': 'driveId',
'editable': 'editable',
'explicitlytrashed': 'explicitlyTrashed',
'fileextension': 'fileExtension',
'filesize': 'fileSize',
'foldercolorrgb': 'folderColorRgb',
'fullfileextension': 'fullFileExtension',
'headrevisionid': 'headRevisionId',
'iconlink': 'iconLink',
'id': 'id',
'lastmodifyinguser': 'lastModifyingUser',
'lastmodifyingusername': 'lastModifyingUserName',
'lastviewedbyme': 'lastViewedByMeDate',
'lastviewedbymedate': 'lastViewedByMeDate',
'lastviewedbymetime': 'lastViewedByMeDate',
'lastviewedbyuser': 'lastViewedByMeDate',
'linksharemetadata': 'linkShareMetadata',
'md5': 'md5Checksum',
'md5checksum': 'md5Checksum',
'md5sum': 'md5Checksum',
'mime': 'mimeType',
'mimetype': 'mimeType',
'modifiedbyme': 'modifiedByMeDate',
'modifiedbymedate': 'modifiedByMeDate',
'modifiedbymetime': 'modifiedByMeDate',
'modifiedbyuser': 'modifiedByMeDate',
'modifieddate': 'modifiedDate',
'modifiedtime': 'modifiedDate',
'name': 'title',
'originalfilename': 'originalFilename',
'ownedbyme': 'ownedByMe',
'ownernames': 'ownerNames',
'owners': 'owners',
'parents': 'parents',
'permissions': 'permissions',
'resourcekey': 'resourceKey',
'quotabytesused': 'quotaBytesUsed',
'quotaused': 'quotaBytesUsed',
'shareable': 'shareable',
'shared': 'shared',
'sharedwithmedate': 'sharedWithMeDate',
'sharedwithmetime': 'sharedWithMeDate',
'sharinguser': 'sharingUser',
'shortcutdetails': 'shortcutDetails',
'spaces': 'spaces',
'thumbnaillink': 'thumbnailLink',
'title': 'title',
'userpermission': 'userPermission',
'version': 'version',
'viewedbyme': 'labels(viewed)',
'viewedbymedate': 'lastViewedByMeDate',
'viewedbymetime': 'lastViewedByMeDate',
'viewerscancopycontent': 'labels(restricted)',
'webcontentlink': 'webContentLink',
'webviewlink': 'webViewLink',
'writerscanshare': 'writersCanShare',
}
DRIVEFILE_LABEL_CHOICES_MAP = {
'restricted': 'restricted',
'restrict': 'restricted',
'starred': 'starred',
'star': 'starred',
'trashed': 'trashed',
'trash': 'trashed',
'viewed': 'viewed',
'view': 'viewed',
}
DRIVEFILE_ORDERBY_CHOICES_MAP = {
'createddate': 'createdDate',
'folder': 'folder',
'lastviewedbyme': 'lastViewedByMeDate',
'lastviewedbymedate': 'lastViewedByMeDate',
'lastviewedbyuser': 'lastViewedByMeDate',
'modifiedbyme': 'modifiedByMeDate',
'modifiedbymedate': 'modifiedByMeDate',
'modifiedbyuser': 'modifiedByMeDate',
'modifieddate': 'modifiedDate',
'name': 'title',
'quotabytesused': 'quotaBytesUsed',
'quotaused': 'quotaBytesUsed',
'recency': 'recency',
'sharedwithmedate': 'sharedWithMeDate',
'starred': 'starred',
'title': 'title',
'viewedbymedate': 'lastViewedByMeDate',
}
DELETE_DRIVEFILE_FUNCTION_TO_ACTION_MAP = {
'delete': | |
#!/usr/bin/env python3
# *******************************************************
# Copyright (c) VMware, Inc. 2020. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Definition of the CBCloudAPI object, the core object for interacting with the Carbon Black Cloud SDK."""
from cbc_sdk.connection import BaseAPI
from cbc_sdk.errors import ApiError, CredentialError, ServerError
from cbc_sdk.live_response_api import LiveResponseSessionManager
from cbc_sdk.audit_remediation import Run, RunHistory
from cbc_sdk.enterprise_edr.threat_intelligence import ReportSeverity
import logging
import time
from concurrent.futures import ThreadPoolExecutor
log = logging.getLogger(__name__)
class CBCloudAPI(BaseAPI):
"""The main entry point into the CBCloudAPI.
:param str profile: (optional) Use the credentials in the named profile when connecting to the Carbon Black server.
Uses the profile named 'default' when not specified.
Usage::
>>> from cbc_sdk import CBCloudAPI
>>> cb = CBCloudAPI(profile="production")
"""
def __init__(self, *args, **kwargs):
super(CBCloudAPI, self).__init__(*args, **kwargs)
self._thread_pool_count = kwargs.pop('thread_pool_count', 1)
self._lr_scheduler = None
self._async_executor = None
if not self.credentials.org_key:
raise CredentialError("No organization key specified")
def _perform_query(self, cls, **kwargs):
if hasattr(cls, "_query_implementation"):
return cls._query_implementation(self, **kwargs)
else:
raise ApiError("All Carbon Black Cloud models must provide _query_implementation")
# ---- Async
def _async_submit(self, callable, *args, **kwargs):
"""
Submit a task to the executor, creating it if it doesn't yet exist.
Args:
callable (func): A callable to be executed as a background task.
*args (list): Arguments to be passed to the callable.
**kwargs (dict): Keyword arguments to be passed to the callable.
Returns:
Future: A future object representing the background task, which will pass along the result.
"""
if not self._async_executor:
self._async_executor = ThreadPoolExecutor(max_workers=self._thread_pool_count)
return self._async_executor.submit(callable, args, kwargs)
# ---- LiveOps
@property
def live_response(self):
if self._lr_scheduler is None:
self._lr_scheduler = LiveResponseSessionManager(self)
return self._lr_scheduler
def _request_lr_session(self, sensor_id):
return self.live_response.request_session(sensor_id)
# ---- Audit and Remediation
def audit_remediation(self, sql):
return self.select(Run).where(sql=sql)
def audit_remediation_history(self, query=None):
return self.select(RunHistory).where(query)
# ---- Notifications
def notification_listener(self, interval=60):
"""Generator to continually poll the Cb Endpoint Standard server for notifications (alerts). Note that this can only
be used with a 'SIEM' key generated in the Cb Endpoint Standard console.
"""
while True:
for notification in self.get_notifications():
yield notification
time.sleep(interval)
def get_notifications(self):
"""Retrieve queued notifications (alerts) from the Cb Endpoint Standard server. Note that this can only be used
with a 'SIEM' key generated in the Cb Endpoint Standard console.
:returns: list of dictionary objects representing the notifications, or an empty list if none available.
"""
res = self.get_object("/integrationServices/v3/notification")
return res.get("notifications", [])
# ---- Device API
def _raw_device_action(self, request):
"""
Invokes the API method for a device action.
:param dict request: The request body to be passed as JSON to the API method.
:return: The parsed JSON output from the request.
:raises ServerError: If the API method returns an HTTP error code.
"""
url = "/appservices/v6/orgs/{0}/device_actions".format(self.credentials.org_key)
resp = self.post_object(url, body=request)
if resp.status_code == 200:
return resp.json()
elif resp.status_code == 204:
return None
else:
raise ServerError(error_code=resp.status_code, message="Device action error: {0}".format(resp.content))
def _device_action(self, device_ids, action_type, options=None):
"""
Executes a device action on multiple device IDs.
:param list device_ids: The list of device IDs to execute the action on.
:param str action_type: The action type to be performed.
:param dict options: Options for the bulk device action. Default None.
"""
request = {"action_type": action_type, "device_id": device_ids}
if options:
request["options"] = options
return self._raw_device_action(request)
def _action_toggle(self, flag):
"""
Converts a boolean flag value into a "toggle" option.
:param boolean flag: The value to be converted.
:return: A dict containing the appropriate "toggle" element.
"""
if flag:
return {"toggle": "ON"}
else:
return {"toggle": "OFF"}
def device_background_scan(self, device_ids, scan):
"""
Set the background scan option for the specified devices.
:param list device_ids: List of IDs of devices to be set.
:param boolean scan: True to turn background scan on, False to turn it off.
"""
return self._device_action(device_ids, "BACKGROUND_SCAN", self._action_toggle(scan))
def device_bypass(self, device_ids, enable):
"""
Set the bypass option for the specified devices.
:param list device_ids: List of IDs of devices to be set.
:param boolean enable: True to enable bypass, False to disable it.
"""
return self._device_action(device_ids, "BYPASS", self._action_toggle(enable))
def device_delete_sensor(self, device_ids):
"""
Delete the specified sensor devices.
:param list device_ids: List of IDs of devices to be deleted.
"""
return self._device_action(device_ids, "DELETE_SENSOR")
def device_uninstall_sensor(self, device_ids):
"""
Uninstall the specified sensor devices.
:param list device_ids: List of IDs of devices to be uninstalled.
"""
return self._device_action(device_ids, "UNINSTALL_SENSOR")
def device_quarantine(self, device_ids, enable):
"""
Set the quarantine option for the specified devices.
:param list device_ids: List of IDs of devices to be set.
:param boolean enable: True to enable quarantine, False to disable it.
"""
return self._device_action(device_ids, "QUARANTINE", self._action_toggle(enable))
def device_update_policy(self, device_ids, policy_id):
"""
Set the current policy for the specified devices.
:param list device_ids: List of IDs of devices to be changed.
:param int policy_id: ID of the policy to set for the devices.
"""
return self._device_action(device_ids, "UPDATE_POLICY", {"policy_id": policy_id})
def device_update_sensor_version(self, device_ids, sensor_version):
"""
Update the sensor version for the specified devices.
:param list device_ids: List of IDs of devices to be changed.
:param dict sensor_version: New version properties for the sensor.
"""
return self._device_action(device_ids, "UPDATE_SENSOR_VERSION", {"sensor_version": sensor_version})
# ---- Alerts API
def alert_search_suggestions(self, query):
"""
Returns suggestions for keys and field values that can be used in a search.
:param str query: A search query to use.
:return: A list of search suggestions expressed as dict objects.
"""
query_params = {"suggest.q": query}
url = "/appservices/v6/orgs/{0}/alerts/search_suggestions".format(self.credentials.org_key)
output = self.get_object(url, query_params)
return output["suggestions"]
def _bulk_threat_update_status(self, threat_ids, status, remediation, comment):
"""
Update the status of alerts associated with multiple threat IDs, past and future.
:param list threat_ids: List of string threat IDs.
:param str status: The status to set for all alerts, either "OPEN" or "DISMISSED".
:param str remediation: The remediation state to set for all alerts.
:param str comment: The comment to set for all alerts.
"""
if not all(isinstance(t, str) for t in threat_ids):
raise ApiError("One or more invalid threat ID values")
request = {"state": status, "threat_id": threat_ids}
if remediation is not None:
request["remediation_state"] = remediation
if comment is not None:
request["comment"] = comment
url = "/appservices/v6/orgs/{0}/threat/workflow/_criteria".format(self.credentials.org_key)
resp = self.post_object(url, body=request)
output = resp.json()
return output["request_id"]
def bulk_threat_update(self, threat_ids, remediation=None, comment=None):
"""
Update the alert status of alerts associated with multiple threat IDs.
The alerts will be left in an OPEN state after this request.
:param list threat_ids: List of string threat IDs.
:param str remediation: The remediation state to set for all alerts.
:param str comment: The comment to set for all alerts.
:return: The request ID, which may be used to select a WorkflowStatus object.
"""
return self._bulk_threat_update_status(threat_ids, "OPEN", remediation, comment)
def bulk_threat_dismiss(self, threat_ids, remediation=None, comment=None):
"""
Dismiss the alerts associated with multiple threat IDs.
The alerts will be left in a DISMISSED state after this request.
:param list threat_ids: List of string threat IDs.
:param str remediation: The remediation state to set for all alerts.
:param str comment: The comment to set for all alerts.
:return: The request ID, which may be used to select a WorkflowStatus object.
"""
return self._bulk_threat_update_status(threat_ids, "DISMISSED", remediation, comment)
# ---- Enterprise EDR
def create(self, cls, data=None):
"""Creates a new model.
>>> feed = cb.create(Feed, feed_data)
:param cls: The model being created
:param data: The data to pre-populate the model with
:type data: dict(str, object)
:return: an instance of `cls`
"""
return cls(self, initial_data=data)
def validate_process_query(self, query):
"""Validates the given IOC query.
>>> cb.validate_process_query("process_name:chrome.exe") # True
:param str query: the query to validate
:return: whether or not the query is valid
:rtype: bool
"""
args = {"q": query}
url = "/api/investigate/v1/orgs/{}/processes/search_validation".format(
self.credentials.org_key
)
resp = self.get_object(url, query_parameters=args)
return resp.get("valid", False)
def convert_feed_query(self, query):
"""Converts a legacy CB Response query to a ThreatHunter query.
:param str query: the query to convert
:return: the converted query
:rtype: str
"""
args = {"query": query}
resp = self.post_object("/threathunter/feedmgr/v2/query/translate", args).json()
| |
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2c']
a = dset['res4f_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2c']
a = dset['bn4f_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4g_branch2a']
a = dset['res4g_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2a']
a = dset['bn4g_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2b']
a = dset['res4g_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2b']
a = dset['bn4g_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2c']
a = dset['res4g_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2c']
a = dset['bn4g_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
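# Refactoring sketch (illustrative only; nothing below calls it): every block in this file
# repeats the same read-and-fold pattern, so it could be collapsed into a helper. The name
# `fold_conv_bn` and its argument names are assumptions. The arithmetic mirrors the original
# exactly, i.e. it divides by the raw moving variance rather than sqrt(variance + eps) as a
# conventional BatchNorm fold would.
def fold_conv_bn(h5_file, conv_name, bn_name):
    conv = h5_file[conv_name][conv_name]
    kernel = np.array(conv['kernel:0'], dtype=np.float32)
    bias = np.array(conv['bias:0'], dtype=np.float32)
    bn = h5_file[bn_name][bn_name]
    beta = np.array(bn['beta:0'], dtype=np.float32)
    gamma = np.array(bn['gamma:0'], dtype=np.float32)
    mean = np.array(bn['moving_mean:0'], dtype=np.float32)
    var = np.array(bn['moving_variance:0'], dtype=np.float32)
    folded_beta = ((bias - mean) / var) * gamma + beta
    return kernel, gamma, folded_beta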
############################
dset = fff['res4h_branch2a']
a = dset['res4h_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2a']
a = dset['bn4h_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2b']
a = dset['res4h_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2b']
a = dset['bn4h_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2c']
a = dset['res4h_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2c']
a = dset['bn4h_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4i_branch2a']
a = dset['res4i_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2a']
a = dset['bn4i_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2b']
a = dset['res4i_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2b']
a = dset['bn4i_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2c']
a = dset['res4i_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2c']
a = dset['bn4i_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4j_branch2a']
a = dset['res4j_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2a']
a = dset['bn4j_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2b']
a = dset['res4j_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2b']
a = dset['bn4j_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2c']
a = dset['res4j_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2c']
a = dset['bn4j_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4k_branch2a']
a = dset['res4k_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2a']
a = dset['bn4k_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2b']
a = dset['res4k_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2b']
a = dset['bn4k_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2c']
a = dset['res4k_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2c']
a = dset['bn4k_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4l_branch2a']
a = dset['res4l_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2a']
a = dset['bn4l_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2b']
a = dset['res4l_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2b']
a = dset['bn4l_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2c']
a = dset['res4l_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2c']
a = dset['bn4l_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4m_branch2a']
a = dset['res4m_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2a']
a = dset['bn4m_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2b']
a = dset['res4m_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2b']
a = dset['bn4m_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
"""
Clinical Trials Policy class
<NAME> (c) 2018
Adapted from code by <NAME> (c) 2018
"""
from collections import namedtuple
import numpy as np
from scipy.stats import binom
import scipy
import math
import pandas as pd
import copy
from ClinicalTrialsModel import ClinicalTrialsModel
import time
def trunc_poisson_fn(count, mean):
"""
returns the pmf values of a Poisson distribution with the given mean, truncated at value count (the last entry absorbs the remaining tail mass)
:param count: int - maximal value considered by the distribution
:param mean: float - mean of Poisson distribution
:return list(float) - vector of truncated Poisson pmfs
"""
trunc_probs = []
sum = 0.0
for r in range(0, count):
trunc_probs.insert(r, 1/math.factorial(r)*(mean**r)*np.exp(-mean))
sum += trunc_probs[r]
trunc_probs.insert(count, 1-sum)
return trunc_probs
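# Worked example (values rounded; computed from the formula above): trunc_poisson_fn(3, 1.0)
# returns the Poisson(1) pmf at r = 0, 1, 2 plus a final entry absorbing the tail mass,
# so the list sums to 1:
#   [0.3679, 0.3679, 0.1839, 0.0803]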
class ClinicalTrialsPolicy():
"""
Base class for decision policy
"""
def __init__(self, model, policy_names):
"""
initializes the policy
:param model: the ClinicalTrialsModel that the policy is being implemented on
:param policy_names: list(str) - list of policies
"""
self.model = model
self.policy_names = policy_names
self.Policy = namedtuple('Policy', policy_names)
def build_policy(self, info):
"""
builds the policies depending on the parameters provided
:param info: dict - contains all policy information
:return: namedtuple - a policy object
"""
return self.Policy(*[info[k] for k in self.policy_names])
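# Illustrative note (the dictionary shape below is an assumption, inferred from run_policy):
# with policy_names = ["model_A", "model_B", "model_C_extension", "model_C"], calling
# build_policy({"model_A": [-1, False], "model_B": [-1, False], ...}) returns a namedtuple
# whose fields hold each policy's [drug_success, stop_flag] pair, accessed later as
# p.model_A, p.model_B, and so on.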
def model_A_policy(self, state, info_tuple):
"""
implements deterministic lookahead policy based on Model A
:param state: namedtuple - the state of the model at a given time
:param info_tuple: tuple - contains the parameters needed to run the policy
:return: a decision made based on the policy
"""
success_A = info_tuple[0]
stop_A = info_tuple[1]
sim_model = ClinicalTrialsModel(self.model.state_variables, self.model.decision_variables, self.model.initial_state, True)
sim_model.state = copy.deepcopy(state)
if stop_A==False:
value_dict={}
sol_dict,value_dict = model_A_value_fn(sim_model, 0, success_A,value_dict)
new_decision = sol_dict['optimal_enroll']
else:
new_decision=0
return new_decision
def model_B_policy(self, state, info_tuple):
"""
implements lookahead policy based on Model B
:param state: namedtuple - the state of the model at a given time
:param info_tuple: tuple - contains the parameters needed to run the policy
:return: a decision made based on the policy
"""
success_B = info_tuple[0]
stop_B = info_tuple[1]
sim_model = ClinicalTrialsModel(self.model.state_variables, self.model.decision_variables, self.model.initial_state, True)
sim_model.state = copy.deepcopy(state)
if stop_B==False:
value_dict={}
sol_dict,value_dict = model_B_value_fn(sim_model, 0, success_B,value_dict)
new_decision = sol_dict['optimal_enroll']
else:
new_decision=0
return new_decision
def model_C_extension_policy(self, state, info_tuple):
"""
implements lookahead policy based on the extension of Model C
:param state: namedtuple - the state of the model at a given time
:param info_tuple: tuple - contains the parameters needed to run the policy
:return: a decision made based on the policy
"""
success_C_extension = info_tuple[0]
stop_C_extension = info_tuple[1]
sim_model = ClinicalTrialsModel(self.model.state_variables, self.model.decision_variables, self.model.initial_state, True)
sim_model.state = copy.deepcopy(state)
if stop_C_extension==False:
value_dict={}
sol_dict,value_dict = model_C_extension_value_fn(sim_model, 0, success_C_extension,value_dict)
new_decision = sol_dict['optimal_enroll']
else:
new_decision=0
return new_decision
def model_C_policy(self, state, info_tuple, time):
"""
implements hybrid policy for Model C using backward ADP
:param state: namedtuple - the state of the model at a given time
:param info_tuple: tuple - contains the parameters needed to run the policy
:param time: int - start time
:return: a decision made based on the policy
"""
success_C = info_tuple[0]
stop_C = info_tuple[1]
sim_model = ClinicalTrialsModel(self.model.state_variables, self.model.decision_variables, self.model.initial_state, True)
sim_model.state = copy.deepcopy(state)
parameters = parameters_fn(sim_model)
if stop_C == True: new_decision = 0
else:
vals = []
decs = []
for x_enroll in range(self.model.initial_state['enroll_min'], self.model.initial_state['enroll_max']+self.model.initial_state['enroll_step'], self.model.initial_state['enroll_step']):
pseudo_state = [state.potential_pop + x_enroll, state.success, state.failure, state.l_response]
if len(parameters[time]) < 8:
value = func_simple(pseudo_state, parameters[time][0], parameters[time][1], parameters[time][2], parameters[time][3])
else:
value = func(pseudo_state, parameters[time][0], parameters[time][1], parameters[time][2], parameters[time][3], parameters[time][4], parameters[time][5], parameters[time][6], parameters[time][7])
cost = -(self.model.initial_state['program_cost'] + self.model.initial_state['patient_cost'] * x_enroll)
vals.append(value + cost)
decs.append(x_enroll)
val_max = max(vals)
new_decision = decs[vals.index(val_max)]
return new_decision
def run_policy(self, policy_info, policy, t):
"""
runs the model with a selected policy
:param policy_info: dict - dictionary of policies and their associated parameters
:param policy: str - the name of the chosen policy
:param t: int - start time
:return: float - calculated contribution
"""
time_run = time.time()
model_copy = copy.deepcopy(self.model)
while t <= model_copy.initial_state['trial_size'] and policy_info[policy][1] == False:
time_t = time.time()
# build decision policy
p = self.build_policy(policy_info)
# implements sampled distribution for p_true
p_true_samples = np.random.beta(model_copy.state.success, model_copy.state.failure, model_copy.initial_state['K'])
p_belief = model_copy.state.success / (model_copy.state.success + model_copy.state.failure)
# drug_success = 1 if successful, 0 if failure, -1 if continue trial (for all policies)
if p_belief > model_copy.initial_state['theta_stop_high']:
decision = {'prog_continue': 0, 'drug_success': 1}
policy_info[policy][1] = True
elif p_belief < model_copy.initial_state['theta_stop_low']:
decision = {'prog_continue': 0, 'drug_success': 0}
policy_info[policy][1] = True
else:
decision = {'prog_continue': 1, 'drug_success': -1}
# makes enrollment decision based on chosen policy
if policy == "model_A":
decision['enroll'] = self.model_A_policy(model_copy.state, p.model_A)
elif policy == "model_B":
decision['enroll'] = self.model_B_policy(model_copy.state, p.model_B)
elif policy == "model_C_extension":
decision['enroll'] = self.model_C_extension_policy(model_copy.state, p.model_C_extension)
elif policy == "model_C":
decision['enroll'] = self.model_C_policy(model_copy.state, p.model_C, t)
x = model_copy.build_decision(decision)
print("Base Model t={}, obj={:,}, state.potential_pop={}, state.success={}, state.failure={}, x={}, elapsed time={:.2f} sec".format(t, model_copy.objective,
model_copy.state.potential_pop,
model_copy.state.success,
model_copy.state.failure, x,time.time()-time_t))
# steps the model forward one iteration
model_copy.step(x)
# updates policy info
policy_info[policy][0] = decision['drug_success']
# increments time
t += 1
print("Base Model: Stopping time t={}, obj(revenue)={:,}, rhobar={:.2f}, Elapsed time={:.2f} sec".format(t, model_copy.objective, model_copy.state.success/(model_copy.state.success+model_copy.state.failure),time.time()-time_run))
policy_value = model_copy.objective
return policy_value
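# End-to-end usage sketch (hedged: the driver script is not shown here, so the construction
# below is an assumption about how the pieces fit together; parameter values are made up).
#
#   model = ClinicalTrialsModel(state_variables, decision_variables, initial_state, False)
#   policy = ClinicalTrialsPolicy(model, ["model_A", "model_B", "model_C_extension", "model_C"])
#   policy_info = {name: [-1, False] for name in policy.policy_names}
#   revenue = policy.run_policy(policy_info, "model_A", 0)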
def model_A_value_fn(model, iteration, success_index,value_dict):
"""
solves the deterministic shortest path problem for Model A (over given horizon);
returns the value of the current state and its optimal number of new potential patients to enroll
:param model: ClinicalTrialsModel - model which contains all state variables (physical and belief states)
:param iteration: int - tracks the horizon in the deterministic shortest path problem
:param success_index: int - 1 if drug is declared successful, 0 if failure, -1 if continue trial
:param value_dict: dict - cache of previously computed lookahead state values
:return: dict with the value of the current node and its optimal enrollment count, plus the updated value_dict
"""
# computes value and optimal enrollments corresponding to current state
if success_index == -1:
if iteration < model.initial_state['H']:
bellman_vals = []
bellman_decisions = []
for x_enroll in range(model.initial_state['enroll_min'], model.initial_state['enroll_max']+model.initial_state['enroll_step'], model.initial_state['enroll_step']):
bellman_potential_pop = model.state.potential_pop + x_enroll
#print("Starting bellman - ite: {}, R: {}, x_enroll: {}, R_t+1: {}".format(iteration, model.state.potential_pop,x_enroll,bellman_potential_pop))
bellman_cost = -(model.initial_state['program_cost'] + model.initial_state['patient_cost'] * x_enroll)
bellman_state = copy.deepcopy(model.initial_state)
bellman_state['potential_pop'] = bellman_potential_pop
bellman_M = ClinicalTrialsModel(model.state_variables, model.decision_variables, bellman_state, True)
value_key=(iteration+1,bellman_state['potential_pop'])
count=-1
# the drug success probability stays fixed
bellman_p_belief = bellman_M.state.success / (bellman_M.state.success + bellman_M.state.failure)
if bellman_p_belief > bellman_M.initial_state['theta_stop_high']:
success_index = 1
step_value = model.initial_state['success_rev']
elif bellman_p_belief < bellman_M.initial_state['theta_stop_low']:
success_index = 0
step_value = 0
else:
if value_key in value_dict:
step_value = value_dict[value_key][0]
count = value_dict[value_key][1]
#print("key: {} value: {:.2f} count: {} lendict:{}".format(value_key,step_value,count,len(value_dict)))
else:
sol_dict,value_dict = model_A_value_fn(bellman_M, iteration+1, success_index,value_dict)
step_value = sol_dict['value']
value_dict.update({value_key:[step_value,count+1]})
bellman_cost += step_value
bellman_decisions.append(x_enroll)
bellman_vals.append(bellman_cost)
#print("Ending - ite: {}, R: {}, x_enroll: {}, R_t+1: {}, Cost: {}".format(iteration, model.state.potential_pop,x_enroll,bellman_potential_pop,bellman_cost))
value = max(bellman_vals)
optimal_enroll = bellman_decisions[bellman_vals.index(value)]
#print("********Ending State- ite: {}, R: {}, arg_max: {}, opt_value {} ".format(iteration, model.state.potential_pop,optimal_enroll,value))
return {"value": value,
"optimal_enroll": optimal_enroll},value_dict
# stops iterating at horizon t' = t + H
else:
return {"value": 0,"optimal_enroll": 0},value_dict
# stops experiment at node if drug is declared success or failure
else: return {"value": model.initial_state['success_rev'] * success_index,
"optimal_enroll": 0},value_dict
def model_B_value_fn(model, iteration, success_index,value_dict):
"""
solves the stochastic lookahead problem for Model B (over given horizon);
returns the value of the current state and its optimal number of new potential patients to enroll
:param model: ClinicalTrialsModel - model which contains all state variables (physical and belief states)
:param iteration: int - tracks the horizon in the stochastic lookahead problem
:param success_index: int - 1 if drug is declared successful, 0 if failure, -1 if continue trial
:param value_dict: dict - cache of previously computed lookahead state values
:return: dict with the value of the current node and its optimal enrollment count, plus the updated value_dict
"""
# computes value and optimal enrollments corresponding to current state
if success_index == -1:
if iteration < model.initial_state['H']:
bellman_vals = []
bellman_decisions = []
for x_enroll in range(model.initial_state['enroll_min'], model.initial_state['enroll_max']+model.initial_state['enroll_step'], model.initial_state['enroll_step']):
# "simulated" exogenous info that helps us get from (t, t') to (t, t'+1)
bellman_potential_pop = model.state.potential_pop + x_enroll
bellman_enrollments = math.floor(model.state.l_response * bellman_potential_pop)
bellman_cost = -(model.initial_state['program_cost'] + model.initial_state['patient_cost'] * x_enroll)
# loops over success values in increments of step_succ
step_succ = int(bellman_enrollments / 3) + 1
for set_succ in range(0, bellman_enrollments, step_succ):
bellman_state = copy.deepcopy(model.initial_state)
bellman_state['potential_pop'] = bellman_potential_pop
bellman_state['success'] = model.state.success + set_succ
bellman_state['failure'] = model.state.failure + (bellman_enrollments - set_succ)
bellman_M = ClinicalTrialsModel(model.state_variables, model.decision_variables, bellman_state, True)
value_key=(iteration+1,bellman_state['potential_pop'],bellman_state['success'],bellman_state['failure']) #Remember to include here bellman_state['l_response'] when solving question 6
count=-1
# implements sampled distribution for bellman_p_true
bellman_p_samples = np.random.beta(bellman_M.state.success, bellman_M.state.failure, bellman_M.initial_state['K'])
bellman_p_belief = bellman_M.state.success / (bellman_M.state.success + bellman_M.state.failure)
if bellman_p_belief > bellman_M.initial_state['theta_stop_high']:
success_index = 1
step_value = model.initial_state['success_rev']
#print("LA State: {}, ({}, {}), {} - Stopping time {}".format(bellman_state['potential_pop'],bellman_state['success'],bellman_state['failure'],model.state.l_response,iteration))
elif bellman_p_belief < bellman_M.initial_state['theta_stop_low']:
success_index = 0
step_value = 0
else:
if value_key in value_dict:
step_value = value_dict[value_key][0]
count = value_dict[value_key][1]
#print("key: {} value: {:.2f} count: {} lendict:{}".format(value_key,step_value,count,len(value_dict)))
else:
sol_dict,value_dict = model_B_value_fn(bellman_M, iteration+1, success_index,value_dict)
step_value = sol_dict['value']
value_dict.update({value_key:[step_value,count+1]})
for k in range(0, bellman_M.initial_state['K']):
bellman_cost += binom.pmf(set_succ, bellman_enrollments, bellman_p_samples[k]) * 1/bellman_M.initial_state['K'] * step_value
bellman_decisions.append(x_enroll)
bellman_vals.append(bellman_cost)
value = max(bellman_vals)
optimal_enroll = bellman_decisions[bellman_vals.index(value)]
return {"value": value, "optimal_enroll": optimal_enroll},value_dict
# stops iterating at horizon t' = t + H
else: return {"value": 0,"optimal_enroll": 0},value_dict
# stops experiment at node if drug is declared success or failure
else: return {"value": model.initial_state['success_rev'] * success_index,"optimal_enroll": 0},value_dict
#Copy model_B_value_fn here and do the modifications for question 6
def model_C_extension_value_fn(model, iteration, success_index,value_dict):
return {} #Get rid of this
#
def func_simple(pseudo_state, a, b, c, d):
"""
linear fit function for the Bellman value at given pseudo-state (for small number of data points)
:param pseudo_state: list(float) - list of the four state variables for a given state
:param a, b, c, d, e: float - parameters of the linear fit function
"""
sum = a*pseudo_state[0] + b*pseudo_state[1] + c*pseudo_state[2] + d*pseudo_state[3]
return sum
def func(pseudo_state, a1, a2, b1, b2, c1, c2, d1, d2):
"""
quadratic fit function for the Bellman value at given pseudo-state
:param pseudo_state: list(float) - list of the four state variables for a given state
:param a1, a2, ... d2: float - parameters of the quadratic fit function
"""
sum = a1*pseudo_state[0]**2 + a2*pseudo_state[0]
sum += b1*pseudo_state[1]**2 + b2*pseudo_state[1]
sum += c1*pseudo_state[2]**2 + c2*pseudo_state[2]
sum += d1*pseudo_state[3]**2 + d2*pseudo_state[3]
return sum
# Copyright (c) 2013-2020 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import copy
import logging
from .exceptions import (NetError, ASNRegistryError, ASNParseError,
ASNLookupError, HTTPLookupError, WhoisLookupError,
WhoisRateLimitError, ASNOriginLookupError)
if sys.version_info >= (3, 3): # pragma: no cover
from ipaddress import ip_network
else: # pragma: no cover
from ipaddr import IPNetwork as ip_network
log = logging.getLogger(__name__)
BASE_NET = {
'cidr': None,
'description': None,
'maintainer': None,
'updated': None,
'source': None
}
ASN_ORIGIN_WHOIS = {
'radb': {
'server': 'whois.radb.net',
'fields': {
'description': r'(descr):[^\S\n]+(?P<val>.+?)\n',
'maintainer': r'(mnt-by):[^\S\n]+(?P<val>.+?)\n',
'updated': r'(changed):[^\S\n]+(?P<val>.+?)\n',
'source': r'(source):[^\S\n]+(?P<val>.+?)\n',
}
},
}
ASN_ORIGIN_HTTP = {
'radb': {
'url': 'http://www.radb.net/query',
'form_data_asn_field': 'keywords',
'form_data': {
'advanced_query': '1',
'query': 'Query',
# '-T option': 'inet-rtr',
'ip_option': '',
'-i': '1',
'-i option': 'origin'
},
'fields': {
'description': r'(descr):[^\S\n]+(?P<val>.+?)\n',
'maintainer': r'(mnt-by):[^\S\n]+(?P<val>.+?)\n',
'updated': r'(changed):[^\S\n]+(?P<val>.+?)\n',
'source': r'(source):[^\S\n]+(?P<val>.+?)\<',
}
},
}
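# Illustrative sketch (not part of ipwhois itself): each 'fields' regex above captures its
# value in the named group 'val'. A minimal extraction pass over a RADB whois text blob
# could look like the helper below; the name `parse_origin_fields` is an assumption.
def parse_origin_fields(blob, fields=ASN_ORIGIN_WHOIS['radb']['fields']):
    results = {}
    for name, pattern in fields.items():
        # each pattern has two groups (label, value), so findall yields tuples
        values = [match[1] for match in re.findall(pattern, blob)]
        if values:
            results[name] = ', '.join(values)
    return results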
class IPASN:
"""
The class for parsing ASN data for an IP address.
Args:
net (:obj:`ipwhois.net.Net`): A ipwhois.net.Net object.
Raises:
NetError: The parameter provided is not an instance of
ipwhois.net.Net
"""
def __init__(self, net):
from .net import (Net, ORG_MAP)
from .whois import RIR_WHOIS
# ipwhois.net.Net validation
if isinstance(net, Net):
self._net = net
else:
raise NetError('The provided net parameter is not an instance of '
'ipwhois.net.Net')
self.org_map = ORG_MAP
self.rir_whois = RIR_WHOIS
def parse_fields_dns(self, response):
"""
The function for parsing ASN fields from a dns response.
Args:
response (:obj:`str`): The response from the ASN dns server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
try:
temp = response.split('|')
# Parse out the ASN information.
ret = {'asn_registry': temp[3].strip(' \n')}
if ret['asn_registry'] not in self.rir_whois.keys():
raise ASNRegistryError(
'ASN registry {0} is not known.'.format(
ret['asn_registry'])
)
ret['asn'] = temp[0].strip(' "\n').split()[0]
ret['asn_cidr'] = temp[1].strip(' \n')
ret['asn_country_code'] = temp[2].strip(' \n').upper()
ret['asn_date'] = temp[4].strip(' "\n')
ret['asn_description'] = None
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return ret
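# Illustrative input/output (comments only; the sample values are for demonstration):
# a pipe-delimited origin TXT answer such as
#   '"15169 | 8.8.8.0/24 | US | arin | 1992-12-01"'
# is split on '|' and parsed by the method above into
#   {'asn_registry': 'arin', 'asn': '15169', 'asn_cidr': '8.8.8.0/24',
#    'asn_country_code': 'US', 'asn_date': '1992-12-01', 'asn_description': None}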
def parse_fields_verbose_dns(self, response):
"""
The function for parsing ASN fields from a verbose dns response.
Args:
response (:obj:`str`): The response from the ASN dns server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
try:
temp = response.split('|')
# Parse out the ASN information.
ret = {'asn_registry': temp[2].strip(' \n')}
if ret['asn_registry'] not in self.rir_whois.keys():
raise ASNRegistryError(
'ASN registry {0} is not known.'.format(
ret['asn_registry'])
)
ret['asn'] = temp[0].strip(' "\n')
ret['asn_cidr'] = None
ret['asn_country_code'] = temp[1].strip(' \n').upper()
ret['asn_date'] = temp[3].strip(' \n')
ret['asn_description'] = temp[4].strip(' "\n')
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return ret
def parse_fields_whois(self, response):
"""
The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
try:
temp = response.split('|')
# Parse out the ASN information.
ret = {'asn_registry': temp[4].strip(' \n')}
if ret['asn_registry'] not in self.rir_whois.keys():
raise ASNRegistryError(
'ASN registry {0} is not known.'.format(
ret['asn_registry'])
)
ret['asn'] = temp[0].strip(' \n')
ret['asn_cidr'] = temp[2].strip(' \n')
ret['asn_country_code'] = temp[3].strip(' \n').upper()
ret['asn_date'] = temp[5].strip(' \n')
ret['asn_description'] = temp[6].strip(' \n')
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return ret
def parse_fields_http(self, response, extra_org_map=None):
"""
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
# Set the org_map. Map the orgRef handle to an RIR.
org_map = self.org_map.copy()
try:
org_map.update(extra_org_map)
except (TypeError, ValueError, IndexError, KeyError):
pass
try:
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None,
'asn_description': None
}
try:
net_list = response['nets']['net']
if not isinstance(net_list, list):
net_list = [net_list]
except (KeyError, TypeError):
log.debug('No networks found')
net_list = []
for n in reversed(net_list):
try:
asn_data['asn_registry'] = (
org_map[n['orgRef']['@handle'].upper()]
)
except KeyError as e:
log.debug('Could not parse ASN registry via HTTP: '
'{0}'.format(str(e)))
continue
break
if not asn_data['asn_registry']:
log.debug('Could not parse ASN registry via HTTP')
raise ASNRegistryError('ASN registry lookup failed.')
except ASNRegistryError:
raise
except Exception as e: # pragma: no cover
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return asn_data
def lookup(self, inc_raw=False, retry_count=3, extra_org_map=None,
asn_methods=None, get_asn_description=True):
"""
The wrapper function for retrieving and parsing ASN information for an
IP address.
Args:
inc_raw (:obj:`bool`): Whether to include the raw results in the
returned dictionary. Defaults to False.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
extra_org_map (:obj:`dict`): Mapping org handles to RIRs. This is
for limited cases where ARIN REST (ASN fallback HTTP lookup)
does not show an RIR as the org handle e.g., DNIC (which is
now the built in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR
values are (note the case-sensitive - this is meant to match
the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'
Defaults to None.
asn_methods (:obj:`list`): ASN lookup types to attempt, in order.
If None, defaults to all: ['dns', 'whois', 'http'].
get_asn_description (:obj:`bool`): Whether to run an additional
query when pulling ASN information via dns, in order to get
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class protocolip_stats(base_resource) :
"""Statistics for protocolip resource."""
def __init__(self) :
self._clearstats = ""
self._iptotrxpkts = 0
self._iprxpktsrate = 0
self._iptotrxbytes = 0
self._iprxbytesrate = 0
self._iptottxpkts = 0
self._iptxpktsrate = 0
self._iptottxbytes = 0
self._iptxbytesrate = 0
self._iptotrxmbits = 0
self._iprxmbitsrate = 0
self._iptottxmbits = 0
self._iptxmbitsrate = 0
self._iptotroutedpkts = 0
self._iproutedpktsrate = 0
self._iptotroutedmbits = 0
self._iproutedmbitsrate = 0
self._iptotfragments = 0
self._iptotsuccreassembly = 0
self._iptotreassemblyattempt = 0
self._iptotaddrlookup = 0
self._iptotaddrlookupfail = 0
self._iptotudpfragmentsfwd = 0
self._iptottcpfragmentsfwd = 0
self._iptotfragpktsgen = 0
self._iptotbadchecksums = 0
self._iptotunsuccreassembly = 0
self._iptottoobig = 0
self._iptotzerofragmentlen = 0
self._iptotdupfragments = 0
self._iptotoutoforderfrag = 0
self._iptotunknowndstrcvd = 0
self._iptotbadtransport = 0
self._iptotvipdown = 0
self._iptotfixheaderfail = 0
self._iptotttlexpired = 0
self._iptotmaxclients = 0
self._iptotunknownsvcs = 0
self._iptotlandattacks = 0
self._iptotinvalidheadersz = 0
self._iptotinvalidpacketsize = 0
self._iptottruncatedpackets = 0
self._noniptottruncatedpackets = 0
self._iptotzeronexthop = 0
self._iptotbadlens = 0
self._iptotbadmacaddrs = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full."""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
:param clearstats:
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def iproutedpktsrate(self) :
"""Rate (/s) counter for iptotroutedpkts."""
try :
return self._iproutedpktsrate
except Exception as e:
raise e
@property
def iptotoutoforderfrag(self) :
"""Fragments received that are out of order."""
try :
return self._iptotoutoforderfrag
except Exception as e:
raise e
@property
def noniptottruncatedpackets(self) :
"""Truncated non-IP packets received."""
try :
return self._noniptottruncatedpackets
except Exception as e:
raise e
@property
def iptotmaxclients(self) :
"""Attempts to open a new connection to a service for which the maximum limit has been exceeded. Default value, 0, applies no limit."""
try :
return self._iptotmaxclients
except Exception as e:
raise e
@property
def iptotzerofragmentlen(self) :
"""Packets received with a fragment length of 0 bytes."""
try :
return self._iptotzerofragmentlen
except Exception as e:
raise e
@property
def iptxbytesrate(self) :
"""Rate (/s) counter for iptottxbytes."""
try :
return self._iptxbytesrate
except Exception as e:
raise e
@property
def iptotvipdown(self) :
"""Packets received for which the VIP is down. This can occur when all the services bound to the VIP are down or the VIP is manually disabled."""
try :
return self._iptotvipdown
except Exception as e:
raise e
@property
def iptotroutedpkts(self) :
"""Total routed packets."""
try :
return self._iptotroutedpkts
except Exception as e:
raise e
@property
def iprxmbitsrate(self) :
"""Rate (/s) counter for iptotrxmbits."""
try :
return self._iprxmbitsrate
except Exception as e:
raise e
@property
def iprxpktsrate(self) :
"""Rate (/s) counter for iptotrxpkts."""
try :
return self._iprxpktsrate
except Exception as e:
raise e
@property
def iptxmbitsrate(self) :
"""Rate (/s) counter for iptottxmbits."""
try :
return self._iptxmbitsrate
except Exception as e:
raise e
@property
def iptotroutedmbits(self) :
"""Total routed Mbits."""
try :
return self._iptotroutedmbits
except Exception as e:
raise e
@property
def iptotinvalidheadersz(self) :
"""Packets received in which an invalid data length is specified, or the value in the length field and the actual data length do not match. The range for the Ethernet packet data length is 0-1500 bytes."""
try :
return self._iptotinvalidheadersz
except Exception as e:
raise e
@property
def iptotaddrlookupfail(self) :
"""IP address lookups performed by the NetScaler that have failed because the destination IP address of the packet does not match any of the NetScaler owned IP addresses."""
try :
return self._iptotaddrlookupfail
except Exception as e:
raise e
@property
def iptotbadtransport(self) :
"""Packets received in which the protocol specified in the IP header is unknown to the NetScaler."""
try :
return self._iptotbadtransport
except Exception as e:
raise e
@property
def iptotfragpktsgen(self) :
"""Fragmented packets created by the NetScaler."""
try :
return self._iptotfragpktsgen
except Exception as e:
raise e
@property
def iptotsuccreassembly(self) :
"""Fragmented IP packets successfully reassembled on the NetScaler."""
try :
return self._iptotsuccreassembly
except Exception as e:
raise e
@property
def iptotrxmbits(self) :
"""Megabits of IP data received."""
try :
return self._iptotrxmbits
except Exception as e:
raise e
@property
def iptottxbytes(self) :
"""Bytes of IP data transmitted."""
try :
return self._iptottxbytes
except Exception as e:
raise e
@property
def iptotbadmacaddrs(self) :
"""IP packets transmitted with a bad MAC address."""
try :
return self._iptotbadmacaddrs
except Exception as e:
raise e
@property
def iptottcpfragmentsfwd(self) :
"""TCP fragments forwarded to the client or the server."""
try :
return self._iptottcpfragmentsfwd
except Exception as e:
raise e
@property
def iptotrxpkts(self) :
"""IP packets received."""
try :
return self._iptotrxpkts
except Exception as e:
raise e
@property
def iptotrxbytes(self) :
"""Bytes of IP data received."""
try :
return self._iptotrxbytes
except Exception as e:
raise e
@property
def iptotlandattacks(self) :
"""Land-attack packets received. The source and destination addresses are the same."""
try :
return self._iptotlandattacks
except Exception as e:
raise e
@property
def iptotunknowndstrcvd(self) :
"""Packets received in which the destination IP address was not reachable or not owned by the NetScaler."""
try :
return self._iptotunknowndstrcvd
except Exception as e:
raise e
@property
def iptottruncatedpackets(self) :
"""Truncated IP packets received. An overflow in the routers along the path can truncate IP packets."""
try :
return self._iptottruncatedpackets
except Exception as e:
raise e
@property
def iptotttlexpired(self) :
"""Packets for which the time-to-live (TTL) expired during transit. These packets are dropped."""
try :
return self._iptotttlexpired
except Exception as e:
raise e
@property
def iprxbytesrate(self) :
"""Rate (/s) counter for iptotrxbytes."""
try :
return self._iprxbytesrate
except Exception as e:
raise e
@property
def iptottxmbits(self) :
"""Megabits of IP data transmitted."""
try :
return self._iptottxmbits
except Exception as e:
raise e
@property
def iptotbadlens(self) :
"""Packets received with a length greater than the normal maximum transmission unit of 1514 bytes."""
try :
return self._iptotbadlens
except Exception as e:
raise e
@property
def iptotunknownsvcs(self) :
"""Packets received on a port or service that is not configured."""
try :
return self._iptotunknownsvcs
except Exception as e:
raise e
@property
def iptotdupfragments(self) :
"""Duplicate IP fragments received. This can occur when the acknowledgement was not received within the expected time."""
try :
return self._iptotdupfragments
except Exception as e:
raise e
@property
def iptottoobig(self) :
"""Packets received for which the reassembled data exceeds the Ethernet packet data length of 1500 bytes."""
try :
return self._iptottoobig
except Exception as e:
raise e
@property
def iptotzeronexthop(self) :
"""Packets received that contain a 0 value in the next hop field. These packets are dropped."""
try :
return self._iptotzeronexthop
except Exception as e:
raise e
@property
def iptotaddrlookup(self) :
"""IP address lookups performed by the NetScaler. When a packet is received on a non-established session, the NetScaler checks if the destination IP address is one of the NetScaler owned IP addresses."""
try :
return self._iptotaddrlookup
except Exception as e:
raise e
@property
def iptotfragments(self) :
"""IP fragments received."""
try :
return self._iptotfragments
except Exception as e:
raise e
@property
def iptotinvalidpacketsize(self) :
"""Total number of packets received by NetScaler with invalid IP packet size."""
try :
return self._iptotinvalidpacketsize
except Exception as e:
raise e
@property
def iptotunsuccreassembly(self) :
"""Packets received that could not be reassembled. This can occur when there is a checksum failure, an identification field mismatch, or when one of the fragments is missing."""
try :
return self._iptotunsuccreassembly
except Exception as e:
raise e
@property
def iptotreassemblyattempt(self) :
"""IP packets | |
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
# not be documented for external users. These will generally be used for internal test or only
# given to customers when they have been briefed on the side effects of using them.
INTERNAL_ONLY_PROPERTIES = {
"__module__",
"__doc__",
"create_transaction",
"SESSION_COOKIE_NAME",
"SESSION_COOKIE_HTTPONLY",
"SESSION_COOKIE_SAMESITE",
"DATABASE_SECRET_KEY",
"V22_NAMESPACE_BLACKLIST",
"MAXIMUM_CNR_LAYER_SIZE",
"OCI_NAMESPACE_WHITELIST",
"FEATURE_GENERAL_OCI_SUPPORT",
"FEATURE_HELM_OCI_SUPPORT",
"FEATURE_NAMESPACE_GARBAGE_COLLECTION",
"FEATURE_REPOSITORY_GARBAGE_COLLECTION",
"FEATURE_REPOSITORY_ACTION_COUNTER",
"APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST",
"APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST",
"FEATURE_MANIFEST_SIZE_BACKFILL",
"TESTING",
"SEND_FILE_MAX_AGE_DEFAULT",
"DISABLED_FOR_AUDIT_LOGS",
"DISABLED_FOR_PULL_LOGS",
"FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES",
"FEATURE_CLEAR_EXPIRED_RAC_ENTRIES",
"ACTION_LOG_MAX_PAGE",
"NON_RATE_LIMITED_NAMESPACES",
"REPLICATION_QUEUE_NAME",
"DOCKERFILE_BUILD_QUEUE_NAME",
"CHUNK_CLEANUP_QUEUE_NAME",
"SECURITY_SCANNER_ISSUER_NAME",
"NOTIFICATION_QUEUE_NAME",
"REPOSITORY_GC_QUEUE_NAME",
"NAMESPACE_GC_QUEUE_NAME",
"EXPORT_ACTION_LOGS_QUEUE_NAME",
"SECSCAN_V4_NOTIFICATION_QUEUE_NAME",
"FEATURE_BILLING",
"BILLING_TYPE",
"INSTANCE_SERVICE_KEY_LOCATION",
"INSTANCE_SERVICE_KEY_REFRESH",
"INSTANCE_SERVICE_KEY_SERVICE",
"INSTANCE_SERVICE_KEY_KID_LOCATION",
"INSTANCE_SERVICE_KEY_EXPIRATION",
"UNAPPROVED_SERVICE_KEY_TTL_SEC",
"EXPIRED_SERVICE_KEY_TTL_SEC",
"REGISTRY_JWT_AUTH_MAX_FRESH_S",
"SERVICE_LOG_ACCOUNT_ID",
"BUILDLOGS_OPTIONS",
"LIBRARY_NAMESPACE",
"STAGGER_WORKERS",
"QUEUE_WORKER_METRICS_REFRESH_SECONDS",
"PUSH_TEMP_TAG_EXPIRATION_SEC",
"GARBAGE_COLLECTION_FREQUENCY",
"PAGE_TOKEN_KEY",
"BUILD_MANAGER",
"JWTPROXY_AUDIENCE",
"JWTPROXY_SIGNER",
"SECURITY_SCANNER_INDEXING_MIN_ID",
"SECURITY_SCANNER_V4_REINDEX_THRESHOLD",
"STATIC_SITE_BUCKET",
"LABEL_KEY_RESERVED_PREFIXES",
"TEAM_SYNC_WORKER_FREQUENCY",
"JSONIFY_PRETTYPRINT_REGULAR",
"TUF_GUN_PREFIX",
"LOGGING_LEVEL",
"SIGNED_GRANT_EXPIRATION_SEC",
"PROMETHEUS_PUSHGATEWAY_URL",
"DB_TRANSACTION_FACTORY",
"NOTIFICATION_SEND_TIMEOUT",
"QUEUE_METRICS_TYPE",
"MAIL_FAIL_SILENTLY",
"LOCAL_OAUTH_HANDLER",
"USE_CDN",
"ANALYTICS_TYPE",
"LAST_ACCESSED_UPDATE_THRESHOLD_S",
"GREENLET_TRACING",
"EXCEPTION_LOG_TYPE",
"SENTRY_DSN",
"SENTRY_PUBLIC_DSN",
"BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT",
"THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT",
"IP_DATA_API_KEY",
"SECURITY_SCANNER_ENDPOINT_BATCH",
"SECURITY_SCANNER_API_TIMEOUT_SECONDS",
"SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS",
"SECURITY_SCANNER_ENGINE_VERSION_TARGET",
"SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS",
"SECURITY_SCANNER_API_VERSION",
"REPO_MIRROR_INTERVAL",
"DATA_MODEL_CACHE_CONFIG",
# TODO: move this into the schema once we support signing in QE.
"FEATURE_SIGNING",
"TUF_SERVER",
"V1_ONLY_DOMAIN",
"LOGS_MODEL",
"LOGS_MODEL_CONFIG",
"APP_REGISTRY_RESULTS_LIMIT",
"V3_UPGRADE_MODE", # Deprecated old flag
"ACCOUNT_RECOVERY_MODE",
}
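# Illustrative sketch only (not Quay's actual code): one way a set like
# INTERNAL_ONLY_PROPERTIES can be used is to filter internal-only keys out of the
# user-facing configuration documentation. The helper name below is an assumption
# made for this example.
def documentable_config_keys(config_keys):
    """Return the externally documentable subset of the given config keys."""
    return sorted(k for k in config_keys if k not in INTERNAL_ONLY_PROPERTIES)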
CONFIG_SCHEMA = {
"type": "object",
"description": "Schema for Quay configuration",
"required": [
"PREFERRED_URL_SCHEME",
"SERVER_HOSTNAME",
"DB_URI",
"AUTHENTICATION_TYPE",
"DISTRIBUTED_STORAGE_CONFIG",
"BUILDLOGS_REDIS",
"USER_EVENTS_REDIS",
"DISTRIBUTED_STORAGE_PREFERENCE",
"DEFAULT_TAG_EXPIRATION",
"TAG_EXPIRATION_OPTIONS",
],
"properties": {
"REGISTRY_STATE": {
"type": "string",
"description": "The state of the registry.",
"enum": ["normal", "readonly"],
"x-example": "readonly",
},
# Hosting.
"PREFERRED_URL_SCHEME": {
"type": "string",
"description": "The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`",
"enum": ["http", "https"],
"x-example": "https",
},
"SERVER_HOSTNAME": {
"type": "string",
"description": "The URL at which Quay is accessible, without the scheme.",
"x-example": "quay.io",
},
"EXTERNAL_TLS_TERMINATION": {
"type": "boolean",
"description": "If TLS is supported, but terminated at a layer before Quay, must be true.",
"x-example": True,
},
# SSL/TLS.
"SSL_CIPHERS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL ciphers to enabled and disabled",
"x-example": ["CAMELLIA", "!3DES"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers",
},
"SSL_PROTOCOLS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL protocols to enabled and disabled",
"x-example": ["TLSv1.1", "TLSv1.2"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols",
},
# User-visible configuration.
"REGISTRY_TITLE": {
"type": "string",
"description": "If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "Corp Container Service",
},
"REGISTRY_TITLE_SHORT": {
"type": "string",
"description": "If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "CCS",
},
"CONTACT_INFO": {
"type": "array",
"uniqueItems": True,
"description": "If specified, contact information to display on the contact page. "
+ "If only a single piece of contact information is specified, the contact footer will link directly.",
"items": [
{
"type": "string",
"pattern": "^mailto:(.)+$",
"x-example": "mailto:<EMAIL>",
"description": "Adds a link to send an e-mail",
},
{
"type": "string",
"pattern": "^irc://(.)+$",
"x-example": "irc://chat.freenode.net:6665/quay",
"description": "Adds a link to visit an IRC chat room",
},
{
"type": "string",
"pattern": "^tel:(.)+$",
"x-example": "tel:+1-888-930-3475",
"description": "Adds a link to call a phone number",
},
{
"type": "string",
"pattern": "^http(s)?://(.)+$",
"x-example": "https://twitter.com/quayio",
"description": "Adds a link to a defined URL",
},
],
},
"SEARCH_RESULTS_PER_PAGE": {
"type": "number",
"description": "Number of results returned per page by search page. Defaults to 10",
"x-example": 10,
},
"SEARCH_MAX_RESULT_PAGE_COUNT": {
"type": "number",
"description": "Maximum number of pages the user can paginate in search before they are limited. Defaults to 10",
"x-example": 10,
},
# E-mail.
"FEATURE_MAILING": {
"type": "boolean",
"description": "Whether emails are enabled. Defaults to True",
"x-example": True,
},
"MAIL_SERVER": {
"type": "string",
"description": "The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.",
"x-example": "smtp.somedomain.com",
},
"MAIL_USE_TLS": {
"type": "boolean",
"description": "If specified, whether to use TLS for sending e-mails.",
"x-example": True,
},
"MAIL_PORT": {
"type": "number",
"description": "The SMTP port to use. If not specified, defaults to 587.",
"x-example": 588,
},
"MAIL_USERNAME": {
"type": ["string", "null"],
"description": "The SMTP username to use when sending e-mails.",
"x-example": "myuser",
},
"MAIL_PASSWORD": {
"type": ["string", "null"],
"description": "The SMTP password to use when sending e-mails.",
"x-example": "<PASSWORD>",
},
"MAIL_DEFAULT_SENDER": {
"type": ["string", "null"],
"description": "If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `<EMAIL>`.",
"x-example": "<EMAIL>",
},
# Database.
"DB_URI": {
"type": "string",
"description": "The URI at which to access the database, including any credentials.",
"x-example": "mysql+pymysql://username:[email protected]/quay",
"x-reference": "https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495",
},
"DB_CONNECTION_ARGS": {
"type": "object",
"description": "If specified, connection arguments for the database such as timeouts and SSL.",
"properties": {
"threadlocals": {
"type": "boolean",
"description": "Whether to use thread-local connections. Should *ALWAYS* be `true`",
},
"autorollback": {
"type": "boolean",
"description": "Whether to use auto-rollback connections. Should *ALWAYS* be `true`",
},
"ssl": {
"type": "object",
"description": "SSL connection configuration",
"properties": {
"ca": {
"type": "string",
"description": "*Absolute container path* to the CA certificate to use for SSL connections",
"x-example": "conf/stack/ssl-ca-cert.pem",
},
},
"required": ["ca"],
},
},
"required": ["threadlocals", "autorollback"],
},
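        # Example (illustrative values only) of a config entry that validates against
        # the DB_CONNECTION_ARGS schema above:
        #   DB_CONNECTION_ARGS:
        #     threadlocals: true
        #     autorollback: true
        #     ssl:
        #       ca: conf/stack/ssl-ca-cert.pem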
"ALLOW_PULLS_WITHOUT_STRICT_LOGGING": {
"type": "boolean",
"description": "If true, pulls in which the pull audit log entry cannot be written will "
+ "still succeed. Useful if the database can fallback into a read-only state "
+ "and it is desired for pulls to continue during that time. Defaults to False.",
"x-example": True,
},
# Storage.
"FEATURE_STORAGE_REPLICATION": {
"type": "boolean",
"description": "Whether to automatically replicate between storage engines. Defaults to False",
"x-example": False,
},
"FEATURE_PROXY_STORAGE": {
"type": "boolean",
"description": "Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False",
"x-example": False,
},
"MAXIMUM_LAYER_SIZE": {
"type": "string",
"description": "Maximum allowed size of an image layer. Defaults to 20G",
"x-example": "100G",
"pattern": "^[0-9]+(G|M)$",
},
"DISTRIBUTED_STORAGE_CONFIG": {
"type": "object",
"description": "Configuration for storage engine(s) to use in Quay. Each key is a unique ID"
+ " for a storage engine, with the value being a tuple of the type and "
+ " configuration for that engine.",
"x-example": {
"local_storage": ["LocalStorage", {"storage_path": "some/path/"}],
},
"items": {
"type": "array",
},
},
"DISTRIBUTED_STORAGE_PREFERENCE": {
"type": "array",
"description": "The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to "
+ "use. A preferred engine means it is first checked for pullig and images are "
+ "pushed to it.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS": {
"type": "array",
"description": "The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose "
+ "images should be fully replicated, by default, to all other storage engines.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"USERFILES_LOCATION": {
"type": "string",
"description": "ID of the storage engine in which to place user-uploaded files",
"x-example": "s3_us_east",
},
"USERFILES_PATH": {
"type": "string",
"description": "Path under storage in which to place user-uploaded files",
"x-example": "userfiles",
},
"ACTION_LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If action log archiving is enabled, the storage engine in which to place the "
+ "archived data.",
"x-example": "s3_us_east",
},
"ACTION_LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If action log archiving is enabled, the path in storage in which to place the "
+ "archived data.",
"x-example": "archives/actionlogs",
},
"ACTION_LOG_ROTATION_THRESHOLD": {
"type": "string",
"description": "If action log archiving is enabled, the time interval after which to "
+ "archive data.",
"x-example": "30d",
},
"LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If builds are enabled, the storage engine in which to place the "
+ "archived build logs.",
"x-example": "s3_us_east",
},
"LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If builds are enabled, the path in storage in which to place the "
+ "archived build logs.",
"x-example": "archives/buildlogs",
},
# Authentication.
"AUTHENTICATION_TYPE": {
"type": "string",
"description": "The authentication engine to use for credential authentication.",
"x-example": "Database",
"enum": ["Database", "LDAP", "JWT", "Keystone", "OIDC", "AppToken"],
},
"SUPER_USERS": {
"type": "array",
"description": "Quay usernames of those users to be granted superuser privileges",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"DIRECT_OAUTH_CLIENTID_WHITELIST": {
"type": "array",
"description": "A list of client IDs of *Quay-managed* applications that are allowed "
+ "to perform direct OAuth approval without user approval.",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html",
"uniqueItems": True,
"items": {
"type": "string",
},
},
# Redis.
"BUILDLOGS_REDIS": {
"type": "object",
"description": "Connection information | |
            # The unscaled volume should be 4.0 at the start
handler.assert_allclose(
funcs["DVCon1_volume_constraint_0"], 4.0 * np.ones(1), name="volume_base", rtol=1e-7, atol=1e-7
)
def test_4(self, train=False, refDeriv=False):
"""
Test 4: LeTe Constraint using the ilow, ihigh method
There's no need to test this with the rectangular box
because it doesn't depend on a projected pointset (only the FFD)
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_04.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 4: LETE constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
DVCon.addLeTeConstraints(0, "iLow")
DVCon.addLeTeConstraints(0, "iHigh")
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
# LeTe constraints should be all zero at the start
for i in range(2):
handler.assert_allclose(
funcs["DVCon1_lete_constraint_" + str(i)], np.zeros(4), name="lete_" + str(i), rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
# Global DVs should produce no change, especially twist
for i in range(2):
handler.assert_allclose(
funcs["DVCon1_lete_constraint_" + str(i)],
np.zeros(4),
name="lete_twisted_" + str(i),
rtol=1e-7,
atol=1e-7,
)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_5(self, train=False, refDeriv=False):
"""
Test 5: Thickness-to-chord constraint
There's no need to test this with the rectangular box
because it doesn't depend on a projected pointset (only the FFD)
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_05.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 5: t/c constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
ptList = [[0.8, 0.0, 0.1], [0.8, 0.0, 5.0]]
DVCon.addThicknessToChordConstraints1D(ptList, nCon=10, axis=[0, 1, 0], chordDir=[1, 0, 0])
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_thickness_to_chord_constraints_0"], np.ones(10), name="toverc_base", rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_thickness_to_chord_constraints_0"],
np.ones(10),
name="toverc_twisted",
rtol=1e-3,
atol=1e-3,
)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_6(self, train=False, refDeriv=False):
"""
Test 6: Surface area constraint
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_06.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 6: surface area constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
DVCon.addSurfaceAreaConstraint()
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_surfaceArea_constraints_0"], np.ones(1), name="surface_area_base", rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_surfaceArea_constraints_0"], np.ones(1), name="surface_area_twisted", rtol=1e-3, atol=1e-3
)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_6b(self, train=False, refDeriv=False):
"""
Test 6b: Surface area constraint
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_06b.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 6: surface area constraint, rectangular box")
DVGeo, DVCon = self.generate_dvgeo_dvcon_rect()
DVCon.addSurfaceAreaConstraint(scaled=False)
# 2x1x8 box has surface area 2*(8*2+1*2+8*1) = 52
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_surfaceArea_constraints_0"],
52.0 * np.ones(1),
name="surface_area_base",
rtol=1e-7,
atol=1e-7,
)
def test_7(self, train=False, refDeriv=False):
"""
Test 7: Projected area constraint
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_07.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 7: projected area constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
DVCon.addProjectedAreaConstraint()
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_projectedArea_constraints_0"],
np.ones(1),
name="projected_area_base",
rtol=1e-7,
atol=1e-7,
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_7b(self, train=False, refDeriv=False):
"""
Test 7b: Projected area constraint
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_07b.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 7b: projected area constraint, rectangular box")
DVGeo, DVCon = self.generate_dvgeo_dvcon_rect()
DVCon.addProjectedAreaConstraint(scaled=False)
DVCon.addProjectedAreaConstraint(axis="z", scaled=False)
DVCon.addProjectedAreaConstraint(axis="x", scaled=False)
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler, checkDerivs=False)
handler.assert_allclose(
funcs["DVCon1_projectedArea_constraints_0"],
8 * 2 * np.ones(1),
name="projected_area_base",
rtol=1e-7,
atol=1e-7,
)
handler.assert_allclose(
funcs["DVCon1_projectedArea_constraints_1"],
1 * 2 * np.ones(1),
name="projected_area_base",
rtol=1e-7,
atol=1e-7,
)
handler.assert_allclose(
funcs["DVCon1_projectedArea_constraints_2"],
8 * 1 * np.ones(1),
name="projected_area_base",
rtol=1e-7,
atol=1e-7,
)
def test_8(self, train=False, refDeriv=False):
"""
Test 8: Circularity constraint
No need to test this with the rectangular box
because it only depends on the FFD, no projected points
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_08.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 8: Circularity constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
DVCon.addCircularityConstraint(
origin=[0.8, 0.0, 2.5],
rotAxis=[0.0, 0.0, 1.0],
radius=0.1,
zeroAxis=[0.0, 1.0, 0.0],
angleCW=180.0,
angleCCW=180.0,
nPts=10,
)
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_circularity_constraints_0"], np.ones(9), name="circularity_base", rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_circularity_constraints_0"], np.ones(9), name="circularity_twisted", rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_9(self, train=False, refDeriv=False):
"""
Test 9: Colinearity constraint
No need to test this with the rectangular box
because it only depends on the FFD, no projected points
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_09.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 9: Colinearity constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
DVCon.addColinearityConstraint(
np.array([0.7, 0.0, 1.0]), lineAxis=np.array([0.0, 0.0, 1.0]), distances=[0.0, 1.0, 2.5]
)
# Skip derivatives check here because true zero values cause difficulties for the partials
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler, checkDerivs=False)
handler.assert_allclose(
funcs["DVCon1_colinearity_constraints_0"], np.zeros(3), name="colinearity_base", rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_10(self, train=False, refDeriv=False):
"""
Test 10: LinearConstraintShape
No need to test this with the rectangular box
because it only depends on the FFD, no projected points
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_10.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 10: LinearConstraintShape, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
lIndex = DVGeo.getLocalIndex(0)
indSetA = []
indSetB = []
for i in range(lIndex.shape[0]):
indSetA.append(lIndex[i, 0, 0])
indSetB.append(lIndex[i, 0, 1])
DVCon.addLinearConstraintsShape(indSetA, indSetB, factorA=1.0, factorB=-1.0, lower=0, upper=0)
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
funcs, funcsSens = self.c172_test_deformed(DVGeo, DVCon, handler)
def test_11(self, train=False, refDeriv=False):
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_11.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 11: CompositeVolumeConstraint, rectangular box")
DVGeo, DVCon = self.generate_dvgeo_dvcon_rect()
# this projects in the z direction which is of dimension 8
# 1x0.5x8 = 4
leList = [[-0.5, -0.25, 0.1], [0.5, -0.25, 0.1]]
teList = [[-0.5, 0.25, 0.1], [0.5, 0.25, 0.1]]
# this projects in the x direction which is of dimension 2
# 2x0.6x7.8 = 9.36
leList2 = [[0.0, -0.25, 0.1], [0.0, -0.25, 7.9]]
teList2 = [[0.0, 0.35, 0.1], [0.0, 0.35, 7.9]]
DVCon.addVolumeConstraint(leList, teList, 4, 4, scaled=False, addToPyOpt=False)
DVCon.addVolumeConstraint(leList2, teList2, 4, 4, scaled=False, addToPyOpt=False)
vols = ["DVCon1_volume_constraint_0", "DVCon1_volume_constraint_1"]
DVCon.addCompositeVolumeConstraint(vols=vols, scaled=False)
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
            # With scaled=False, the volumes should equal their geometric values at the start
handler.assert_allclose(
funcs["DVCon1_volume_constraint_0"], 4.0 * np.ones(1), name="volume1_base", rtol=1e-7, atol=1e-7
)
handler.assert_allclose(
funcs["DVCon1_volume_constraint_1"], 9.36 * np.ones(1), name="volume2_base", rtol=1e-7, atol=1e-7
)
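            # The unscaled composite constraint is simply the sum of the two volumes:
            # 4.0 + 9.36 = 13.36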
handler.assert_allclose(
funcs["DVCon1_composite_volume_constraint_2"],
13.36 * np.ones(1),
name="volume_composite_base",
rtol=1e-7,
atol=1e-7,
)
def test_12(self, train=False, refDeriv=False):
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_12.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 12: LocationConstraints1D, rectangular box")
DVGeo, DVCon = self.generate_dvgeo_dvcon_rect()
ptList = [[0.0, 0.0, 0.0], [0.0, 0.0, 8.0]]
ptList2 = [[0.0, 0.2, 0.0], [0.0, -0.2, 8.0]]
# TODO this constraint seems buggy. for example, when scaled, returns a bunch of NaNs
DVCon.addLocationConstraints1D(ptList=ptList, nCon=10, scaled=False)
DVCon.addProjectedLocationConstraints1D(ptList=ptList2, nCon=10, scaled=False, axis=[0.0, 1.0, 0.0])
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
exact_vals = np.zeros((30,))
exact_vals[2::3] = np.linspace(0, 8, 10)
# should be 10 evenly spaced points along the z axis originating from 0,0,0
handler.assert_allclose(
funcs["DVCon1_location_constraints_0"], exact_vals, name="locations_match", rtol=1e-7, atol=1e-7
)
handler.assert_allclose(
funcs["DVCon1_location_constraints_1"],
exact_vals,
name="projected_locations_match",
rtol=1e-7,
atol=1e-7,
)
def test_13(self, train=False, refDeriv=False):
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_13.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 13: PlanarityConstraint, rectangular box")
DVGeo, DVCon = self.generate_dvgeo_dvcon_rect()
DVCon.addPlanarityConstraint(origin=[0.0, 0.5, 0.0], planeAxis=[0.0, 1.0, 0.0])
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
def test_13b(self, train=False, refDeriv=False):
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_13b.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 13: PlanarityConstraint, rectangular box")
ffdfile = os.path.join(self.base_path, "../inputFiles/2x1x8_rectangle.xyz")
DVGeo = DVGeometry(ffdfile)
DVGeo.addGeoDVLocal("local", lower=-0.5, upper=0.5, axis="y", scale=1)
# create a DVConstraints object with a simple plane consisting of 2 triangles
DVCon = DVConstraints()
DVCon.setDVGeo(DVGeo)
p0 = np.zeros(shape=(2, 3))
p1 = np.zeros(shape=(2, 3))
p2 = np.zeros(shape=(2, 3))
vertex1 = np.array([0.5, -0.25, 0.0])
vertex2 = np.array([0.5, -0.25, 4.0])
vertex3 = np.array([-0.5, -0.25, 0.0])
vertex4 = np.array([-0.5, -0.25, 4.0])
p0[:, :] = vertex1
p2[:, :] = vertex4
p1[0, :] = vertex2
p1[1, :] = vertex3
v1 = p1 - p0
v2 = p2 - p0
DVCon.setSurface([p0, v1, v2])
DVCon.addPlanarityConstraint(origin=[0.0, -0.25, 2.0], planeAxis=[0.0, 1.0, 0.0])
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler, checkDerivs=False)
            # this should be coplanar and the planarity constraint should be zero
handler.assert_allclose(
funcs["DVCon1_planarity_constraints_0"], np.zeros(1), name="planarity", rtol=1e-7, atol=1e-7
)
def test_14(self, train=False, refDeriv=False):
"""
Test 14: Monotonic constraint
"""
refFile = os.path.join(self.base_path, "ref/test_DVConstraints_14.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 14: Monotonic constraint, C172 wing")
DVGeo, DVCon = self.generate_dvgeo_dvcon_c172()
DVCon.addMonotonicConstraints("twist")
DVCon.addMonotonicConstraints("twist", start=1, stop=2)
funcs, funcsSens = self.generic_test_base(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_monotonic_constraint_0"], np.zeros(2), name="monotonicity", rtol=1e-7, atol=1e-7
)
funcs, funcsSens = self.c172_test_twist(DVGeo, DVCon, handler)
handler.assert_allclose(
funcs["DVCon1_monotonic_constraint_0"],
-5.0 * np.ones(2),
name="monotonicity_twisted",
rtol=1e-7,
atol=1e-7,
)
funcs = dict()
funcsSens = dict()
# change the DVs arbitrarily
xDV = DVGeo.getValues()
xDV["twist"][0] = 1.0
xDV["twist"][1] = -3.5
xDV["twist"][2] = | |
# Repository: phoenix-xhuang/ymir
from enum import Enum
import json
import os
import shutil
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
import uuid
import xml.etree.ElementTree as ElementTree
import lmdb
from PIL import Image, UnidentifiedImageError
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import class_ids, data_reader
from mir.tools.code import MirCode
from mir.tools.errors import MirRuntimeError
class AssetFormat(str, Enum):
ASSET_FORMAT_UNKNOWN = 'unknown'
ASSET_FORMAT_RAW = 'raw'
ASSET_FORMAT_LMDB = 'lmdb'
class AnnoFormat(str, Enum):
ANNO_FORMAT_UNKNOWN = 'unknown'
ANNO_FORMAT_NO_ANNOTATION = 'none'
ANNO_FORMAT_ARK = 'ark'
ANNO_FORMAT_VOC = 'voc'
ANNO_FORMAT_LS_JSON = 'ls_json' # label studio json format
def check_support_format(anno_format: str) -> bool:
return anno_format in support_format_type()
def support_format_type() -> List[str]:
return [f.value for f in AnnoFormat]
def support_asset_format_type() -> List[str]:
return [f.value for f in AssetFormat]
def format_type_from_str(anno_format: str) -> AnnoFormat:
return AnnoFormat(anno_format.lower())
def asset_format_type_from_str(asset_format: str) -> AssetFormat:
return AssetFormat(asset_format.lower())
def get_export_type(type_str: str) -> Tuple[AnnoFormat, AssetFormat]:
if not type_str:
return (AnnoFormat.ANNO_FORMAT_ARK, AssetFormat.ASSET_FORMAT_RAW)
ef, af = type_str.split(':')
return (AnnoFormat(ef), AssetFormat(af))
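# Illustrative usage sketch (not part of the original module): how the export type
# helpers compose. The "voc:raw" string is an assumed example value.
def _example_export_type_usage() -> None:
    # "voc:raw" means VOC-format annotations together with raw (non-LMDB) assets.
    anno_format, asset_format = get_export_type("voc:raw")
    assert anno_format == AnnoFormat.ANNO_FORMAT_VOC
    assert asset_format == AssetFormat.ASSET_FORMAT_RAW
    # An empty string falls back to ark annotations with raw assets.
    assert get_export_type("") == (AnnoFormat.ANNO_FORMAT_ARK, AssetFormat.ASSET_FORMAT_RAW)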
def _format_file_output_func(anno_format: AnnoFormat) -> Callable:
_format_func_map = {
AnnoFormat.ANNO_FORMAT_ARK: _single_image_annotations_to_ark,
AnnoFormat.ANNO_FORMAT_VOC: _single_image_annotations_to_voc,
AnnoFormat.ANNO_FORMAT_LS_JSON: _single_image_annotations_to_ls_json,
}
return _format_func_map[anno_format]
def _format_file_ext(anno_format: AnnoFormat) -> str:
_format_ext_map = {
AnnoFormat.ANNO_FORMAT_ARK: '.txt',
AnnoFormat.ANNO_FORMAT_VOC: '.xml',
AnnoFormat.ANNO_FORMAT_LS_JSON: '.json',
}
return _format_ext_map[anno_format]
def _single_image_annotations_to_ark(asset_id: str, attrs: mirpb.MetadataAttributes,
image_annotations: mirpb.SingleImageAnnotations, image_cks: mirpb.SingleImageCks,
class_type_mapping: Optional[Dict[int, int]], cls_id_mgr: class_ids.ClassIdManager,
asset_filename: str) -> str:
output_str = ""
for annotation in image_annotations.annotations:
mapped_id = class_type_mapping[annotation.class_id] if class_type_mapping else annotation.class_id
output_str += f"{mapped_id}, {annotation.box.x}, {annotation.box.y}, "
output_str += f"{annotation.box.x + annotation.box.w - 1}, {annotation.box.y + annotation.box.h - 1}, "
output_str += f"{annotation.anno_quality}\n"
return output_str
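# Worked example (assumed numbers): with no class id mapping, a class-2 box at
# x=10, y=20, w=30, h=40 with anno_quality 0.9 produces the line
#   "2, 10, 20, 39, 59, 0.9"
# where the last corner coordinates are inclusive, hence the -1 offsets above.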
def _single_image_annotations_to_voc(asset_id: str, attrs: mirpb.MetadataAttributes,
image_annotations: mirpb.SingleImageAnnotations, image_cks: mirpb.SingleImageCks,
class_type_mapping: Optional[Dict[int, int]], cls_id_mgr: class_ids.ClassIdManager,
asset_filename: str) -> str:
annotations = image_annotations.annotations
# annotation
annotation_node = ElementTree.Element('annotation')
# annotation: folder
folder_node = ElementTree.SubElement(annotation_node, 'folder')
folder_node.text = 'folder'
# annotation: filename
filename_node = ElementTree.SubElement(annotation_node, 'filename')
filename_node.text = asset_filename
# annotation: source
source_node = ElementTree.SubElement(annotation_node, 'source')
# annotation: source: database
database_node = ElementTree.SubElement(source_node, 'database')
database_node.text = attrs.dataset_name or 'unknown'
# annotation: source: annotation
annotation2_node = ElementTree.SubElement(source_node, 'annotation')
annotation2_node.text = 'unknown'
# annotation: source: image
image_node = ElementTree.SubElement(source_node, 'image')
image_node.text = 'unknown'
# annotation: size
size_node = ElementTree.SubElement(annotation_node, 'size')
# annotation: size: width
width_node = ElementTree.SubElement(size_node, 'width')
width_node.text = str(attrs.width)
# annotation: size: height
height_node = ElementTree.SubElement(size_node, 'height')
height_node.text = str(attrs.height)
# annotation: size: depth
depth_node = ElementTree.SubElement(size_node, 'depth')
depth_node.text = str(attrs.image_channels)
# annotation: segmented
segmented_node = ElementTree.SubElement(annotation_node, 'segmented')
segmented_node.text = '0'
# annotation: cks and sub nodes
cks_node = ElementTree.SubElement(annotation_node, 'cks')
for k, v in image_cks.cks.items():
ElementTree.SubElement(cks_node, k).text = v
# annotation: image_quality
image_quality_node = ElementTree.SubElement(annotation_node, 'image_quality')
image_quality_node.text = f"{image_cks.image_quality:.4f}"
# annotation: object(s)
for annotation in annotations:
object_node = ElementTree.SubElement(annotation_node, 'object')
name_node = ElementTree.SubElement(object_node, 'name')
name_node.text = cls_id_mgr.main_name_for_id(annotation.class_id) or 'unknown'
pose_node = ElementTree.SubElement(object_node, 'pose')
pose_node.text = 'unknown'
truncated_node = ElementTree.SubElement(object_node, 'truncated')
truncated_node.text = 'unknown'
occluded_node = ElementTree.SubElement(object_node, 'occluded')
occluded_node.text = '0'
bndbox_node = ElementTree.SubElement(object_node, 'bndbox')
xmin_node = ElementTree.SubElement(bndbox_node, 'xmin')
xmin_node.text = str(annotation.box.x)
ymin_node = ElementTree.SubElement(bndbox_node, 'ymin')
ymin_node.text = str(annotation.box.y)
xmax_node = ElementTree.SubElement(bndbox_node, 'xmax')
xmax_node.text = str(annotation.box.x + annotation.box.w - 1)
ymax_node = ElementTree.SubElement(bndbox_node, 'ymax')
ymax_node.text = str(annotation.box.y + annotation.box.h - 1)
difficult_node = ElementTree.SubElement(object_node, 'difficult')
difficult_node.text = '0'
tags_node = ElementTree.SubElement(object_node, 'tags')
for k, v in annotation.tags.items():
ElementTree.SubElement(tags_node, k).text = v
box_quality_node = ElementTree.SubElement(object_node, 'box_quality')
box_quality_node.text = f"{annotation.anno_quality:.4f}"
return ElementTree.tostring(element=annotation_node, encoding='unicode')
def _single_image_annotations_to_ls_json(asset_id: str, attrs: mirpb.MetadataAttributes,
image_annotations: mirpb.SingleImageAnnotations,
image_cks: mirpb.SingleImageCks, class_type_mapping: Optional[Dict[int, int]],
cls_id_mgr: class_ids.ClassIdManager, asset_filename: str) -> str:
annotations = image_annotations.annotations
out_type = "predictions" # out_type: annotation type - "annotations" or "predictions"
to_name = 'image' # to_name: object name from Label Studio labeling config
from_name = 'label' # control tag name from Label Studio labeling config
task: Dict = {
out_type: [{
"result": [],
"ground_truth": False,
}],
"data": {
"image": asset_filename
}
}
for annotation in annotations:
bbox_x, bbox_y = float(annotation.box.x), float(annotation.box.y)
bbox_width, bbox_height = float(annotation.box.w), float(annotation.box.h)
img_width, img_height = attrs.width, attrs.height
item = {
"id": uuid.uuid4().hex[0:10], # random id to identify this annotation.
"type": "rectanglelabels",
"value": {
# Units of image annotations in label studio is percentage of image width/height.
# https://labelstud.io/guide/predictions.html#Units-of-image-annotations
"x": bbox_x / img_width * 100,
"y": bbox_y / img_height * 100,
"width": bbox_width / img_width * 100,
"height": bbox_height / img_height * 100,
"rotation": 0,
"rectanglelabels": [cls_id_mgr.main_name_for_id(annotation.class_id) or 'unknown']
},
"to_name": to_name,
"from_name": from_name,
"image_rotation": 0,
"original_width": img_width,
"original_height": img_height
}
task[out_type][0]['result'].append(item)
return json.dumps(task)
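# Worked example (assumed numbers): for a 100 x 80 pixel image with a box at
# x=10, y=8, w=20, h=16, the emitted "value" block contains
#   {"x": 10.0, "y": 10.0, "width": 20.0, "height": 20.0, "rotation": 0, ...}
# since Label Studio expects box coordinates as percentages of image width/height.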
class BaseDataWriter:
def __init__(self, mir_root: str, assets_location: str, class_ids_mapping: Dict[int, int],
format_type: AnnoFormat) -> None:
if not assets_location:
raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty assets_location')
self._class_id_manager = class_ids.ClassIdManager(mir_root=mir_root)
self._assets_location = assets_location
self._class_ids_mapping = class_ids_mapping
self._format_type = format_type
def _write(self, asset_id: str, attrs: mirpb.MetadataAttributes, image_annotations: mirpb.SingleImageAnnotations,
gt_annotations: mirpb.SingleImageAnnotations, image_cks: mirpb.SingleImageCks) -> None:
"""
write assets and annotations to destination with proper format
Args:
asset_id (str): asset hash code
attrs (mirpb.MetadataAttributes): attributes to this asset
            image_annotations (mirpb.SingleImageAnnotations): annotations to this asset
            gt_annotations (mirpb.SingleImageAnnotations): ground truth annotations to this asset
            image_cks (mirpb.SingleImageCks): cks to this asset
"""
raise NotImplementedError('not implemented')
def _close(self) -> None:
"""
close writer
"""
raise NotImplementedError('not implemented')
def write_all(self, dr: data_reader.MirDataReader) -> None:
"""
        write all data from the data reader
"""
raise NotImplementedError('not implemented')
class RawDataWriter(BaseDataWriter):
def __init__(self,
mir_root: str,
assets_location: str,
assets_dir: str,
annotations_dir: str,
need_ext: bool,
need_id_sub_folder: bool,
overwrite: bool,
class_ids_mapping: Dict[int, int],
format_type: AnnoFormat,
index_file_path: str = '',
gt_dir: str = '',
gt_index_file_path: str = '',
index_assets_prefix: str = '',
index_annotations_prefix: str = '',
index_gt_prefix: str = '') -> None:
"""
Args:
assets_location (str): path to assets storage directory
assets_dir (str): export asset directory
annotations_dir (str): export annotation directory, if format_type is NO_ANNOTATION, this could be None
gt_dir (str): export ground-truth directory, if format_type is NO_ANNOTATION, this could be None
            need_ext (bool): if true, all exported assets will have their image type (jpg, png, etc.) appended as the file extension
need_id_sub_folder (bool): if True, use last 2 chars of asset id as a sub folder name
format_type (AnnoFormat): format type, NONE means exports no annotations
overwrite (bool): if true, export assets even if they are exist in destination position
class_ids_mapping (Dict[int, int]): key: ymir class id, value: class id in exported annotation files
index_file_path (str): path to index file, if None, generates no index file
gt_index_file_path (str): path to gt_index file, if None, generates no index file
index_assets_prefix (str): prefix path added to each asset index path
index_annotations_prefix (str): prefix path added to each annotation index path
index_gt_prefix (str): prefix path added to each groundtruth index path
"""
super().__init__(mir_root=mir_root,
assets_location=assets_location,
class_ids_mapping=class_ids_mapping,
format_type=format_type)
# prepare out dirs
os.makedirs(assets_dir, exist_ok=True)
if annotations_dir:
os.makedirs(annotations_dir, exist_ok=True)
if gt_dir:
os.makedirs(gt_dir, exist_ok=True)
if index_file_path:
os.makedirs(os.path.dirname(index_file_path), exist_ok=True)
if gt_index_file_path:
if index_file_path == gt_index_file_path:
raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
error_message='cannot set same value for index_file_path/gt_index_file_path')
os.makedirs(os.path.dirname(gt_index_file_path), exist_ok=True)
self._assets_dir = assets_dir
self._annotations_dir = annotations_dir
self._gt_dir = gt_dir
self._need_ext = need_ext
self._need_id_sub_folder = need_id_sub_folder
self._format_type = format_type
self._index_file = open(index_file_path, 'w') if index_file_path else None
self._gt_index_file = open(gt_index_file_path, 'w') if gt_index_file_path else None
self._index_assets_prefix = index_assets_prefix
self._index_annotations_prefix = index_annotations_prefix
self._index_gt_prefix = index_gt_prefix
self._overwrite = overwrite
def _write(self, asset_id: str, attrs: mirpb.MetadataAttributes, image_annotations: mirpb.SingleImageAnnotations,
gt_annotations: mirpb.SingleImageAnnotations, image_cks: mirpb.SingleImageCks) -> None:
# write asset
asset_src_path = os.path.join(self._assets_location, asset_id)
sub_folder_name = asset_id[-2:] if self._need_id_sub_folder else ''
asset_file_name = asset_id
if self._need_ext:
try:
asset_image = Image.open(asset_src_path)
asset_file_name = f"{asset_file_name}.{asset_image.format.lower()}" # type: ignore
except UnidentifiedImageError:
asset_file_name = f"{asset_file_name}.unknown"
asset_dest_dir = os.path.join(self._assets_dir, sub_folder_name)
os.makedirs(asset_dest_dir, exist_ok=True)
asset_dest_path = os.path.join(asset_dest_dir, asset_file_name)
if self._overwrite or not os.path.isfile(asset_dest_path):
shutil.copyfile(asset_src_path, asset_dest_path)
anno_file_name = ''
if self._format_type != AnnoFormat.ANNO_FORMAT_NO_ANNOTATION:
anno_file_name = f"{asset_id}{_format_file_ext(self._format_type)}"
format_func = _format_file_output_func(anno_format=self._format_type)
# write annotations
if self._annotations_dir:
anno_str: str = format_func(asset_id=asset_id,
attrs=attrs,
image_annotations=image_annotations,
image_cks=image_cks,
class_type_mapping=self._class_ids_mapping,
cls_id_mgr=self._class_id_manager,
asset_filename=asset_file_name)
anno_dest_dir = os.path.join(self._annotations_dir, sub_folder_name)
os.makedirs(anno_dest_dir, exist_ok=True)
anno_dest_path = os.path.join(anno_dest_dir, anno_file_name)
with open(anno_dest_path, 'w') as f:
f.write(anno_str)
# write groundtruth
if self._gt_dir:
gt_str: str = format_func(asset_id=asset_id,
attrs=attrs,
image_annotations=gt_annotations,
image_cks=image_cks,
class_type_mapping=self._class_ids_mapping,
cls_id_mgr=self._class_id_manager,
asset_filename=asset_file_name)
gt_dest_dir = os.path.join(self._gt_dir, sub_folder_name)
os.makedirs(gt_dest_dir, exist_ok=True)
gt_dest_path = os.path.join(gt_dest_dir, anno_file_name)
with open(gt_dest_path, 'w') as f:
f.write(gt_str)
# write index file
asset_path_in_index_file = os.path.join(self._index_assets_prefix, sub_folder_name, asset_file_name)
if self._index_file:
if self._format_type != AnnoFormat.ANNO_FORMAT_NO_ANNOTATION:
anno_path_in_index_file = os.path.join(self._index_annotations_prefix, sub_folder_name, anno_file_name)
self._index_file.write(f"{asset_path_in_index_file}\t{anno_path_in_index_file}\n")
else:
self._index_file.write(f"{asset_path_in_index_file}\n")
if self._gt_index_file:
if self._format_type != AnnoFormat.ANNO_FORMAT_NO_ANNOTATION:
gt_path_in_index_file = os.path.join(self._index_gt_prefix, sub_folder_name, anno_file_name)
self._gt_index_file.write(f"{asset_path_in_index_file}\t{gt_path_in_index_file}\n")
else:
self._gt_index_file.write(f"{asset_path_in_index_file}\n")
def _close(self) -> None:
if self._index_file:
self._index_file.close()
self._index_file = None
if self._gt_index_file:
self._gt_index_file.close()
self._gt_index_file = None
def write_all(self, dr: data_reader.MirDataReader) -> None:
for v in dr.read():
self._write(*v)
self._close()
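# Illustrative sketch (not part of the original module): constructing a RawDataWriter
# and exporting everything from a data reader. All paths below are assumed
# placeholders, not values from any real project.
def _example_raw_export(dr: data_reader.MirDataReader) -> None:
    writer = RawDataWriter(
        mir_root='/tmp/mir_repo',                  # assumed path
        assets_location='/tmp/assets_store',       # assumed path
        assets_dir='/tmp/export/assets',
        annotations_dir='/tmp/export/annotations',
        need_ext=True,
        need_id_sub_folder=False,
        overwrite=False,
        class_ids_mapping={},                      # empty mapping keeps ymir class ids unchanged
        format_type=AnnoFormat.ANNO_FORMAT_VOC,
        index_file_path='/tmp/export/index.tsv',
    )
    writer.write_all(dr)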
class LmdbDataWriter(BaseDataWriter):
def __init__(
self,
mir_root: str,
| |
# Reverse photography
##h3D-II sensor size
# 36 * 48 mm, 0.036 x 0.048m
## focal length
# 28mm, 0.028m
## multiplier
# 1.0
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
import cv2
from scipy.spatial import distance
import shapefile as shp
def buildshape(corners, filename):
"""build a shapefile geometry from the vertices of the image in
world coordinates, then save it using the image name. Sub critical"""
#create a shapefile instance
#shape = shp.writer(shape.POLYGON)
#shape.poly(parts = [[proj_coords[:,0], proj_coords[:,1]], [proj_coords[:,1], proj_coords[:,2]]
# [proj_coords[:,3], proj_coords[:,2]], [proj_coords[:,0], proj_coords[:,3]]]
#shape.save("./", filename)
def worldfile(corners, im_pix, filename, filepath):
"""build a world file from the vertices of the image in
world coordinates, then save it using the image name.
here we build a small array and then dump it to a file
input is:
    - projected corners in world coordinates (*not* bounding box)
    - pixel resolution as a two-element vector [pix_x, pix_y]
    - the image file name
- path to warped image files
reference:
http://support.esri.com/en/knowledgebase/techarticles/detail/17489
"""
world_arr = np.zeros([6,1])
#line 1 is the X pixel resolution in M
world_arr[0] = im_pix[0]
    #line 4 is the negative Y pixel resolution in M (lines 2 and 3, the rotation terms, stay 0)
world_arr[3] = -im_pix[1]
#now the X coord of the top left corner
world_arr[4] = np.min(corners[0,:])
#and the Y coordinate of the top left corner
world_arr[5] = np.max(corners[1,:])
#strip some parts from the filename
filename = filename[0:len(filename)-4]
np.savetxt(filepath + filename + '.jpgw', world_arr, "%.3f")
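# Worked example (illustrative numbers only): for projected corners spanning
# x in [500000.0, 500163.2] and y in [4649000.0, 4649122.4] with 0.02 m pixels,
# worldfile() writes six lines:
#   0.020        line 1: x pixel size in metres
#   0.000        line 2: rotation term (unused here)
#   0.000        line 3: rotation term (unused here)
#   -0.020       line 4: negative y pixel size
#   500000.000   line 5: x of the top-left corner
#   4649122.400  line 6: y of the top-left corner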
#------
# 2D homogeneous vectors and transformations
def hom2(x, y):
"""2D homogeneous column vector."""
return np.matrix([x, y, 1]).T
def scale2d(s_x, s_y):
"""Scale matrix that scales 2D homogeneous coordinates"""
return np.matrix([[s_x, 0, 0],
[0, s_y, 0],
[0, 0, 1]] )
def trans2d(t_x, t_y):
"""Translation matrix that moves a (homogeneous) vector [v_x, v_y, 1]
to [v_x + t_x, v_y + t_y, 1]"""
return np.matrix([[1, 0, t_x],
[0, 1, t_y],
[0, 0, 1]] )
#-----
# 3D homogeneous vectors and transformations
def hom3(x, y, z):
"""3D homogeneous column vector."""
return np.matrix([x, y, z, 1]).T
def unhom(v):
"""Convert homogeneous coords (v_x, v_y, v_z, v_w) to 3D by
(v_x, v_y, v_z) / v_w."""
return v[:-1]/v[-1]
def trans3d(t):
"""Translation matrix that moves a (homogeneous) vector [v_x, v_y, v_z, 1]
to [v_x + t_x, v_y + t_y, v_z + t_z, 1]."""
I = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
return np.hstack([I, t])
# return np.matrix([[1, 0, 0, t_x],
# [0, 1, 0, t_y],
# [0, 0, 1, t_z],
# [0, 0, 0, 1 ]] )
def persp3d(v_x, v_y, v_z):
"""Perspective transformation in homogeneous coordinates
where v represents the viewer's position relative to the
display surface (i.e., (v_x, v_y) is centre, v_z is focal
distance."""
return np.matrix([[1, 0, -v_x/v_z, 0],
[0, 1, -v_y/v_z, 0],
[0, 0, 1, 0],
[0, 0, 1/v_z, 0]] )
def cross(a, b):
"""Compute 3D cross product of homogeneous vectors, returning
the result as a 3D column vector."""
a3, b3 = unhom(a), unhom(b)
return np.matrix(np.cross(a3.T, b3.T)).T
# Compute zero point in hom. coords
ZERO = hom3(0,0,0)
#-------
# Homogeneous lines, planes, and their intersections
# Based on the discussion here:
# http://math.stackexchange.com/questions/400268/equation-for-a-line-through-a-plane-in-homogeneous-coordinates
def line(v, x):
"""A homoegeneous line with direction v through a point x is the pair
(v, x `cross` v)."""
return np.vstack([unhom(v), cross(x, v)])
def plane(n, x):
"""A plane with normal n passing through x is represented homogeneously
by (n, -x.n)."""
n3 = unhom(n)
x3 = unhom(x)
return np.vstack([n3, -(x3.T * n3)])
def meet(P):
"""Compute the meet operator for the given plane W."""
n, d = P[:3], P[3].item(0,0)
nx = np.matrix([[0, -n[2], -n[1]],
[n[2], 0, -n[0]],
[-n[1], n[0], 0]])
left = np.vstack([np.diag([-d, -d, -d]), n.T])
right = np.vstack([nx, np.matrix([0, 0, 0])])
return np.hstack([left, right])
def intersect(L, P):
"""Compute the point of intersection between the line L and plane P.
Returned point is homogenous."""
return meet(P) * L
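# Worked example (for orientation, using the representations above): the ground plane
# z = 0 is the 4-vector P = (0, 0, 1, 0); the line through the point (0, 0, 100) with
# direction (0, 0, -1) has Pluecker coordinates L = (0, 0, -1 | 0, 0, 0), because the
# moment (0, 0, 100) x (0, 0, -1) is the zero vector. Their intersection, meet(P) * L,
# is the homogeneous point for the origin (0, 0, 0), which is where a camera looking
# straight down from 100 m would project the image centre.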
#-------
# Camera
class Attitude:
def __init__(self, heading, pitch, roll):
"""Construct a new attitude. Input in degrees, stored in radians.
ADS: adjusted heading by 180 to keep corner procession intact.
        TL_im -> TL_gnd, TR_im -> TR_gnd, BR_im -> BR_gnd, BL_im -> BL_gnd
"""
self.heading = heading * np.pi / 180.0
self.pitch = pitch * np.pi / 180.0
self.roll = roll * np.pi / 180.0
def rotation(self):
"""4 x 4 rotation matrix for 3D hom. vectors for this attitude."""
heading, pitch, roll = self.heading, self.pitch, self.roll
RX = np.matrix(
[[1, 0, 0, 0],
[0, np.cos(pitch), -np.sin(pitch), 0],
[0, np.sin(pitch), np.cos(pitch), 0],
[0, 0, 0, 1]] )
RY = np.matrix(
[[np.cos(roll), 0, np.sin(roll), 0],
[0, 1, 0, 0],
[-np.sin(roll), 0, np.cos(roll), 0],
[0, 0, 0, 1]] )
RZ = np.matrix(
[[np.cos(heading), -np.sin(heading), 0, 0],
[np.sin(heading), np.cos(heading), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]] )
return RZ * RY * RX
"""original rotations
RX = np.matrix(
[[1, 0, 0, 0],
[0, np.cos(roll), -np.sin(roll), 0],
[0, np.sin(roll), np.cos(roll), 0],
[0, 0, 0, 1]] )
RY = np.matrix(
[[np.cos(pitch), 0, np.sin(pitch), 0],
[0, 1, 0, 0],
[-np.sin(pitch), 0, np.cos(pitch), 0],
[0, 0, 0, 1]] )
"""
class Sensor:
def __init__(self, pixel_dim, sensor_dim, focal_length):
"""New sensor of given focal length, sensor dimensions (width, height)
in metres, and pixel dimensions (width, height)."""
self.pixels = pixel_dim
self.screen = sensor_dim
self.f = focal_length
def pixel_to_screen(self):
"""Returns a 3x3 matrix transformation that takes 2D hom. pixel
coordinates to 2D hom. sensor coordinates."""
px,sc = self.pixels, self.screen
T_centre = trans2d(-px[0]/2, -px[1]/2)
T_scale = scale2d(sc[0]/px[0], sc[1]/px[1])
return T_scale * T_centre
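    # Worked example (using the sensor defined further below, 8176 x 6132 px on a
    # 0.048 x 0.036 m chip): pixel (0, 0) is first shifted to (-4088, -3066) and then
    # scaled by 0.048/8176 and 0.036/6132 m/px, landing at (-0.024, -0.018) m, i.e.
    # the sensor corner relative to its centre.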
def fov_angle(self):
"""Get the FOV angle for this sensor."""
return 2 * np.arctan(self.w / (2 * self.f))
class Camera:
def __init__(self, position, attitude, sensor):
self.position = position
self.attitude = attitude
self.sensor = sensor
def pix_to_world(self):
"""Compute the matrix transform from image to world coordinates.
Returns a 4 x 3 matrix that converts 2D hom. coords to 3D hom. coords."""
T_px_to_sc = self.sensor.pixel_to_screen()
T_2d3d = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 1]],
dtype = 'float64' )
T_trans_s = trans3d(hom3(0, 0, self.sensor.f))
T_att = self.attitude.rotation()
T_trans_w = trans3d(self.position)
return T_trans_w * T_att * T_trans_s * T_2d3d * T_px_to_sc
def project(self, r, P):
"""Calculate (unhom.) 3D point on plane P that corresponds to pixel
coordinate given in hom. 2D coords by r."""
        # Construct a line through the pixel position in world coordinates and the camera position
r_w = self.pix_to_world() * r
v = ZERO + self.position - r_w
L = line(v, r_w)
        # Get location of the projection on the surface of the plane
return unhom(intersect(L, P))
#===========
# Set up example camera and ground plane
# Camera sensor is 640 x 480 pixel sensor that is 48mm x 36mm with focal length
# of 28mm and is located at (1000, 1000, 100) with a pitch of -15 degrees.
#
#test data for image 20121023_f13_0044.jpg
# local XYZ
#74.81761600 -55.15724800 303.97706400
#HPR
#306.977 -3.119 1.668
#need to know LiDAR mean range for the flight - let's say it is -30m
# relative to the ellipsoid... so we add that to aircraft Z.
xpix = 8176
ypix = 6132
sensor_x = 0.048
sensor_y = 0.036
focal_len = 0.028
# Set up corners of the image in pixel coordinates
botleft = hom2(0, 6132)
topleft = hom2(0, 0)
botright = hom2(8176, 6132)
topright = hom2(8176, 0)
raw_coords = np.hstack([topleft, topright, botright, botleft])
print("Pixel Coordinates:\n{}".format(raw_coords))
# Ground plane is z=0
ground = plane(hom3(0,0,1), hom3(0,0,0))
im_dir = '../SIPEX2_f9_test/'
trajectory_file = '../SIPEX2_f9_test/20121003_f9_survey_testims_utm.txt'
imlist = '../SIPEX2_f9_test/imnames.txt'
lidar_z = -2.5
# this will be replaced by an estimate for each camera centre...
cameracentres = np.genfromtxt(trajectory_file)
#apply any boresight misalignment information...
# from the 2012 camberidge calibration flight.
#can be determined empirically if you have time!
h_adj = -1.93
p_adj = 1.8292
r_adj = 1.2262
cameracentres[:,6] = cameracentres[:,6] + h_adj
cameracentres[:,5] = cameracentres[:,5] + p_adj
cameracentres[:,4] = cameracentres[:,4] + r_adj
with open(imlist) as f:
image_list = f.read().splitlines()
i = 0
#now to time-dependent things...
for image in image_list:
flight_x = cameracentres[i,1]
flight_y = cameracentres[i,2]
flight_z = cameracentres[i,3]
flight_h = cameracentres[i,6]
flight_p = cameracentres[i,5]
flight_r = cameracentres[i,4]
range_to_ground = flight_z - lidar_z;
print("camera E:\n{}".format(flight_x))
print("camera N:\n{}".format(flight_y))
print("camera U:\n{}".format(range_to_ground))
print("camera H:\n{}".format(flight_h))
print("camera P:\n{}".format(flight_p))
print("camera R:\n{}".format(flight_r))
camera = Camera(
hom3(flight_x, flight_y, range_to_ground),
Attitude(flight_h - 180, flight_p, flight_r),
Sensor((xpix, ypix), (sensor_x, sensor_y), focal_len))
proj_coords = np.hstack([
camera.project(topleft, ground), | |
# Repository: harsh306/rl-project, file: src/coinrun/trainers/batched_minecraft_mix.py
import os
import logging
import glob
import re
import math
import time
import csv
import multiprocessing
from queue import Empty
from collections import defaultdict
import pickle
import numpy as np
import random
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class BatchedTrainer(object):
def __init__(self, create_environment, create_policy, args):
self.create_environment = create_environment
self.create_policy = create_policy
self.args = args
random.seed(args.random_seed)
self.task_ids = []
for _ in range(100):
n = random.randint(1, 1000)
self.task_ids.append(n)
def runner(self, env_id, shared_buffer, fifo, num_timesteps, logdir, id):
proc_name = multiprocessing.current_process().name
logger.info("Runner %s started" % proc_name)
# local environment for runner
# env = self.create_environment(env_id, id, os.path.join(logdir, 'gym'), **vars(self.args))
env = self.create_environment(1, id, self.task_ids)
# copy of policy
policy = self.create_policy(env.observation_space, env.action_space, batch_size=1, stochastic=True, args=self.args)
# record episode lengths and rewards for statistics
episode_rewards = []
episode_lengths = []
episode_reward = 0
episode_length = 0
observation = env.reset()
observation = np.squeeze(observation, axis=0)
for i in range(math.ceil(float(num_timesteps) / self.args.num_local_steps)):
# copy weights from main network at the beginning of iteration
# the main network's weights are only read, never modified
# but we create our own model instance, because Keras is not thread-safe
policy.set_weights(pickle.loads(shared_buffer.raw))
observations = []
preds = []
rewards = []
terminals = []
infos = defaultdict(list)
for t in range(self.args.num_local_steps):
if self.args.display:
env.render()
# predict action probabilities (and state value)
gym_action, pred = policy.predict([observation])
# strip batch dimension
pred = [p[0] for p in pred]
# step environment and log data
observations.append(observation)
preds.append(pred)
observation, reward, terminal, info = env.step(gym_action[0])
observation = np.squeeze(observation, axis=0)
reward = reward[0]
terminal = terminal[0]
rewards.append(reward)
terminals.append(terminal)
# record environment diagnostics from info
for key, val in info.items():
try:
val = float(val)
infos[key].append(val)
except (TypeError, ValueError):
pass
episode_reward += reward
episode_length += 1
# reset if terminal state
if terminal:
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
episode_reward = 0
episode_length = 0
observation = env.reset()
observation = np.squeeze(observation, axis=0)
# predict value for the next observation
# needed for calculating n-step returns
_, pred = policy.predict([observation])
# strip batch dimension
pred = [p[0] for p in pred]
preds.append(pred)
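# Note: with this bootstrap value appended, n-step returns of the usual form
# R_t = r_t + gamma*r_{t+1} + ... + gamma^(n-1)*r_{t+n-1} + gamma^n * V(s_{t+n})
# can be computed downstream; that computation presumably lives inside policy.train().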
# send observations, actions, rewards and returns
# block if fifo is full
fifo.put((
observations,
preds,
rewards,
terminals,
episode_rewards,
episode_lengths,
{key: np.mean(val) for key, val in infos.items()}
))
episode_rewards = []
episode_lengths = []
env.close()
logger.info("Runner %s finished" % proc_name)
def trainer(self, policy, fifos, shared_buffer, start_timestep, num_timesteps, logdir):
proc_name = multiprocessing.current_process().name
logger.info("Trainer %s started" % proc_name)
# must import tensorflow here, otherwise sometimes it conflicts with multiprocessing
from common.tensorboard_utils import create_summary_writer, add_summary
writer = create_summary_writer(logdir)
timestep = start_timestep
total_episodes = 0
total_timesteps = 0
total_updates = 0
total_rewards = []
episode_rewards = []
episode_lengths = []
task_rewards = [[] for _ in range(len(self.args.load_mission))]
stats_start = time.time()
stats_timesteps = 0
stats_updates = 0
while total_timesteps < num_timesteps:
batch_observations = []
batch_preds = []
batch_rewards = []
batch_terminals = []
batch_timesteps = 0
mean_infos = defaultdict(list)
queue_sizes = []
# loop over fifos from all runners
for q, fifo in enumerate(fifos):
try:
# Queue.qsize() is not implemented on Mac, ignore as it is used only for diagnostics
try:
queue_sizes.append(fifo.qsize())
except NotImplementedError:
pass
# wait for a new trajectory and statistics
observations, preds, rewards, terminals, episode_reward, episode_length, mean_info = \
fifo.get(timeout=self.args.queue_timeout)
# add to batch
batch_observations.append(observations)
batch_preds.append(preds)
batch_rewards.append(rewards)
batch_terminals.append(terminals)
# log statistics
total_rewards += episode_reward
episode_rewards += episode_reward
episode_lengths += episode_length
batch_timesteps += len(observations)
task_rewards[q % len(task_rewards)] += episode_reward
for key, val in mean_info.items():
mean_infos[key].append(val)
except Empty:
# just ignore empty fifos, batch will be smaller
pass
# if any of the runners produced trajectories
if len(batch_observations) > 0:
timestep += batch_timesteps
# reorder dimensions for preds
batch_preds = [list(zip(*p)) for p in batch_preds]
batch_preds = list(zip(*batch_preds))
# train model
policy.train(batch_observations, batch_preds, batch_rewards, batch_terminals, timestep, writer)
# share model parameters
shared_buffer.raw = pickle.dumps(policy.get_weights(), pickle.HIGHEST_PROTOCOL)
total_timesteps += batch_timesteps
total_updates += self.args.repeat_updates
stats_timesteps += batch_timesteps
stats_updates += self.args.repeat_updates
for key, val in mean_infos.items():
add_summary(writer, "diagnostics/"+key, np.mean(val), timestep)
if timestep % self.args.stats_interval == 0:
total_episodes += len(episode_rewards)
stats_time = time.time() - stats_start
add_summary(writer, "game_stats/episodes", len(episode_rewards), timestep)
add_summary(writer, "game_stats/episode_reward_mean", np.mean(episode_rewards), timestep)
#add_summary(writer, "game_stats/episode_reward_stddev", np.std(episode_rewards), timestep)
add_summary(writer, "game_stats/episode_length_mean", np.mean(episode_lengths), timestep)
#add_summary(writer, "game_stats/episode_length_stddev", np.std(episode_lengths), timestep)
add_summary(writer, "game_stats/total_episodes", total_episodes, timestep)
add_summary(writer, "game_stats/total_timesteps", total_timesteps, timestep)
add_summary(writer, "game_stats/total_updates", total_updates, timestep)
add_summary(writer, "performance/updates_per_second", stats_updates / stats_time, timestep)
add_summary(writer, "performance/timesteps_per_second", stats_timesteps / stats_time, timestep)
add_summary(writer, "performance/estimated_runner_fps", stats_timesteps / self.args.num_runners / stats_time, timestep)
add_summary(writer, "performance/mean_queue_length", np.mean(queue_sizes), timestep)
for i, rewards in enumerate(task_rewards):
add_summary(writer, "curriculum_rewards/task%d_reward_mean" % i, np.mean(rewards), timestep)
add_summary(writer, "curriculum_episodes/task%d_episodes" % i, len(rewards), timestep)
logger.info("Step %d/%d: episodes %d, mean episode reward %.2f, mean episode length %.2f, timesteps/sec %.2f." %
(timestep, num_timesteps, len(episode_rewards), np.mean(episode_rewards), np.mean(episode_lengths),
stats_timesteps / stats_time))
episode_rewards = []
episode_lengths = []
task_rewards = [[] for _ in range(len(self.args.load_mission))]
stats_start = time.time()
stats_timesteps = 0
stats_updates = 0
if timestep % self.args.save_interval == 0:
policy.save_weights(os.path.join(logdir, "weights_%d.hdf5" % timestep))
#else:
#logger.warn("Empty batch, runners are falling behind!")
# save final weights
policy.save_weights(os.path.join(logdir, "weights_%d.hdf5" % timestep))
if self.args.csv_file:
# save command-line parameters and most important performance metrics to file
data = vars(self.args)
data['episode_reward_mean'] = np.mean(total_rewards)
data['total_episodes'] = total_episodes
data['total_timesteps'] = total_timesteps
data['total_updates'] = total_updates
header = sorted(data.keys())
# write the CSV file one directory above the experiment directory
csv_file = os.path.join(os.path.dirname(logdir), self.args.csv_file)
file_exists = os.path.isfile(csv_file)
with open(csv_file, 'a') as file:
writer = csv.DictWriter(file, delimiter=',', fieldnames=header)
if not file_exists:
writer.writeheader()
writer.writerow(data)
# collect child processes
while len(multiprocessing.active_children()) > 0:
for fifo in fifos:
# empty fifos just in case runners are waiting after them
try:
fifo.get(timeout=1)
except Empty:
pass
logger.info("Trainer %s finished" % proc_name)
def run(self, env_id, num_timesteps, logdir):
# use spawn method for starting subprocesses
ctx = multiprocessing.get_context('spawn')
# create dummy environment to be able to create model
env = self.create_environment(1, 0, self.task_ids)
logger.info("Observation space: " + str(env.observation_space))
logger.info("Action space: " + str(env.action_space))
# use fixed batch size ONLY when queue timeout is None, i.e. blocks indefinitely until full batch is achieved
# needed for stateful RNNs
batch_size = self.args.num_runners if self.args.queue_timeout is None else None
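# e.g. with num_runners=16 and queue_timeout=None, every update waits on all 16 fifos,
# so the batch size can be fixed at 16; with a finite timeout the number of trajectories
# per update varies, so batch_size stays None (dynamic).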
# create main model
policy = self.create_policy(env.observation_space, env.action_space, batch_size, True, self.args)
policy.summary()
env.close()
# check for commandline argument or previous weights file
start_timestep = 0
weights_file = None
if self.args.load_weights:
weights_file = self.args.load_weights
else:
files = glob.glob(os.path.join(logdir, "weights_*.hdf5"))
if files:
weights_file = max(files, key=lambda f: int(re.search(r'_(\d+)\.hdf5', f).group(1)))
# set start timestep from file name when continuing previous session
start_timestep = int(re.search(r'_(\d+)\.hdf5', weights_file).group(1))
logger.info("Setting start timestep to %d" % start_timestep)
# load saved weights
if weights_file:
logger.info("Loading weights: " + weights_file)
policy.load_weights(weights_file)
# create shared buffer for sharing weights
blob = pickle.dumps(policy.get_weights(), pickle.HIGHEST_PROTOCOL)
shared_buffer = ctx.Array('c', len(blob))
shared_buffer.raw = blob
# number of timesteps each runner has to make
runner_timesteps = math.ceil((num_timesteps - start_timestep) / self.args.num_runners)
# force runner processes to use cpu, child processes inherit environment variables
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# force child processes to use one thread only
os.environ["OMP_NUM_THREADS"] = "1"
# create fifos and threads for all runners
fifos = []
for i in range(self.args.num_runners):
fifo = ctx.Queue(self.args.queue_length)
fifos.append(fifo)
process = ctx.Process(target=self.runner, args=(env_id, shared_buffer, fifo, runner_timesteps, logdir, i))
process.start()
# start trainer in main thread
self.trainer(policy, fifos, shared_buffer, start_timestep, num_timesteps, logdir)
logger.info("All done")
def eval(self, env_id, num_timesteps, logdir):
env = self.create_environment(100, 0, self.task_ids)
logger.info("Observation space: " + str(env.observation_space))
logger.info("Action space: " + str(env.action_space))
# create main model
batch_size = 1
policy = self.create_policy(env.observation_space, env.action_space, batch_size, self.args.stochastic, self.args)
policy.summary()
weights_file = None
if self.args.load_weights:
weights_file = self.args.load_weights
else:
files = glob.glob(os.path.join(logdir, "weights_*.hdf5"))
if files:
weights_file = max(files, key=lambda f: int(re.search(r'_(\d+)\.hdf5', f).group(1)))
# load saved weights
if weights_file:
logger.info("Loading weights: " + weights_file)
policy.load_weights(weights_file)
# record episode lengths and rewards for statistics
episode_rewards = []
episode_lengths = []
episode_reward = 0
episode_length = 0
observation = env.reset()
for i in range(num_timesteps):
if self.args.display:
env.render()
# predict action probabilities (and state value)
gym_action, _ = policy.predict([observation])
# step environment and log data
observation, reward, terminal, info = env.step(gym_action[0])
observation = np.squeeze(observation, axis=0)
episode_reward += reward
episode_length += 1
# reset if terminal state
if terminal:
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
episode_reward = 0
episode_length = 0
observation = env.reset()
observation = np.squeeze(observation, axis=0)
logger.info("Episodes %d, | |
import torch
import numpy as np
import random
import argparse
from utils import plot_utils, eval_utils
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
import pandas as pd
hyperparams = {
### Training
## Batch Sizes
'batch_size': 16,
## Learning Rate
'learning_rate': 0.002,
'min_learning_rate': 0.00001,
'learning_decay_rate': 0.9999,
## Optimizer
# 'optimizer': tf.train.AdamOptimizer,
'optimizer_kwargs': {},
'grad_clip': 1.0,
### Prediction
'minimum_history_length': 8, # 3.2 seconds
'prediction_horizon': 12, # 4.8 seconds (at least as far as the loss function is concerned)
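# (at the 0.4 s frame interval implied above: 8 * 0.4 s = 3.2 s of history, 12 * 0.4 s = 4.8 s of prediction)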
### Variational Objective
## Objective Formulation
'alpha': 1,
'k': 3, # number of samples from z during training
'k_eval': 50, # number of samples from z during evaluation
'use_iwae': False, # only matters if alpha = 1
'kl_exact': True, # relevant only if alpha = 1
## KL Annealing/Bounding
'kl_min': 0.07,
'kl_weight': 1.0,
'kl_weight_start': 0.0001,
'kl_decay_rate': 0.99995,
'kl_crossover': 8000,
'kl_sigmoid_divisor': 6,
### Network Parameters
## RNNs/Summarization
'rnn_kwargs': {"dropout_keep_prob": 0.75},
'MLP_dropout_keep_prob': 0.9,
'rnn_io_dropout_keep_prob': 1.0,
'enc_rnn_dim_multiple_inputs': 8,
'enc_rnn_dim_edge': 8,
'enc_rnn_dim_edge_influence': 8,
'enc_rnn_dim_history': 32,
'enc_rnn_dim_future': 32,
'dec_rnn_dim': 128,
'dec_GMM_proj_MLP_dims': None,
'sample_model_during_dec': True,
'dec_sample_model_prob_start': 0.0,
'dec_sample_model_prob_final': 0.0,
'dec_sample_model_prob_crossover': 20000,
'dec_sample_model_prob_divisor': 6,
## q_z_xy (encoder)
'q_z_xy_MLP_dims': None,
## p_z_x (encoder)
'p_z_x_MLP_dims': 16,
## p_y_xz (decoder)
'fuzz_factor': 0.05,
'GMM_components': 16,
'log_sigma_min': -10,
'log_sigma_max': 10,
'log_p_yt_xz_max': 50,
### Discrete Latent Variable
'N': 2,
'K': 5,
## Relaxed One-Hot Temperature Annealing
'tau_init': 2.0,
'tau_final': 0.001,
'tau_decay_rate': 0.9999,
## Logit Clipping
'use_z_logit_clipping': False,
'z_logit_clip_start': 0.05,
'z_logit_clip_final': 3.0,
'z_logit_clip_crossover': 8000,
'z_logit_clip_divisor': 6
}
parser = argparse.ArgumentParser()
parser.add_argument('--sgan_models_path', type=str, default='../ref_impls/SocialGAN-PyTorch/models/sgan-models')
parser.add_argument('--sgan_dset_type', default='test', type=str)
parser.add_argument("--dynamic_edges", help="whether to use dynamic edges or not, options are 'no' and 'yes'",
type=str, default='yes')
parser.add_argument("--edge_radius", help="the radius (in meters) within which two nodes will be connected by an edge",
type=float, default=1.5)
parser.add_argument("--edge_state_combine_method", help="the method to use for combining edges of the same type",
type=str, default=None)
parser.add_argument("--edge_influence_combine_method", help="the method to use for combining edge influences",
type=str, default=None)
parser.add_argument('--edge_addition_filter', nargs='+', help="what scaling to use for edges as they're created",
type=float, default=[0.25, 0.5, 0.75, 1.0]) # We automatically pad left with 0.0
parser.add_argument('--edge_removal_filter', nargs='+', help="what scaling to use for edges as they're removed",
type=float, default=[1.0, 0.0]) # We automatically pad right with 0.0
parser.add_argument('--incl_robot_node', help="whether to include a robot node in the graph or simply model all agents",
action='store_true')
parser.add_argument('--num_samples', help='how many times to sample from the model',
type=int, default=2000)
parser.add_argument('--num_runs', help='how many scenes to predict per model evaluation',
type=int, default=100)
parser.add_argument('--device', help='what device to perform training on',
type=str, default='cpu')
parser.add_argument("--eval_device", help="what device to use during evaluation",
type=str, default='cpu')
parser.add_argument('--seed', help='manual seed to use, default is 123',
type=int, default=123)
args = parser.parse_args()
# 44.72 km/h = 12.42 m/s i.e. that's the max value that a velocity coordinate can be.
max_speed = 12.422222
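# sanity check: 44.72 km/h / 3.6 = 12.4222... m/s, matching the constant above.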
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
method_names = ['sgan', 'our_full', 'our_most_likely']
def pretty_dataset_name(dataset_name):
if dataset_name == 'eth':
return 'ETH - Univ'
elif dataset_name == 'hotel':
return 'ETH - Hotel'
elif dataset_name == 'univ':
return 'UCY - Univ'
elif dataset_name == 'zara1':
return 'UCY - Zara 1'
elif dataset_name == 'zara2':
return 'UCY - Zara 2'
else:
return dataset_name
def plot_run_trajs(data_precondition, dataset_name,
our_preds_most_likely_list, our_preds_list,
sgan_preds_list, sgan_gt_list, eval_inputs, eval_data_dict,
data_ids, t_predicts, random_scene_idxs, num_runs):
eval_dt = eval_data_dict['dt']
for run in range(num_runs):
plt.close('all')
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_xlabel(r'$x$ (m)')
ax.set_ylabel(r'$y$ (m)')
sgan_preds = sgan_preds_list[run]
our_most_likely_preds = our_preds_most_likely_list[run]
our_full_preds = our_preds_list[run]
random_scene_idx = random_scene_idxs[run]
data_id = data_ids[random_scene_idx]
t_predict = t_predicts[random_scene_idx] - 1
print(run, data_id, t_predict)
sgan_plotting_tensors = list()
for key, value in sgan_preds.items():
sgan_plotting_tensor = torch.cat([value, torch.full((value.shape[0], 1, 2), np.nan)], dim=1)
sgan_plotting_tensors.append(sgan_plotting_tensor)
if len(sgan_plotting_tensors) == 0:
print('len(sgan_plotting_tensors) == 0 for', run, data_id, t_predict, data_precondition, dataset_name)
continue
sgan_plotting_tensor = torch.cat(sgan_plotting_tensors, dim=0).view(-1, 2).cpu().numpy()
ax.plot(sgan_plotting_tensor[:, 0], sgan_plotting_tensor[:, 1],
color='#EC8F31', label='Social GAN',
alpha=0.5, linewidth=0.7)
# Saving some memory
del sgan_plotting_tensor
del sgan_plotting_tensors
labels_to_use = ['Our Method (Full)', r'Our Method ($z_{best}$)']
colors_to_use = ['blue', '#1FC287']
for idx, preds_dict in enumerate([our_full_preds, our_most_likely_preds]):
our_plotting_tensors = list()
futures_list = list()
previous_list = list()
for key, value in preds_dict.items():
curr_state_val = eval_inputs[key][data_id, t_predict]
pred_trajs = torch.from_numpy(plot_utils.integrate_trajectory(value.cpu().numpy(), [0, 1],
curr_state_val.cpu().numpy(), [0, 1],
eval_dt,
output_limit=max_speed,
velocity_in=True).astype(np.float32))
our_plotting_tensor = torch.cat([pred_trajs[:, 0], torch.full((pred_trajs.shape[0], 1, 2), np.nan)], dim=1)
our_plotting_tensors.append(our_plotting_tensor)
if idx == 1:
run_future = eval_inputs[key][data_id, t_predict+1 : t_predict+1+12, :2]
run_previous = eval_inputs[key][data_id, t_predict+1-8 : t_predict+1, :2]
futures_list.append(run_future)
previous_list.append(run_previous)
if len(our_plotting_tensors) == 0:
print('len(our_plotting_tensors) == 0 for', run, data_id, t_predict, data_precondition, dataset_name)
break
our_plotting_tensor = torch.cat(our_plotting_tensors, dim=0).view(-1, 2).cpu().numpy()
ax.plot(our_plotting_tensor[:, 0], our_plotting_tensor[:, 1],
color=colors_to_use[idx], label=labels_to_use[idx],
alpha=0.5, linewidth=0.7)
if idx == 1:
futures_tensor = torch.stack(futures_list, dim=0)
futures_tensor = torch.cat([futures_tensor, torch.full((futures_tensor.shape[0], 1, 2), np.nan)], dim=1)
futures_tensor = futures_tensor.view(-1, 2).cpu().numpy()
futures_tensor[futures_tensor == 0] = np.nan
ax.plot(futures_tensor[:, 0], futures_tensor[:, 1],
color='white', label='Ground Truth',
linestyle='--',
path_effects=[pe.Stroke(linewidth=3, foreground='k'), pe.Normal()])
previous_tensor = torch.stack(previous_list, dim=0)
previous_tensor = torch.cat([previous_tensor, torch.full((previous_tensor.shape[0], 1, 2), np.nan)], dim=1)
previous_tensor = previous_tensor.view(-1, 2).cpu().numpy()
previous_tensor[previous_tensor == 0] = np.nan
ax.plot(previous_tensor[:, 0], previous_tensor[:, 1],
color='k', label='History', linestyle='--')
curr_tensor = torch.stack(previous_list, dim=0)[:, -1]
ax.scatter(curr_tensor[:, 0], curr_tensor[:, 1], s=25,
c='grey', linewidths=1, edgecolor='k', zorder=10)
else:
# If the loop completed without any breaks, we can go ahead
# and plot the results.
ax.legend(loc='best')
plt.savefig('../sgan-dataset/plots/simultaneous_plots/checking_%s_%s_pred_fig_run%d.pdf' % (data_precondition, dataset_name, run), dpi=300, bbox_inches='tight')
def get_kde_log_likelihoods(data_precondition, dataset_name,
our_preds_most_likely_list, our_preds_list,
sgan_preds_list, sgan_gt_list, eval_inputs, eval_data_dict,
data_ids, t_predicts, random_scene_idxs, num_runs):
eval_dt = eval_data_dict['dt']
all_methods_preds_dict = defaultdict(list)
gt_dicts = list()
for run in range(num_runs):
sgan_preds = sgan_preds_list[run]
our_most_likely_preds = our_preds_most_likely_list[run]
our_full_preds = our_preds_list[run]
random_scene_idx = random_scene_idxs[run]
data_id = data_ids[random_scene_idx]
t_predict = t_predicts[random_scene_idx] - 1
print(run, data_id, t_predict)
sgan_preds = {key: value.cpu().numpy() for key, value in sgan_preds.items()}
all_methods_preds_dict['sgan'].append(sgan_preds)
methods_list = ['our_full', 'our_most_likely']
curr_gt = dict()
for idx, preds_dict in enumerate([our_full_preds, our_most_likely_preds]):
curr_preds = dict()
for key, value in preds_dict.items():
curr_state_val = eval_inputs[key][data_id, t_predict]
pred_trajs = plot_utils.integrate_trajectory(value.cpu().numpy(), [0, 1],
curr_state_val.cpu().numpy(), [0, 1],
eval_dt,
output_limit=max_speed,
velocity_in=True).astype(np.float32)
curr_preds[key] = pred_trajs[:, 0]
if idx == 1:
curr_gt[key] = eval_inputs[key][[data_id], t_predict+1 : t_predict+1+12, :2].cpu().numpy()
all_methods_preds_dict[methods_list[idx]].append(curr_preds)
gt_dicts.append(curr_gt)
detailed_ll_dict = {'data_precondition': list(),
'dataset': list(),
'method': list(),
'run': list(),
'timestep': list(),
'node': list(),
'log-likelihood': list()}
sgan_lls = list()
our_full_lls = list()
our_most_likely_lls = list()
log_pdf_lower_bound = -20
for run in range(num_runs):
sgan_preds = all_methods_preds_dict['sgan'][run]
our_full_preds = all_methods_preds_dict['our_full'][run]
our_most_likely_preds = all_methods_preds_dict['our_most_likely'][run]
gt_dict = gt_dicts[run]
for node in sgan_preds.keys():
first_nz = plot_utils.first_nonzero(np.sum(gt_dict[node], axis=2)[0, ::-1], axis=0)
if first_nz < 0:
continue
num_timesteps = gt_dict[node].shape[1] - first_nz
sgan_ll = 0.0
our_full_ll = 0.0
our_most_likely_ll = 0.0
for timestep in range(num_timesteps):
curr_gt = gt_dict[node][:, timestep]
sgan_scipy_kde = gaussian_kde(sgan_preds[node][:, timestep].T)
our_full_scipy_kde = gaussian_kde(our_full_preds[node][:, timestep].T)
our_most_likely_scipy_kde = gaussian_kde(our_most_likely_preds[node][:, timestep].T)
# We need [0] because it's a (1,)-shaped numpy array.
sgan_log_pdf = np.clip(sgan_scipy_kde.logpdf(curr_gt.T), a_min=log_pdf_lower_bound, a_max=None)[0]
our_full_pdf = np.clip(our_full_scipy_kde.logpdf(curr_gt.T), a_min=log_pdf_lower_bound, a_max=None)[0]
our_most_likely_pdf = np.clip(our_most_likely_scipy_kde.logpdf(curr_gt.T), a_min=log_pdf_lower_bound, a_max=None)[0]
for idx, result in enumerate([sgan_log_pdf, our_full_pdf, our_most_likely_pdf]):
detailed_ll_dict['data_precondition'].append(data_precondition)
detailed_ll_dict['dataset'].append(dataset_name)
detailed_ll_dict['method'].append(method_names[idx])
detailed_ll_dict['run'].append(run)
detailed_ll_dict['timestep'].append(timestep)
detailed_ll_dict['node'].append(str(node))
detailed_ll_dict['log-likelihood'].append(result)
sgan_ll += sgan_log_pdf/num_timesteps
our_full_ll += our_full_pdf/num_timesteps
our_most_likely_ll += our_most_likely_pdf/num_timesteps
sgan_lls.append(sgan_ll)
our_full_lls.append(our_full_ll)
our_most_likely_lls.append(our_most_likely_ll)
return sgan_lls, our_full_lls, our_most_likely_lls, detailed_ll_dict
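# Illustrative sketch of the per-timestep likelihood above (hypothetical names): for each
# node and timestep t, a 2-D Gaussian KDE is fit to the sampled positions and the ground
# truth is scored under it, e.g.
#   kde = gaussian_kde(preds[node][:, t].T)            # preds[node]: (n_samples, T, 2)
#   ll  = np.clip(kde.logpdf(gt[node][:, t].T), a_min=log_pdf_lower_bound, a_max=None)[0]
# and the per-node value reported is the mean of these log-densities over timesteps.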
def main():
for data_precondition in ['curr', 'prev', 'all']:
sgan_eval_mse_batch_errors = dict()
sgan_eval_fse_batch_errors = dict()
our_eval_mse_batch_errors = dict()
our_eval_fse_batch_errors = dict()
for dataset_name in ['eth', 'hotel', 'univ', 'zara1', 'zara2']:
(our_preds_most_likely_list, our_preds_list,
sgan_preds_list, sgan_gt_list, eval_inputs, eval_data_dict,
data_ids, t_predicts, random_scene_idxs, num_runs) = eval_utils.extract_our_and_sgan_preds(dataset_name, hyperparams, args, data_precondition=data_precondition)
# Computing log-likelihoods from each run.
sgan_lls, our_full_lls, our_most_likely_lls, detailed_ll_dict = get_kde_log_likelihoods(
data_precondition, dataset_name,
our_preds_most_likely_list, our_preds_list,
sgan_preds_list, sgan_gt_list, eval_inputs, eval_data_dict,
data_ids, t_predicts, random_scene_idxs, num_runs)
print('SGAN LLs, Our Method (Full) LLs, Our Method (Most Likely) LLs')
print(np.mean(sgan_lls), np.mean(our_full_lls), np.mean(our_most_likely_lls))
print('Calculated all KDE LLs for', data_precondition, dataset_name)
print('Saving current log-likelihoods to csv.')
pd.DataFrame.from_dict(detailed_ll_dict).to_csv('../sgan-dataset/plots/data/%s_%s_lls.csv' % (data_precondition, dataset_name), index=False)
# Plotting the trajectories from each run.
plot_run_trajs(data_precondition, dataset_name,
our_preds_most_likely_list, our_preds_list,
sgan_preds_list, sgan_gt_list, eval_inputs, eval_data_dict,
data_ids, t_predicts, random_scene_idxs, num_runs)
print('Plotted all run trajectories from', data_precondition, dataset_name)
# SGAN Errors
batch_error_dict, detailed_error_dict = eval_utils.compute_sgan_errors(
sgan_preds_list,
sgan_gt_list,
data_precondition,
dataset_name,
num_runs)
print('Saving current SGAN errors to csv.')
pd.DataFrame.from_dict(detailed_error_dict).to_csv('../sgan-dataset/plots/data/%s_%s_sgan_errors.csv' % (data_precondition, dataset_name), index=False)
sgan_eval_mse_batch_errors[pretty_dataset_name(dataset_name)] = torch.cat(batch_error_dict['mse'], dim=0)
sgan_eval_fse_batch_errors[pretty_dataset_name(dataset_name)] = torch.cat(batch_error_dict['fse'], dim=0)
# Our Most Likely Errors
error_info_dict = {'output_limit': max_speed}
batch_error_dict, detailed_error_dict = eval_utils.compute_preds_dict_error(
our_preds_most_likely_list,
eval_data_dict,
data_precondition,
dataset_name,
'our_most_likely',
num_runs,
random_scene_idxs,
data_ids,
t_predicts,
hyperparams['prediction_horizon'],
error_info_dict)
print('Saving current Our Method (Most Likely) errors to csv.')
pd.DataFrame.from_dict(detailed_error_dict).to_csv('../sgan-dataset/plots/data/%s_%s_our_most_likely_errors.csv' % (data_precondition, dataset_name), index=False)
our_eval_mse_batch_errors[pretty_dataset_name(dataset_name)] = torch.cat(batch_error_dict['mse'], dim=0)
our_eval_fse_batch_errors[pretty_dataset_name(dataset_name)] = torch.cat(batch_error_dict['fse'], dim=0)
# Our Full Errors
error_info_dict = {'output_limit': max_speed}
batch_error_dict, detailed_error_dict = eval_utils.compute_preds_dict_error(
our_preds_list,
eval_data_dict,
data_precondition,
dataset_name,
'our_full',
num_runs,
random_scene_idxs,
data_ids,
t_predicts,
hyperparams['prediction_horizon'],
error_info_dict)
print('Saving current Our Method (Full) errors to csv.')
pd.DataFrame.from_dict(detailed_error_dict).to_csv('../sgan-dataset/plots/data/%s_%s_our_full_errors.csv' % (data_precondition, dataset_name), index=False)
== CONTAINER_FORMAT_HYBRID_VM:
clients = self._get_hybrid_service_clients_by_node(provider_node)
try:
is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
except Exception, e:
LOG.error('docker service is not up, exception: %s' % traceback.format_exc(e))
raise e
if is_docker_service_up:
try:
self._clients_reboot_app(clients,
network_info=network_info,
block_device_info=block_device_info)
except Exception, e:
error_info = 'Start container failed, exception: %s' % traceback.format_exc(e)
LOG.error(error_info)
raise exception.NovaException(error_info)
else:
try:
self.compute_adapter.reboot_node(provider_node)
except Exception as e:
raise e
@RetryDecorator(max_retry_count= 50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _wait_hybrid_service_up(self, client):
return client.get_version()
@RetryDecorator(max_retry_count=20,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hypervm_inject_file(self, client, file_data):
LOG.info('start to inject file.')
inject_result = client.inject_file(CONF.provider_opts.dst_path, file_data=file_data)
LOG.info('end to inject file.')
return inject_result
@RetryDecorator(max_retry_count= 100,inc_sleep_time=5,max_sleep_time=120,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _start_container(self, client, network_info, block_device_info):
return client.start_container(network_info=network_info, block_device_info=block_device_info)
@RetryDecorator(max_retry_count= MAX_RETRY_COUNT,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_create_container(self, clients, name):
LOG.info('start to create container')
created_container = None
tmp_except = Exception('client is None')
for client in clients:
try:
created_container = client.create_container(name)
break
except Exception, e:
tmp_except = e
LOG.error('exception when create container, exception: %s' % traceback.format_exc(e))
time.sleep(1)
continue
if not created_container:
raise tmp_except
LOG.info('end to create container, created_container: %s' % created_container)
return created_container
@RetryDecorator(max_retry_count=MAX_RETRY_COUNT, inc_sleep_time=5, max_sleep_time=60, exceptions=(
errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError, Exception))
def _hyper_create_container_task(self, clients, image_name, image_uuid, injected_files, admin_password,
network_info, block_device_info):
LOG.info('start to submit task for creating container.')
LOG.debug('admin_password: %s' % admin_password)
LOG.debug('injected_files: %s' % injected_files)
created_task = None
tmp_exception = Exception('empty for creating container')
for client in clients:
try:
created_task = client.create_container(image_name, image_uuid, inject_files=injected_files, admin_password=admin_password,
network_info=network_info, block_device_info=block_device_info)
except Exception, e:
tmp_exception = e
LOG.error('exception when create container, exception: %s' % traceback.format_exc(e))
continue
if not created_task:
raise tmp_exception
LOG.info('end to submit task for creating container, task: %s' % created_task)
return created_task
@RetryDecorator(max_retry_count=50, inc_sleep_time=5, max_sleep_time=60,
exceptions=(exception_ex.RetryException))
def _wait_for_task_finish(self, clients, task):
task_finish = False
if task['code'] == wormhole_constants.TASK_SUCCESS:
return True
current_task = self._hyper_query_task(clients, task)
task_code = current_task['code']
if wormhole_constants.TASK_DOING == task_code:
LOG.debug('task is DOING, status: %s' % task_code)
raise exception_ex.RetryException(error_info='task status is: %s' % task_code)
elif wormhole_constants.TASK_ERROR == task_code:
LOG.debug('task is ERROR, status: %s' % task_code)
raise Exception('task error, task status is: %s' % task_code)
elif wormhole_constants.TASK_SUCCESS == task_code:
LOG.debug('task is SUCCESS, status: %s' % task_code)
task_finish = True
else:
raise Exception('UNKNOWN ERROR, task status: %s' % task_code)
LOG.debug('task: %s is finished' % task )
return task_finish
@RetryDecorator(max_retry_count=MAX_RETRY_COUNT, inc_sleep_time=5, max_sleep_time=60, exceptions=(
errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError, Exception))
def _hyper_query_task(self, clients, task):
LOG.debug('start to query task.')
current_task = None
tmp_exception = Exception('empty for query task')
for client in clients:
try:
current_task = client.query_task(task)
break
except Exception, e:
tmp_exception = e
LOG.error('exception when query task. exception: %s' % traceback.format_exc(e))
continue
if not current_task:
raise tmp_exception
return current_task
@RetryDecorator(max_retry_count= MAX_RETRY_COUNT,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_start_container(self, clients, network_info, block_device_info):
LOG.info('Start to start container')
started_container = None
tmp_except = None
for client in clients:
try:
started_container = client.start_container(network_info=network_info, block_device_info=block_device_info)
break
except Exception, e:
tmp_except = e
continue
if not started_container:
raise tmp_except
LOG.info('end to start container, started_container: %s' % started_container)
return started_container
@RetryDecorator(max_retry_count=20, inc_sleep_time=5, max_sleep_time=60,
exceptions=(errors.APIError, errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_inject_file_to_container(self, clients, inject_file):
"""
:param clients:
:param inject_file: (path, file_contents)
:return:
"""
LOG.debug('start to inject file to container, inject_file: %s' % inject_file)
inject_result = None
tmp_except = None
for client in clients:
try:
inject_result = client.inject_files(inject_file)
break
except Exception, e:
tmp_except = e
continue
if not inject_result:
raise tmp_except
LOG.info('end to inject file to container, inject_file: %s' % inject_file)
return inject_result
@RetryDecorator(max_retry_count= 20,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_inject_file(self, clients, file_data):
inject_result = None
tmp_except = None
for client in clients:
try:
inject_result = client.inject_file(CONF.provider_opts.dst_path, file_data=file_data)
break
except Exception, e:
tmp_except = e
continue
if not inject_result:
raise tmp_except
return inject_result
def _get_node_private_ips(self, provider_node):
"""
:param provider_node: type Node,
:return: type list, return list of private ips of Node
"""
LOG.debug('start to get node private ips for node:%s' % provider_node.name)
private_ips = []
interfaces = self.compute_adapter.ex_list_network_interfaces(node=provider_node)
for interface in interfaces:
if len(interface.extra.get('private_ips')) > 0:
for private_ip_dic in interface.extra.get('private_ips'):
private_ip = private_ip_dic.get('private_ip')
if private_ip:
private_ips.append(private_ip)
else:
continue
else:
continue
LOG.debug('end to get node private ips, private_ips: %s' % private_ips)
return private_ips
def _get_hybrid_service_clients_by_instance(self, instance):
LOG.debug('start to get hybrid service clients.')
provider_node = self._get_provider_node(instance)
if not provider_node:
error_info = 'failed to get instance %s from the provider cloud' % instance.uuid
LOG.error(error_info)
raise Exception(error_info)
clients = self._get_hybrid_service_clients_by_node(provider_node)
LOG.debug('end to get hybrid service clients')
return clients
def _get_hybrid_service_clients_by_node(self, provider_node):
port = CONF.provider_opts.hybrid_service_port
private_ips = self._get_node_private_ips(provider_node)
LOG.debug('port: %s' % port)
LOG.debug('private ips: %s' % private_ips)
clients = self._get_hybrid_service_client(private_ips, port)
return clients
def _get_hybrid_service_client(self, ips, port):
clients = []
for ip in ips:
clients.append(Client(ip, port))
return clients
@RetryDecorator(max_retry_count=50, inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_wait_hybrid_service_up(self, clients):
is_docker_up = False
tmp_except = Exception('Can not get version of docker server ')
for client in clients:
try:
docker_version = client.get_version()
LOG.debug('docker version: %s, docker is up.' % docker_version)
is_docker_up = True
break
except Exception, e:
tmp_except = e
continue
if not is_docker_up:
raise tmp_except
return is_docker_up
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_reboot_app(self, clients, network_info, block_device_info):
is_rebooted = False
tmp_except = Exception('Reboot app failed.')
for client in clients:
try:
client.restart_container(network_info=network_info, block_device_info=block_device_info)
LOG.debug('Reboot app success.')
is_rebooted = True
break
except Exception, e:
tmp_except = e
continue
if not is_rebooted:
raise tmp_except
return is_rebooted
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_stop_container(self, clients):
is_stop = False
tmp_except = Exception('Stop container failed.')
for client in clients:
try:
client.stop_container()
LOG.debug('Stop container success.')
is_stop = True
break
except Exception, e:
tmp_except = e
continue
if not is_stop:
raise tmp_except
return is_stop
@staticmethod
def _binding_host(context, network_info, host_id):
neutron = neutronv2.get_client(context, admin=True)
port_req_body = {'port': {'binding:host_id': host_id}}
for vif in network_info:
neutron.update_port(vif.get('id'), port_req_body)
@staticmethod
def _binding_host_vif(vif, host_id):
context = RequestContext('user_id', 'project_id')
neutron = neutronv2.get_client(context, admin=True)
port_req_body = {'port': {'binding:host_id': host_id}}
neutron.update_port(vif.get('id'), port_req_body)
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_attach_volume_for_docker_app(self, clients, volume_id, device, mount_device):
attached = False
tmp_except = Exception('attach volume for app failed.')
for client in clients:
try:
client.attach_volume(volume_id, device, mount_device)
LOG.debug('attach volume for app success.')
attached = True
break
except Exception, e:
tmp_except = e
continue
if not attached:
raise tmp_except
return attached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_create_image_task(self, clients, image):
image_name = image['name']
LOG.debug('image name : %s' % image_name)
image_id = image['id']
LOG.debug('image id: %s' % image_id)
create_image_task = None
tmp_exception = Exception('tmp exception in create image task')
for client in clients:
try:
create_image_task = client.create_image(image_name, image_id)
LOG.debug('create image task: %s' % create_image_task)
break
except Exception, e:
tmp_exception = e
continue
if not create_image_task:
raise tmp_exception
return create_image_task
@RetryDecorator(max_retry_count=50, inc_sleep_time=5, max_sleep_time=60,
exceptions=(errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_get_image_info(self, clients, image):
image_name = image['name']
image_id = image['id']
image_info = None
tmp_exception = Exception('tmp exception in get image_info')
for client in clients:
try:
image_info = client.image_info(image_name, image_id)
LOG.debug('get image_info: %s' % image_info)
break
except Exception, e:
tmp_exception = e
continue
if not image_info:
raise tmp_exception
return image_info
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_list_volume_devices_for_docker_app(self, clients):
volume_devices = None
tmp_except = Exception('list volumes devices failed.')
for client in clients:
try:
volume_devices = client.list_volume()
LOG.debug('list volume devices success, volume list: %s' % volume_devices)
break
except Exception, e:
tmp_except = e
continue
if not volume_devices:
raise tmp_except
return volume_devices
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_detach_volume_for_docker_app(self, clients, volume_id):
detached = False
tmp_except = Exception('detach volume for app failed.')
for client in clients:
try:
client.detach_volume(volume_id)
LOG.debug('detach volume for app success.')
detached = True
break
except Exception, e:
tmp_except = e
continue
if not detached:
raise tmp_except
return detached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_detach_interface(self, clients, vif):
detached = False
tmp_except = Exception('detach interface for app failed.')
for client in clients:
try:
client.detach_interface(vif)
LOG.debug('detach interface for app success.')
detached = True
break
except Exception, e:
tmp_except = e
continue
if not detached:
raise tmp_except
return detached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_attach_interface(self, clients, vif):
attached = False
tmp_except = Exception('attach interface for app failed.')
for client in clients:
try:
client.attach_interface(vif)
LOG.debug('attach interface for app success.')
attached = True
break
except Exception, e:
tmp_except = e
continue
if not attached:
raise tmp_except
return attached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_pause_container(self, clients):
paused = False
tmp_except = Exception('pause container failed.')
for client in clients:
try:
client.pause_container()
LOG.debug('pause container success.')
paused = True
break
except Exception, e:
tmp_except = e
continue
if not paused:
raise tmp_except
return paused
from datetime import datetime, timedelta
from os import PathLike
from pathlib import Path
from typing import Collection
import fiona
import fiona.crs
import numpy
import rasterio
from rasterio.enums import Resampling
import scipy.interpolate
import xarray
import PyOFS
from PyOFS import CRS_EPSG, LEAFLET_NODATA_VALUE, TIFF_CREATION_OPTIONS, get_logger
LOGGER = get_logger('PyOFS.HFR')
DATA_VARIABLES = {'ssu': 'u', 'ssv': 'v', 'dopx': 'dopx', 'dopy': 'dopy'}
OUTPUT_CRS = fiona.crs.from_epsg(CRS_EPSG)
NRT_DELAY = timedelta(hours=1)
# either UCSD (University of California San Diego) or NDBC (National Data Buoy Center); NDBC has larger extent but only for the past 4 days
SOURCE_URLS = {
'NDBC': 'https://dods.ndbc.noaa.gov/thredds/dodsC',
'UCSD': 'http://hfrnet-tds.ucsd.edu/thredds/dodsC/HFR/USWC',
}
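# e.g. for the default 6 km resolution the NDBC source resolves to
#   'https://dods.ndbc.noaa.gov/thredds/dodsC/hfradar_uswc_6km'
# (see the URL construction in HFRadarRange.__init__ below).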
class HFRadarRange:
"""
High Frequency (HF) Radar NetCDF observation of surface current velocities.
"""
grid_transform = None
def __init__(
self, start_time: datetime = None, end_time: datetime = None, resolution: int = 6
):
"""
Creates new observation object from source.
:param start_time: beginning of time interval
:param end_time: end of time interval
:param resolution: desired observation resolution in kilometers
:raises NoDataError: if observation does not exist.
"""
if start_time is None:
start_time = datetime.now()
self.start_time = start_time
if end_time is None:
end_time = self.start_time + timedelta(days=1)
if end_time > datetime.utcnow():
# HFR near real time delay is 1 hour behind UTC
self.end_time = datetime.utcnow() - NRT_DELAY
else:
self.end_time = end_time
self.resolution = resolution
# NDBC only keeps observations within the past 4 days
for source, source_url in SOURCE_URLS.items():
# get URL
if source == 'NDBC':
url = f'{source_url}/hfradar_uswc_{self.resolution}km'
elif source == 'UCSD':
url = f'{source_url}/{self.resolution}km/hourly/RTV/HFRADAR_US_West_Coast_{self.resolution}km_Resolution_Hourly_RTV_best.ncd'
else:
url = source_url
try:
self.dataset = xarray.open_dataset(url)
self.url = url
break
except OSError as error:
LOGGER.warning(f'{error.__class__.__name__}: {error}')
else:
raise PyOFS.NoDataError(
f'No HFR observations found between {self.start_time} and {self.end_time}'
)
raw_times = self.dataset['time']
self.dataset['time'] = xarray.DataArray(
numpy.array(raw_times.values, dtype='datetime64[h]'),
coords=raw_times.coords,
dims=raw_times.dims,
attrs=raw_times.attrs,
)
self.dataset = self.dataset.sel(time=slice(self.start_time, self.end_time))
LOGGER.info(
f'Collecting HFR velocity between {str(self.dataset["time"].min().values)[:19]} and {str(self.dataset["time"].max().values)[:19]}...'
)
if HFRadarRange.grid_transform is None:
lon = self.dataset['lon'].values
lat = self.dataset['lat'].values
# define image properties
west = numpy.min(lon)
north = numpy.max(lat)
self.mean_x_size = numpy.mean(numpy.diff(lon))
self.mean_y_size = numpy.mean(numpy.diff(lat))
# get rasterio geotransform of HFR observation (flipped latitude)
self.grid_transform = rasterio.transform.from_origin(
west, north, self.mean_x_size, self.mean_y_size
)
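# rasterio.transform.from_origin(west, north, xsize, ysize) anchors the affine transform
# at the upper-left corner (west, north) with the given pixel sizes; passing the mean
# latitude spacing as ysize is what "flips" the north-up raster relative to the
# south-to-north HFR latitude axis.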
def data(self, variable: str, time: datetime, dop_threshold: float = None) -> numpy.array:
"""
Get data for the specified variable at a single time.
:param variable: variable name
:param time: time to retrieve
:param dop_threshold: threshold for Dilution of Precision (DOP) above which data should be discarded
:return: array of data.
"""
output_data = self.dataset[DATA_VARIABLES[variable]].sel(time=time)
if dop_threshold is not None:
output_data[~self.dop_mask(dop_threshold)] = numpy.nan
return output_data.values
def data_average(
self,
variable: str,
start_time: datetime = None,
end_time: datetime = None,
dop_threshold: float = None,
include_incomplete: bool = False,
) -> numpy.array:
"""
Get data for the specified variable at a single time.
:param variable: variable name
:param start_time: start of time interval
:param end_time: end of time interval
:param dop_threshold: threshold for Dilution of Precision (DOP) above which data should be discarded
:param include_incomplete: whether to keep incomplete time series
:return: array of data
"""
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = self.end_time
data_array = self.dataset[DATA_VARIABLES[variable]].sel(
time=slice(start_time, end_time)
)
if dop_threshold is not None:
data_array.values[~self.dop_mask(dop_threshold)] = numpy.nan
output_data = numpy.nanmean(data_array, axis=0)
if not include_incomplete:
output_data[numpy.isnan(data_array).any(axis=0)] = numpy.nan
return output_data
def bounds(self) -> tuple:
"""
Get coordinate bounds of observation.
:return: tuple of bounds (west, north, east, south)
"""
return (
self.dataset.geospatial_lon_min,
self.dataset.geospatial_lat_max,
self.dataset.geospatial_lon_max,
self.dataset.geospatial_lat_min,
)
def cell_size(self) -> tuple:
"""
Get cell sizes of observation.
:return: tuple of cell sizes (x, y)
"""
return abs(self.mean_x_size), abs(self.mean_y_size)
def write_sites(self, output_filename: PathLike):
"""
Writes HFR radar facility locations to specified file and layer.
:param output_filename: path to output file
"""
if not isinstance(output_filename, Path):
output_filename = Path(output_filename)
output_filename, layer_name = PyOFS.split_layer_filename(output_filename)
layer_records = []
for site_index in range(self.dataset['nSites']):
site_code = (
self.dataset['site_code'][site_index].tobytes().decode().strip('\x00').strip()
)
site_network_code = (
self.dataset['site_netCode'][site_index]
.tobytes()
.decode()
.strip('\x00')
.strip()
)
lon = float(self.dataset['site_lon'][site_index])
lat = float(self.dataset['site_lat'][site_index])
record = {
'id': site_index + 1,
'geometry': {'type': 'Point', 'coordinates': (lon, lat)},
'properties': {
'code': site_code,
'net_code': site_network_code,
'lon': float(lon),
'lat': float(lat),
},
}
layer_records.append(record)
schema = {
'geometry': 'Point',
'properties': {'code': 'str', 'net_code': 'str', 'lon': 'float', 'lat': 'float'},
}
with fiona.open(
output_filename,
'w',
'GPKG',
layer=layer_name,
schema=schema,
crs=OUTPUT_CRS.to_dict(),
) as layer:
layer.writerecords(layer_records)
def write_vectors(
self,
output_filename: PathLike,
variables: Collection[str] = None,
start_time: datetime = None,
end_time: datetime = None,
dop_threshold: float = 0.5,
):
"""
Write HFR data to a layer of the provided output file for every hour in the given time interval.
:param output_filename: path to output file
:param variables: variable names to use
:param start_time: beginning of time interval
:param end_time: end of time interval
:param dop_threshold: threshold for Dilution of Precision (DOP) above which data should be discarded
"""
if not isinstance(output_filename, Path):
output_filename = Path(output_filename)
output_filename = PyOFS.split_layer_filename(output_filename)[0]
if variables is None:
variables = DATA_VARIABLES
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = self.end_time
time_interval_selection = self.dataset.sel(time=slice(start_time, end_time))
if dop_threshold is not None:
dop_mask = (
(self.dataset['dopx'].sel(time=slice(start_time, end_time)) <= dop_threshold)
& (self.dataset['dopy'].sel(time=slice(start_time, end_time)) <= dop_threshold)
).values
time_interval_selection[~dop_mask] = numpy.nan
# create dict to store features
layers = {}
# create layer using OGR, then add features using QGIS
for hfr_time in time_interval_selection['time']:
hfr_time = datetime.utcfromtimestamp(
(hfr_time.values - numpy.datetime64('1970-01-01T00:00:00Z'))
/ numpy.timedelta64(1, 's')
)
layer_name = f'{hfr_time:%Y%m%dT%H%M%S}'
hfr_data = time_interval_selection.sel(time=hfr_time)
# create features
layer_records = []
feature_index = 1
for col in range(len(self.dataset['lon'])):
for row in range(len(self.dataset['lat'])):
data = [
float(hfr_data[variable_name][row, col].values)
for variable, variable_name in variables.items()
]
# stop if record has masked values
if not (numpy.isnan(data)).all():
lon = self.dataset['lon'][col]
lat = self.dataset['lat'][row]
record = {
'id': feature_index,
'geometry': {'type': 'Point', 'coordinates': (lon, lat)},
'properties': {'lon': float(lon), 'lat': float(lat)},
}
record['properties'].update(dict(zip(list(variables.keys()), data)))
layer_records.append(record)
feature_index += 1
layers[layer_name] = layer_records
# write queued features to their respective layers
schema = {
'geometry': 'Point',
'properties': {
'u': 'float',
'v': 'float',
'lat': 'float',
'lon': 'float',
'dop_lat': 'float',
'dop_lon': 'float',
},
}
for layer_name, layer_records in layers.items():
with fiona.open(
output_filename,
'w',
'GPKG',
layer=layer_name,
schema=schema,
crs=OUTPUT_CRS.to_dict(),
) as layer:
layer.writerecords(layer_records)
def write_vector(
self,
output_filename: PathLike,
variables: Collection[str] = None,
start_time: datetime = None,
end_time: datetime = None,
dop_threshold: float = 0.5,
):
"""
Write average of HFR data for all hours in the given time interval to a single layer of the provided output file.
:param output_filename: path to output file
:param variables: variable names to use
:param start_time: beginning of time interval
:param end_time: end of time interval
:param dop_threshold: threshold for Dilution of Precision (DOP) above which data should be discarded
"""
if not isinstance(output_filename, Path):
output_filename = Path(output_filename)
output_filename, layer_name = PyOFS.split_layer_filename(output_filename)
if layer_name is None:
layer_name = 'ssuv'
if variables is None:
variables = DATA_VARIABLES
variable_means = {
variable: self.data_average(variable, start_time, end_time, dop_threshold)
for variable in variables
}
# define layer schema
schema = {'geometry': 'Point', 'properties': {'lon': 'float', 'lat': 'float'}}
schema['properties'].update({variable: 'float' for variable in variables})
# create features
layer_records = []
feature_index = 1
for col in range(len(self.dataset['lon'])):
for row in range(len(self.dataset['lat'])):
data = [float(variable_means[variable][row, col]) for variable in variables]
# stop if record has masked values
if not (numpy.isnan(data)).all():
lon = self.dataset['lon'][col]
lat = self.dataset['lat'][row]
record = {
'id': feature_index,
'geometry': {'type': 'Point', 'coordinates': (lon, lat)},
'properties': {'lon': float(lon), 'lat': float(lat)},
}
record['properties'].update(dict(zip(variables, data)))
layer_records.append(record)
feature_index += 1
# write queued features to layer
LOGGER.info(f'Writing {output_filename}')
with fiona.open(
output_filename,
'w',
'GPKG',
layer=layer_name,
schema=schema,
crs=OUTPUT_CRS.to_dict(),
) as layer:
layer.writerecords(layer_records)
def write_rasters(
self,
output_dir: PathLike,
filename_prefix: str = 'hfr',
filename_suffix: str = '',
variables: Collection[str] = None,
start_time: datetime = None,
end_time: datetime = None,
fill_value: float = LEAFLET_NODATA_VALUE,
driver: str = 'GTiff',
dop_threshold: float = None,
):
"""
Write average of HFR data for all hours in the given time interval to rasters.
:param output_dir: path to output directory
:param filename_prefix: prefix for output filenames
:param filename_suffix: suffix for output filenames
:param variables: variable names to use
:param start_time: beginning of time interval
:param end_time: end of time interval
:param fill_value: desired fill value of output
:param driver: string of valid GDAL driver
import os
import csv
import imageio
from PIL import Image
import numpy as np
from scipy import stats
import torch
import torch.nn.functional as F
from torchvision.utils import save_image
from myGym.vae.vis_helpers import (read_loss_from_file, add_labels, make_grid_img,
FPS_GIF, concatenate_pad)
import cv2
TRAIN_FILE = "train_losses.log"
DECIMAL_POINTS = 3
GIF_FILE = "training.gif"
PLOT_NAMES = dict(generate_samples="samples.png",
data_samples="data_samples.png",
reconstruct="reconstruct.png",
traversals="traversals.png",
text_traversals='txt_traversals_{}.csv',
reconstruct_traverse="reconstruct_traverse.png",
gif_traversals_prior="prior_traversals.gif",
gif_traversals_post="posterior_traversals.gif",)
def textoutput2idxs(input_tensor):
if len(input_tensor.shape) < 3:
recon_text = np.zeros((2), dtype=int)
for row in range(recon_text.shape[0]):
recon_text[row] = int(input_tensor[row][:].max(0)[1])
else:
recon_text = np.zeros((input_tensor.shape[0], input_tensor.shape[1]), dtype=int)
for row in range(recon_text.shape[0]):
for col in range(recon_text.shape[1]):
recon_text[row][col] = int(input_tensor[row][col][:].max(0)[1])
return recon_text
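# Example (hypothetical input): for a (2, V) tensor of one-hot rows such as torch.eye(4)[:2],
# the 2-D branch is taken and the result is array([0, 1]), i.e. the argmax index of each row.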
class Visualizer():
def __init__(self, model, model_dir,
save_images=True,
loss_of_interest=None,
display_loss_per_dim=False,
max_traversal=20, # corresponds to ~2 for standard normal
upsample_factor=1,
vocab=None):
"""
Visualizer is used to generate images of samples, reconstructions,
latent traversals and so on of the trained model.
Parameters
----------
model : disvae.vae.VAE
dataset : str
Name of the dataset.
model_dir : str
The directory that the model is saved to and where the images will
be stored.
save_images : bool, optional
Whether to save images or return a tensor.
loss_of_interest : str, optional
The loss type (as saved in the log file) to order the latent dimensions by and display.
display_loss_per_dim : bool, optional
if the loss should be included as text next to the corresponding latent dimension images.
max_traversal: float, optional
The maximum displacement induced by a latent traversal. Symmetrical
traversals are assumed. If `m>=0.5` then uses absolute value traversal,
if `m<0.5` uses a percentage of the distribution (quantile).
E.g. for the prior the distribution is a standard normal so `m=0.45` c
orresponds to an absolute value of `1.645` because `2m=90%%` of a
standard normal is between `-1.645` and `1.645`. Note in the case
of the posterior, the distribution is not standard normal anymore.
upsample_factor : float, optional
Scale factor to upsample the size of the tensor
"""
self.model = model
self.device = next(self.model.parameters()).device
self.latent_dim = self.model.n_latents
self.max_traversal = max_traversal
self.save_images = save_images
self.model_dir = model_dir
self.upsample_factor = upsample_factor
self.vocab = vocab
if loss_of_interest is not None:
self.losses = read_loss_from_file(os.path.join(self.model_dir, TRAIN_FILE),
loss_of_interest)
else:
self.losses = None
def _get_traversal_range(self, mean=0, std=1):
"""Return the corresponding traversal range in absolute terms."""
max_traversal = self.max_traversal
if max_traversal < 0.5:
max_traversal = (1 - 2 * max_traversal) / 2 # from 0.45 to 0.05
max_traversal = stats.norm.ppf(max_traversal, loc=mean, scale=std) # from 0.05 to -1.645
# symmetrical traversals
return (-1 * max_traversal, max_traversal)
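# Worked example: with max_traversal=0.45 (a quantile), (1 - 2*0.45)/2 = 0.05 and
# stats.norm.ppf(0.05) is about -1.645, so the method returns (1.645, -1.645), i.e. a
# symmetric sweep over +/-1.645 (in descending order, which torch.linspace handles fine).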
def traverse_line(self, idx, n_samples, data=None):
"""Return a (size, latent_size) latent sample, corresponding to a traversal
of a latent variable indicated by idx.
Parameters
----------
idx : int
Index of continuous dimension to traverse. If the continuous latent
vector is 10 dimensional and idx = 7, then the 7th dimension
will be traversed while all others are fixed.
n_samples : int
Number of samples to generate.
data : torch.Tensor or None, optional
Data to use for computing the posterior. Shape (N, C, H, W). If
`None` then use the mean of the prior (all zeros) for all other dimensions.
"""
if data is None:
# mean of prior for other dimensions
samples = torch.zeros(n_samples, self.latent_dim)
traversals = torch.linspace(*self._get_traversal_range(), steps=n_samples)
else:
if data.size(0) > 1:
raise ValueError("Every value should be sampled from the same posterior, but {} datapoints given.".format(data.size(0)))
with torch.no_grad():
post_mean, post_logvar = self.model.infer(image=data.to(self.device))
samples = self.model.reparametrize(post_mean, post_logvar)
samples = samples.cpu().repeat(n_samples, 1)
post_mean_idx = post_mean.cpu()[0, idx]
post_std_idx = torch.exp(post_logvar / 2).cpu()[0, idx]
# travers from the gaussian of the posterior in case quantile
traversals = torch.linspace(*self._get_traversal_range(mean=post_mean_idx,
std=post_std_idx),
steps=n_samples)
for i in range(n_samples):
samples[i, idx] = traversals[i]
return samples
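# Usage sketch (hypothetical call): latents = visualizer.traverse_line(idx=3, n_samples=8)
# returns an (8, latent_dim) tensor in which only dimension 3 varies, either around the
# prior mean (data=None) or around the posterior inferred from a single datapoint.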
def _save_or_return(self, to_plot, size, filename, is_force_return=False):
"""Create plot and save or return it."""
text_plot = None
if isinstance(to_plot, list):
## this is the image
text_plot = np.zeros(to_plot[0].size())
txt_recon_np = to_plot[1].numpy()
txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
for i, item in enumerate(txt_recon_np):
img = text_plot[i].squeeze()
# cv2.putText(img, LABEL_IX_TO_STRING[item], (0, 8), cv2.FONT_HERSHEY_PLAIN, .7, (255, 255, 255),
# lineType=cv2.LINE_AA)
text_plot[i] = np.expand_dims(img, axis=0)
text_plot = torch.tensor(text_plot)
to_plot = to_plot[0]
to_plot = F.interpolate(to_plot, scale_factor=self.upsample_factor)
if size[0] * size[1] != to_plot.shape[0]:
raise ValueError("Wrong size {} for datashape {}".format(size, to_plot.shape))
# `nrow` is number of images PER row => number of col
kwargs = dict(nrow=size[1], pad_value=(1 - 0))
if self.save_images and not is_force_return:
filename = os.path.join(self.model_dir, filename)
i = make_grid_img(to_plot, **kwargs)
cv2.imwrite(filename, i*255)
if text_plot is not None:
text_plot = F.interpolate(text_plot, scale_factor=self.upsample_factor)
text_plot = text_plot.type('torch.FloatTensor')
joined = torch.cat((to_plot, text_plot), 0)
save_image(text_plot, os.path.join(self.model_dir, 'text_traversal.png'), **kwargs)
kwargs = dict(nrow=size[0]*size[1], pad_value=(1 - 0))
save_image(joined, os.path.join(self.model_dir, 'joint_traversal.png'), **kwargs)
print("Samples saved to {}".format(filename))
else:
img = make_grid_img(to_plot, **kwargs)
return img
def _decode_latents(self, latent_samples, image=True):
"""Decodes latent samples into images.
Parameters
----------
latent_samples : torch.autograd.Variable
Samples from latent distribution. Shape (N, L) where L is dimension
of latent distribution.
"""
latent_samples = latent_samples.to(self.device)
decoded_image = None
if len(latent_samples.shape) > 2:
latent_samples = latent_samples.view(-1, self.latent_dim)
decoded_image = self.model.image_decoder(latent_samples).cpu()
else:
if image is True:
decoded_image = self.model.image_decoder(latent_samples).cpu()
return decoded_image
def generate_samples(self, size=(8, 8)):
"""Plot generated samples from the prior and decoding.
Parameters
----------
size : tuple of ints, optional
Size of the final grid.
"""
prior_samples = torch.randn(size[0] * size[1], self.latent_dim)
generated = self._decode_latents(prior_samples)
if isinstance(generated, list):
prepared = []
for x in generated:
prepared.append(x.data)
generated = prepared
else:
generated = generated.data
return self._save_or_return(generated, size, PLOT_NAMES["generate_samples"])
def data_samples(self, data, size=(8, 8)):
"""Plot samples from the dataset
Parameters
----------
data : torch.Tensor
Data to be reconstructed. Shape (N, C, H, W)
size : tuple of ints, optional
Size of the final grid.
"""
data = data[:size[0] * size[1], ...]
return self._save_or_return(data, size, PLOT_NAMES["data_samples"])
def reconstruct(self, data, size=(8, 8), is_original=True, is_force_return=False):
"""Generate reconstructions of data through the model.
Parameters
----------
data : torch.Tensor
Data to be reconstructed. Shape (N, C, H, W)
size : tuple of ints, optional
Size of grid on which reconstructions will be plotted. The number
of rows should be even when `is_original`, so that upper
half contains true data and bottom half contains reconstructions.
is_original : bool, optional
Whether to also plot the original data in the upper half of the grid.
is_force_return : bool, optional
Force returning instead of saving the image.
"""
if is_original:
if size[0] % 2 != 0:
raise ValueError("Should be even number of rows when showing originals not {}".format(size[0]))
n_samples = size[0] // 2 * size[1]
else:
n_samples = size[0] * size[1]
with torch.no_grad():
originals = data.to(self.device)[:n_samples, ...]
recs, _, _ = self.model(originals)
originals = originals.cpu()
recs = recs.view(-1, *self.model.img_size).cpu()
to_plot = torch.cat([originals, recs]) if is_original else recs
return self._save_or_return(to_plot, size, PLOT_NAMES["reconstruct"],
is_force_return=is_force_return)
def reconstruct_nofile(self, data, size=(8, 8), is_original=True, is_force_return=False):
"""Generate reconstructions of data through the model.
Parameters
----------
data : torch.Tensor
Data to be reconstructed. Shape (N, C, H, W)
size : tuple of ints, optional
Size of grid on which reconstructions will be plotted. The number
of rows should be even when `is_original`, so that upper
half contains true data and bottom half contains reconstructions.
is_original : bool, optional
Whether to also plot the original data in the upper half of the grid.
is_force_return : bool, optional
Force returning instead of saving the image.
"""
if is_original:
if size[0] % 2 != 0:
raise ValueError("Should be even number of rows when showing originals not {}".format(size[0]))
n_samples = size[0] // 2 * size[1]
else:
n_samples = size[0] * size[1]
with torch.no_grad():
originals = data.to(self.device)[:n_samples, ...]
recs, _, _ = self.model(originals)
originals = originals.cpu()
recs = recs.view(-1, *self.model.img_size).cpu()
to_plot = torch.cat([originals, recs]) if is_original else recs
return to_plot
def traversals(self,
data=None,
is_reorder_latents=False,
n_per_latent=8,
n_latents=None,
is_force_return=False,
sample_number="single"):
"""Plot traverse through all latent dimensions (prior or posterior) one
by one and plots a grid of images where each row corresponds to a latent
traversal of one latent dimension.
Parameters
----------
data : torch.Tensor or None, optional
Data to use for computing the latent posterior. If `None`, traverses
the prior.
n_per_latent : int, optional
The number of points to include in the traversal of a latent dimension.
I.e. number of columns.
n_latents : int, optional
The number of latent dimensions to display. I.e. number of rows. If `None`
uses all latents.
is_reorder_latents : bool, optional
If the latent dimensions should be reordered according to the loss
given in `loss_of_interest`.
# affine_model_with_default.py
import numpy as np
import itertools
from scipy.integrate import quad, IntegrationWarning
"""
risk-free: delta + S(a_m X_m) + S(c_i Y_i)
spread: spread + S(b_m X_m) + S(d_i Y_i)
"""
def beta_m(alpha_m, sigma_m, a_m, b_m):
"""
Implements the beta_m coefficient from Nawalka, Beliaeva, Soto (pg. 460)
"""
if b_m:
changing = np.array(a_m) + np.array(b_m)
else:
changing = np.array(a_m)
return np.sqrt(alpha_m ** 2 + 2 * changing * sigma_m ** 2)
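# In formula form (Nawalka, Beliaeva, Soto, pg. 460) the function above computes
# beta_m = sqrt(alpha_m**2 + 2 * (a_m + b_m) * sigma_m**2), with b_m dropped in
# the risk-free case.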
def C_i(tau, **params):
"""
Implements Eq. 9.126 from Nawalka, Beliaeva, Soto (pg. 460)
Need params: k
"""
numerator = 1 - np.exp(- params['k'] * tau)
return numerator / params['k']
def B_m(tau, **params):
"""
Implements Eq. 9.125 from Nawalka, Beliaeva, Soto (pg. 460)
Needs params: alpha, sigma
"""
# Get beta
if params['risk_free']:
beta = beta_m(params['alpha'], params['sigma'], params['a_m'], 0)
else:
beta = beta_m(params['alpha'], params['sigma'], params['a_m'], params['b_m'])
# Terms
exp_term = np.exp(beta * tau) - 1
denom = (beta + params['alpha']) * exp_term + 2 * beta
return 2 * exp_term / denom
def first_part_A(tau, **params):
"""
Implements first part of Eq. 9.124 from Nawalka, Beliaeva, Soto (pg. 460)
Needs params: alpha, beta, theta, sigma
"""
# Get beta
if params['risk_free']:
beta = beta_m(params['alpha'], params['sigma'], params['a_m'], 0)
else:
beta = beta_m(params['alpha'], params['sigma'], params['a_m'], params['b_m'])
# Terms
log_term_nom = 2 * beta * np.exp((beta + params['alpha']) * tau / 2)
log_term_denom = (beta + params['alpha']) * (np.exp(beta * tau) - 1) + 2 * beta
coef = 2 * params['alpha'] * params['theta'] / params['sigma'] ** 2
return np.sum(coef * np.log(log_term_nom / log_term_denom))
def last_part_A(tau, **params):
"""
Implements last part of Eq. 9.124 from Nawalka, Beliaeva, Soto (pg. 460)
Needs params: k, nu, rho
"""
# Get C
C = C_i(tau, **params)
l = params['N'] - params['M']
if params['risk_free']:
changing = np.array(params['c_i'])
else:
changing = np.array(params['c_i']) + np.array(params['d_i'])
# Summation
part_sum = 0
for i, j in itertools.product(range(l), range(l)):
if l == 1:
rho = 1
else:
rho = params['rho'][i][j]
new_term = changing[i] * changing[j]
coef = new_term * params['nu'][i] * params['nu'][j] / (params['k'][i] * params['k'][j]) * rho
term = (1 - np.exp(- (params['k'][i] + params['k'][j]) * tau)) / (params['k'][i] + params['k'][j])
part_sum += coef * (tau - C[i] - C[j] + term)
return part_sum / 2
def A_fun(tau, **params):
gaussian, non_gaussian = 0, 0
if params['M'] > 0:
non_gaussian = first_part_A(tau, **params)
if params['N'] - params['M'] > 0:
gaussian = last_part_A(tau, **params)
return gaussian + non_gaussian
def H_simple(t, T, **params):
"""
Implements Eq. 9.123 from Nawalka, Beliaeva, Soto (pg. 460)
"""
if params['risk_free']:
delta = params['delta']
else:
delta = params['delta'] + params['spread']
return delta * (T - t)
def bond_pricer_simple(t, T, **params):
"""
Implements Eq. 9.31 from Nawalka, Beliaeva, Soto (pg. 426)
"""
tau = T - t
A = A_fun(tau, **params)
B_term, C_term = 0, 0
if params['M'] > 0:
if params['risk_free']:
new_term = np.array(params['a_m'])
else:
new_term = np.array(params['a_m']) + np.array(params['b_m'])
B_term = np.sum(new_term * B_m(tau, **params) * params['X_0'])
if params['N'] - params['M'] > 0:
if params['risk_free']:
new_term = np.array(params['c_i'])
else:
new_term = np.array(params['c_i']) + np.array(params['d_i'])
C_term = np.sum(new_term * C_i(tau, **params) * params['Y_0'])
H = H_simple(t, T, **params)
return np.exp(A - B_term - C_term - H)
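# Minimal usage sketch for the pricer above. The parameter set mirrors
# `def_test_01` from the commented-out test block at the bottom of this module;
# the values are illustrative, not calibrated to market data.
def _example_risk_free_price():
    example_params = dict(M=0, N=1, delta=0.02,
                          c_i=[1], k=np.array([1]), nu=np.array([0.02]), Y_0=0.03,
                          risk_free=True)
    # Price at t=0 of a zero-coupon bond maturing at T=1.
    return bond_pricer_simple(0, 1, **example_params)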
def beta1m(**params):
"""
Implements Eq. 9.162 from Nawalka, Beliaeva, Soto (pg. 472)
"""
inside = params['alpha']**2 + 2 * (np.array(params['a_m']) + np.array(params['b_m'])) * params['sigma']**2
return np.sqrt(inside)
def beta2m(beta1, **params):
"""
Implements Eq. 9.162 from Nawalka, Beliaeva, Soto (pg. 472)
"""
return (beta1 - params['alpha']) / 2
def beta3m(beta1, **params):
"""
Implements Eq. 9.162 from Nawalka, Beliaeva, Soto (pg. 472)
"""
return (- beta1 - params['alpha']) / 2
def beta4m(phi, beta1, **params):
"""
Implements Eq. 9.162 from Nawalka, Beliaeva, Soto (pg. 472)
"""
last_term = phi * np.array(params['b_m']) * params['sigma']**2
numerator = - params['alpha'] - beta1 + last_term
denominator = - params['alpha'] + beta1 + last_term
return numerator / denominator
def q_i(phi, **params):
"""
Implements Eq. 9.161 from Nawalka, Beliaeva, Soto (pg. 472)
"""
fraction = np.array(params['d_i']) / (np.array(params['c_i']) + np.array(params['d_i']))
return 1 + phi * params['k'] * fraction
def C_i_def(tau, phi, **params):
"""
Implements Eq. 9.160 from Nawalka, Beliaeva, Soto (pg. 472)
"""
q = q_i(phi, **params)
return (1 - q * np.exp(- params['k'] * tau)) / params['k']
def B_m_def(tau, phi, **params):
"""
Implements Eq. 9.159 from Nawalka, Beliaeva, Soto (pg. 471)
"""
beta1 = beta1m(**params)
beta2 = beta2m(beta1, **params)
beta3 = beta3m(beta1, **params)
beta4 = beta4m(phi, beta1, **params)
exp_term = np.exp(beta1 * tau)
denominator = beta2 * beta4 * exp_term - beta3
numerator = beta4 * exp_term - 1
term = np.array(params['a_m']) + np.array(params['b_m'])
return 2 / (term * params['sigma']**2) * denominator / numerator
def A_first_sum_def(tau, phi, **params):
"""
Implements Eq. 9.158 from Nawalka, Beliaeva, Soto (pg. 471)
"""
summa = 0
l = params['N'] - params['M']
for i, j in itertools.product(range(l), range(l)):
if l == 1:
rho = 1
else:
rho = params['rho'][i][j]
cplusd = np.array(params['c_i']) + np.array(params['d_i'])
q = q_i(phi, **params)
C = C_i_def(tau, phi, **params)
factor = cplusd[i] * cplusd[j] * params['nu'][i] * params['nu'][j] * rho / params['k'][i] / params['k'][j]
k_sum = params['k'][i] + params['k'][j]
fraction = (1 - np.exp(- k_sum * tau)) / k_sum
term = tau - q[i] * C[i] - q[j] * C[j] + q[i] * q[j] * fraction
summa += factor * term
return summa / 2
def A_second_sum_def(tau, phi, **params):
"""
Implements Eq. 9.158 from Nawalka, Beliaeva, Soto (pg. 471)
"""
beta1 = beta1m(**params)
beta3 = beta3m(beta1, **params)
beta4 = beta4m(phi, beta1, **params)
fraction = params['alpha'] * params['theta'] / params['sigma']**2
ln_denominator = 1 - beta4 * np.exp(beta1 * tau)
ln_numerator = 1 - beta4
return 2 * sum(fraction * (beta3 * tau + np.log(ln_denominator / ln_numerator)))
def A_sum_def(tau, phi, **params):
"""
Implements Eq. 9.157 from Nawalka, Beliaeva, Soto (pg. 471)
"""
A_first, A_last = 0, 0
if params['M'] > 0:
A_last = A_second_sum_def(tau, phi, **params)
if params['N'] - params['M'] > 0:
A_first = A_first_sum_def(tau, phi, **params)
return phi * params['spread'] + A_first - A_last
def H_def(t, T, **params):
return (T - t) * (params['delta'] + params['spread'])
def eta(t, T, phi, **params):
tau = T - t
if params['M'] > 0:
factor = np.array(params['a_m']) + np.array(params['b_m'])
B_term = sum(factor * B_m_def(tau, phi, **params) * params['X_0'])
else:
B_term = 0
if params['N'] - params['M'] > 0:
factor = np.array(params['c_i']) + np.array(params['d_i'])
C_term = sum(factor * C_i_def(tau, phi, **params) * params['Y_0'])
else:
C_term = 0
A_term = A_sum_def(tau, phi, **params)
H = H_def(t, T, **params)
return np.exp(A_term - B_term - C_term - H)
def G_function(t, T, h, **params):
return (eta(t, T, h, **params) - eta(t, T, 0, **params)) / h
def in_default(t, T, **params):
tau = T - t
h = 10**-5
def integrand(x):
return G_function(t, t + x, h, **params)
try:
with_recovery = quad(integrand, 0, tau, full_output=1)
except IntegrationWarning:
with_recovery = [0, 0, 0]
if 'LGD' in params:
return with_recovery[0] * (1 - params['LGD'])
else:
return with_recovery[0]
def defaultable_bond_pricer_simple_with_recovery(t, T, **params):
"""
Implements Eq. 9.149 from Nawalka, Beliaeva, Soto (pg. 470)
"""
tau = T - t
without_recovery = bond_pricer_simple(t, T, **params)
if 'risk_free' in params:
if params['risk_free']:
return without_recovery
if 'LGD' in params:
if params['LGD'] is None:
return without_recovery
elif params['LGD'] == 1:
return without_recovery
with_recovery = in_default(t, T, **params)
return without_recovery + with_recovery
def CDS_premium_coupon(t, T, cds_spread, **params):
return cds_spread * bond_pricer_simple(t, T, **params)
def CDS_protection_leg(t, T, **params):
return in_default(t, T, **params)
def CDS_continous_price(t, T, cds_spread, **params):
tau = T - t
protection_leg = CDS_protection_leg(t, T, **params)
def integrand(x):
return bond_pricer_simple(t, t + x, **params)
integration = quad(integrand, 0, tau)
premium_leg = integration[0] * cds_spread
return premium_leg - protection_leg
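# Usage sketch (illustrative): with a defaultable parameter set such as
# `def_test_12` below (spread, a_m/b_m, c_i/d_i and LGD populated,
# risk_free=False), the fair CDS spread is the value of `cds_spread` at which
# CDS_continous_price(t, T, cds_spread, **params) equals zero, i.e. the premium
# leg exactly offsets the protection leg.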
"""
def_test_01 = dict(M=0, N=1, delta=0.02,
c_i=[1], k=np.array([1]), nu=np.array([0.02]), Y_0=0.03,
risk_free=True)
test_12 = dict(M=1, N=2, delta=0.02, alpha=np.array([2]), theta=np.array([0.04]), sigma=np.array([0.025]), X_0=0.06,
k=np.array([1]), nu=np.array([0.02]), Y_0=0.03,
risk_free=True)
def_test_12 = dict(M=1, N=2, delta=0.02, spread=0.01,
a_m=[1], b_m=[1], alpha=np.array([2]), theta=np.array([0.04]), sigma=np.array([0.025]), X_0=0.06,
c_i=[1], d_i=[1], k=np.array([1]), nu=np.array([0.02]), Y_0=0.03,
risk_free=False, LGD=0.4)
bond_pricer_simple(0, 1, **def_test_12)
G_function(0, 1, 10**-6, **def_test_12)
from affine_model import bond_pricer_simple as test_function
test_function(0, 1, **test_12)
def_test_13 = dict(M=1, N=3, delta=0.02, spread=0.01,
a_m=[0], b_m=[1], alpha=np.array([2]), theta=np.array([0.04]), sigma=np.array([0.025]), X_0=0.06,
c_i=[1, 1], d_i=[1, 0], k=np.array([1, 1.5]), nu=np.array([0.02, 0.015]), Y_0=[0.03, 0], rho=[[1, -0.5], [-0.5, | |
from math import sqrt
from numpy import (array, unravel_index, nditer, linalg, random, subtract,
power, exp, pi, zeros, arange, outer, meshgrid, dot,
logical_and)
from collections import defaultdict
from warnings import warn
# for unit tests
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from numpy.testing import assert_array_equal
import unittest
"""
Minimalistic implementation of the Self Organizing Maps (SOM).
"""
def fast_norm(x):
"""Returns norm-2 of a 1-D numpy array.
* faster than linalg.norm in case of 1-D arrays (numpy 1.9.2rc1).
"""
return sqrt(dot(x, x.T))
class MiniSom(object):
def __init__(self, x, y, input_len, sigma=1.0, learning_rate=0.5,
decay_function=None, neighborhood_function='gaussian',
random_seed=None):
"""Initializes a Self Organizing Maps.
Parameters
----------
x : int
x dimension of the SOM
y : int
y dimension of the SOM
input_len : int
Number of the elements of the vectors in input.
sigma : float, optional (default=1.0)
Spread of the neighborhood function, needs to be adequate
to the dimensions of the map.
(at the iteration t we have sigma(t) = sigma / (1 + t/T)
where T is #num_iteration/2)
learning_rate : float, optional (default=0.5)
Initial learning rate.
(at the iteration t we have
learning_rate(t) = learning_rate / (1 + t/T)
where T is #num_iteration/2)
decay_function : function (default=None)
Function that reduces learning_rate and sigma at each iteration
default function:
lambda x, current_iteration, max_iter :
x/(1+current_iteration/max_iter)
neighborhood_function : string, optional (default='gaussian')
Function that weights the neighborhood of a position in the map
possible values: 'gaussian', 'mexican_hat', 'bubble'
random_seed : int, optional (default=None)
Random seed to use.
"""
if sigma >= x/2.0 or sigma >= y/2.0:
warn('Warning: sigma is too high for the dimension of the map.')
# RandomState(None) falls back to seeding from the OS entropy source
self._random_generator = random.RandomState(random_seed)
if decay_function:
self._decay_function = decay_function
else:
self._decay_function = lambda x, t, max_iter: x/(1+t/max_iter)
self._learning_rate = learning_rate
self._sigma = sigma
# random initialization
self._weights = self._random_generator.rand(x, y, input_len)*2-1
for i in range(x):
for j in range(y):
# normalization
norm = fast_norm(self._weights[i, j])
self._weights[i, j] = self._weights[i, j] / norm
self._activation_map = zeros((x, y))
self._neigx = arange(x)
self._neigy = arange(y) # used to evaluate the neighborhood function
neig_functions = {'gaussian': self._gaussian,
'mexican_hat': self._mexican_hat,
'bubble': self._bubble}
if neighborhood_function not in neig_functions:
msg = '%s not supported. Functions available: %s'
raise ValueError(msg % (neighborhood_function,
', '.join(neig_functions.keys())))
if neighborhood_function == 'bubble' and sigma % 2 != 1:
warn('sigma should be an odd value when bubble \
is used as neighborhood function')
self.neighborhood = neig_functions[neighborhood_function]
def get_weights(self):
"""Returns the weights of the neural network"""
return self._weights
def _activate(self, x):
"""Updates matrix activation_map, in this matrix
the element i,j is the response of the neuron i,j to x"""
s = subtract(x, self._weights) # x - w
it = nditer(self._activation_map, flags=['multi_index'])
while not it.finished:
# || x - w ||
self._activation_map[it.multi_index] = fast_norm(s[it.multi_index])
it.iternext()
def activate(self, x):
"""Returns the activation map to x"""
self._activate(x)
return self._activation_map
def _gaussian(self, c, sigma):
"""Returns a Gaussian centered in c"""
d = 2*pi*sigma*sigma
ax = exp(-power(self._neigx-c[0], 2)/d)
ay = exp(-power(self._neigy-c[1], 2)/d)
return outer(ax, ay) # the external product gives a matrix
def _mexican_hat(self, c, sigma):
"""Mexican hat centered in c"""
xx, yy = meshgrid(self._neigx, self._neigy)
p = power(xx-c[0], 2) + power(yy-c[1], 2)
d = 2*pi*sigma*sigma
return exp(-p/d)*(1-2/d*p)
def _bubble(self, c, sigma):
"""Constant function centered in c with spread sigma.
sigma should be an odd value,
"""
ax = logical_and(self._neigx > c[0]-sigma/2.,
self._neigx < c[0]+sigma/2.)
ay = logical_and(self._neigy > c[1]-sigma/2.,
self._neigy < c[1]+sigma/2.)
return outer(ax, ay)*1.
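# Illustrative behaviour of the three neighborhood functions (see also
# TestMinisom below): on a 5x5 map, _gaussian((2, 2), 1) and
# _mexican_hat((2, 2), 1) both peak at 1.0 on the winning neuron (2, 2),
# while _bubble((2, 2), 1) is a 0/1 indicator that is non-zero only at (2, 2).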
def winner(self, x):
"""Computes the coordinates of the winning neuron for the sample x"""
self._activate(x)
return unravel_index(self._activation_map.argmin(),
self._activation_map.shape)
def update(self, x, win, t):
"""Updates the weights of the neurons.
Parameters
----------
x : np.array
Current pattern to learn
win : tuple
Position of the winning neuron for x (array or tuple).
t : int
Iteration index
"""
eta = self._decay_function(self._learning_rate, t, self.T)
# sigma and learning rate decrease with the same rule
sig = self._decay_function(self._sigma, t, self.T)
# improves the performances
g = self.neighborhood(win, sig)*eta
it = nditer(g, flags=['multi_index'])
while not it.finished:
# eta * neighborhood_function * (x-w)
x_w = (x - self._weights[it.multi_index])
self._weights[it.multi_index] += g[it.multi_index] * x_w
# normalization
norm = fast_norm(self._weights[it.multi_index])
self._weights[it.multi_index] = self._weights[it.multi_index]/norm
it.iternext()
def quantization(self, data):
"""Assigns a code book (weights vector of the winning neuron)
to each sample in data."""
q = zeros(data.shape)
for i, x in enumerate(data):
q[i] = self._weights[self.winner(x)]
return q
def random_weights_init(self, data):
"""Initializes the weights of the SOM
picking random samples from data"""
it = nditer(self._activation_map, flags=['multi_index'])
while not it.finished:
rand_i = self._random_generator.randint(len(data))
self._weights[it.multi_index] = data[rand_i]
norm = fast_norm(self._weights[it.multi_index])
self._weights[it.multi_index] = self._weights[it.multi_index]/norm
it.iternext()
def train_random(self, data, num_iteration):
"""Trains the SOM picking samples at random from data"""
self._init_T(num_iteration)
for iteration in range(num_iteration):
# pick a random sample
rand_i = self._random_generator.randint(len(data))
self.update(data[rand_i], self.winner(data[rand_i]), iteration)
def train_batch(self, data, num_iteration):
"""Trains using all the vectors in data sequentially"""
self._init_T(len(data)*num_iteration)
iteration = 0
while iteration < num_iteration:
idx = iteration % len(data)
self.update(data[idx], self.winner(data[idx]), iteration)
iteration += 1
def _init_T(self, num_iteration):
"""Initializes the parameter T needed to adjust the learning rate"""
# keeps the learning rate nearly constant
# for the last half of the iterations
self.T = num_iteration/2
def distance_map(self):
"""Returns the distance map of the weights.
Each cell is the normalised sum of the distances between
a neuron and its neighbours."""
um = zeros((self._weights.shape[0], self._weights.shape[1]))
it = nditer(um, flags=['multi_index'])
while not it.finished:
for ii in range(it.multi_index[0]-1, it.multi_index[0]+2):
for jj in range(it.multi_index[1]-1, it.multi_index[1]+2):
if (ii >= 0 and ii < self._weights.shape[0] and
jj >= 0 and jj < self._weights.shape[1]):
w_1 = self._weights[ii, jj, :]
w_2 = self._weights[it.multi_index]
um[it.multi_index] += fast_norm(w_1-w_2)
it.iternext()
um = um/um.max()
return um
def activation_response(self, data):
"""
Returns a matrix where the element i,j is the number of times
that the neuron i,j have been winner.
"""
a = zeros((self._weights.shape[0], self._weights.shape[1]))
for x in data:
a[self.winner(x)] += 1
return a
def quantization_error(self, data):
"""Returns the quantization error computed as the average
distance between each input sample and its best matching unit."""
error = 0
for x in data:
error += fast_norm(x-self._weights[self.winner(x)])
return error/len(data)
def win_map(self, data):
"""Returns a dictionary wm where wm[(i,j)] is a list
with all the patterns that have been mapped in the position i,j."""
winmap = defaultdict(list)
for x in data:
winmap[self.winner(x)].append(x)
return winmap
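# Minimal usage sketch (not part of the original module): train a small SOM on
# random 4-dimensional data and report its quantization error. Map size, sigma
# and iteration count are arbitrary illustrative choices.
def _minisom_usage_example():
    data = random.rand(100, 4)  # 100 samples, 4 features (numpy's random module)
    som = MiniSom(6, 6, 4, sigma=1.0, learning_rate=0.5, random_seed=1)
    som.random_weights_init(data)  # seed the codebook from the data itself
    som.train_random(data, 500)    # 500 random training steps
    return som.quantization_error(data)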
class TestMinisom(unittest.TestCase):
def setup_method(self, method):
self.som = MiniSom(5, 5, 1)
for i in range(5):
for j in range(5):
# checking weights normalization
assert_almost_equal(1.0, linalg.norm(self.som._weights[i, j]))
self.som._weights = zeros((5, 5)) # fake weights
self.som._weights[2, 3] = 5.0
self.som._weights[1, 1] = 2.0
def test_decay_function(self):
assert self.som._decay_function(1., 2., 3.) == 1./(1.+2./3.)
def test_fast_norm(self):
assert fast_norm(array([1, 3])) == sqrt(1+9)
def test_unavailable_neigh_function(self):
with self.assertRaises(ValueError):
MiniSom(5, 5, 1, neighborhood_function='boooom')
def test_gaussian(self):
bell = self.som._gaussian((2, 2), 1)
assert bell.max() == 1.0
assert bell.argmax() == 12 # unravel(12) = (2,2)
def test_mexican_hat(self):
bell = self.som._mexican_hat((2, 2), 1)
assert bell.max() == 1.0
assert bell.argmax() == 12 # unravel(12) = (2,2)
def test_bubble(self):
bubble = self.som._bubble((2, 2), 1)
assert bubble[2,2] == 1
assert sum(sum(bubble)) == 1
def test_win_map(self):
winners = self.som.win_map([5.0, 2.0])
assert winners[(2, 3)][0] == 5.0
assert winners[(1, 1)][0] == 2.0
def test_activation_reponse(self):
response = self.som.activation_response([5.0, 2.0])
assert response[2, 3] == 1
assert response[1, 1] == 1
def test_activate(self):
assert self.som.activate(5.0).argmin() == 13.0 # unravel(13) = (2,3)
def test_quantization_error(self):
self.som.quantization_error([5, 2]) == 0.0
self.som.quantization_error([4, 1]) == 0.5
def test_quantization(self):
q = self.som.quantization(array([4, 2]))
assert q[0] == 5.0
assert q[1] == 2.0
def test_random_seed(self):
som1 = MiniSom(5, 5, 2, sigma=1.0, learning_rate=0.5, random_seed=1)
som2 = MiniSom(5, 5, 2, sigma=1.0, learning_rate=0.5, random_seed=1)
# same initialization
assert_array_almost_equal(som1._weights, som2._weights)
data = random.rand(100, 2)
som1 = MiniSom(5, 5, 2, sigma=1.0, learning_rate=0.5, random_seed=1)
som1.train_random(data, 10)
som2 = MiniSom(5, 5, 2, sigma=1.0, learning_rate=0.5, random_seed=1)
som2.train_random(data, 10)
# same state after training
assert_array_almost_equal(som1._weights, som2._weights)
def test_train_batch(self):
som = MiniSom(5, 5, 2, sigma=1.0, learning_rate=0.5, random_seed=1)
data = array([[4, 2], [3, 1]])
q1 = som.quantization_error(data)
som.train_batch(data, 10)
assert q1 > som.quantization_error(data)
def test_train_random(self):
som = MiniSom(5, 5, 2, sigma=1.0, learning_rate=0.5, random_seed=1)
data = array([[4, 2], [3, 1]])
q1 = | |
# Session ID: 22062
m = p7.match(line)
if m:
ser_dict['session_id'] = int(m.groupdict()['session_num'])
continue
# IKEv2 SA: local 1.1.1.1/4500 remote 172.16.58.3/38452 Active
m = p8.match(line)
if m:
count = 0
ike_version = m.groupdict()['version']
ikev2_dict = ser_dict.setdefault(ike_version, {})
ikev2_dict['local_ip'] = m.groupdict()['localip']
ikev2_dict['local_port'] = int(m.groupdict()['localport'])
ikev2_dict['remote_ip'] = m.groupdict()['remoteip']
ikev2_dict['remote_port'] = int(m.groupdict()['remoteport'])
continue
# Capabilities:DN connid:323 lifetime:10:43:07
m = p9.match(line)
if m:
ikev2_dict['capabilities'] = m.groupdict()['caps']
ikev2_dict['connid'] = int(m.groupdict()['conn'])
ikev2_dict['lifetime'] = m.groupdict()['life']
continue
# IPSEC FLOW: permit ip 0.0.0.0/0.0.0.0 host 172.16.31.10
m = p10.match(line)
if m:
count += 1
ipsec_dict = ser_dict.setdefault('ipsec_flow', {}).setdefault(count, {})
ipsec_dict['flow'] = m.groupdict()['TS']
continue
# Active SAs: 2, origin: crypto map
m = p11.match(line)
if m:
ipsec_dict['active_sa'] = int(m.groupdict()['sa_count'])
ipsec_dict['origin'] = m.groupdict()['origin_type']
continue
# Inbound: #pkts dec'ed 47668 drop 0 life (KB/Sec) 4607746/1687
m = p12.match(line)
if m:
inbound_dict = ipsec_dict.setdefault('inbound', {})
inbound_dict['decrypted'] = int(m.groupdict()['decrypt_count'])
inbound_dict['dropped'] = int(m.groupdict()['in_drop'])
inbound_dict['life_in_kb'] = int(m.groupdict()['in_life_kb'])
inbound_dict['life_in_sec'] = int(m.groupdict()['in_life_sec'])
continue
# Outbound: #pkts enc'ed 47672 drop 0 life (KB/Sec) 4607812/1874
m = p13.match(line)
if m:
outbound_dict = ipsec_dict.setdefault('outbound', {})
outbound_dict['encrypted'] = int(m.groupdict()['encrypt_count'])
outbound_dict['dropped'] = int(m.groupdict()['out_drop'])
outbound_dict['life_in_kb'] = int(m.groupdict()['out_life_kb'])
outbound_dict['life_in_sec'] = int(m.groupdict()['out_life_sec'])
continue
return ret_dict
class ShowCryptoSessionRemote(ShowCryptoSessionRemoteSuper,ShowCryptoSessionRemoteSchema):
'''Parser for:
* 'show crypto session remote {remote_ip}'
'''
cli_command = ['show crypto session remote {remote_ip}']
def cli(self, remote_ip='', output=None):
if output is None:
out = self.device.execute(self.cli_command[0].format(remote_ip=remote_ip))
else:
out = output
return super().cli(output=out)
class ShowCryptoSessionRemoteDetail(ShowCryptoSessionRemoteSuper,ShowCryptoSessionRemoteSchema):
'''Parser for:
* 'show crypto session remote {remote_ip} detail'
'''
cli_command = ['show crypto session remote {remote_ip} detail']
def cli(self, remote_ip='', output=None):
if output is None:
out = self.device.execute(self.cli_command[0].format(remote_ip=remote_ip))
else:
out = output
return super().cli(output=out)
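# Usage sketch (assumes a connected pyATS/Genie device object named `device`;
# the remote IP is a placeholder):
#     parsed = ShowCryptoSessionRemoteDetail(device=device).cli(remote_ip='172.16.58.3')
# `parsed` is a dictionary that conforms to ShowCryptoSessionRemoteSchema.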
# ==============================
# Schema for
# 'show crypto ikev2 stats'
# ==============================
class ShowCryptoIkev2StatsExtSchema(MetaParser):
"""
Schema for
* 'show crypto ikev2 stats ext-service'
"""
schema = {
'ikev2_stats': {
'aaa_operation':{
'receive_pskey': {
'passed': int,
'failed': int
},
'eap_auth': {
'passed': int,
'failed': int
},
'start_acc': {
'passed': int,
'failed': int
},
'stop_acc': {
'passed': int,
'failed': int
},
'authorization': {
'passed': int,
'failed': int
}
},
'ipsec_operation': {
'ipsec_policy_verify': {
'passed': int,
'failed': int
},
'sa_creation': {
'passed': int,
'failed': int
},
'sa_deletion':{
'passed': int,
'failed': int
}
},
'crypto_engine_operation': {
'dh_key_generated': {
'passed': int,
'failed': int
},
'secret_generated': {
'passed': int,
'failed': int
},
'signature_sign': {
'passed': int,
'failed': int
},
'signature_verify': {
'passed': int,
'failed': int
}
},
'pki_operation': {
'verify_cert': {
'passed': int,
'failed': int
},
'cert_using_http': {
'passed': int,
'failed': int
},
'peer_cert_using_http': {
'passed': int,
'failed': int
},
'get_issuers': {
'passed': int,
'failed': int
},
'get_cert_from_issuers': {
'passed': int,
'failed': int
},
'get_dn_from_cert': {
'passed': int,
'failed': int
}
},
'gkm_operation': {
'get_policy': {
'passed': int,
'failed': int
},
'set_policy': {
'passed': int,
'failed': int
}
},
'ppk_sks_operation': {
'ppk_get_cap': {
'passed': int,
'failed': int
},
'ppk_get_key': {
'passed': int,
'failed': int
}
},
'ike_preroute': {
'idb_verification': {
'passed': int,
'failed': int
}
},
},
}
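# Illustrative mapping between device output and the schema above: the line
# "RECEIVING PSKEY 0 0" is matched by pattern p1 in the parser below and becomes
# ret_dict['ikev2_stats']['aaa_operation']['receive_pskey'] = {'passed': 0, 'failed': 0}.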
# =========================================================
# Parser for 'show crypto ikev2 stats ext-service'
# =========================================================
class ShowCryptoIkev2StatsExt(ShowCryptoIkev2StatsExtSchema):
"""
Parser for
* 'show crypto ikev2 stats ext-service'
"""
# Defines a function to run the cli_command
cli_command = 'show crypto ikev2 stats ext-service'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
# RECEIVING PSKEY 0 0
p1 = re.compile(r'^RECEIVING PSKEY\s+(?P<rec_ps_pass>\d+)\s+(?P<rec_ps_fail>\d+)$')
# AUTHENTICATION USING EAP 23986 0
p2 = re.compile(r'^AUTHENTICATION\s+USING\s+EAP\s+(?P<eap_auth_pass>\d+)\s+(?P<eap_auth_fail>\d+)$')
# START ACCOUNTING 3990 0
p3 = re.compile(r'^START\s+ACCOUNTING\s+(?P<start_acc_pass>\d+)\s+(?P<start_acc_fail>\d+)$')
# STOP ACCOUNTING 3186 0
p4 = re.compile(r'^STOP ACCOUNTING\s+(?P<stop_acc_pass>\d+)\s+(?P<stop_acc_fail>\d+)$')
# AUTHORIZATION 0 0
p5 = re.compile(r'^AUTHORIZATION\s+(?P<auth_pass>\d+)\s+(?P<auth_fail>\d+)$')
# IPSEC POLICY VERIFICATION 8895 0
p6 = re.compile(r'^IPSEC POLICY VERIFICATION\s+(?P<policy_ver_pass>\d+)\s+(?P<policy_ver_fail>\d+)$')
# SA CREATION 8895 0
p7 = re.compile(r'^SA CREATION\s+(?P<sa_creation_pass>\d+)\s+(?P<sa_creation_fail>\d+)$')
# SA DELETION 16182 0
p8 = re.compile(r'^SA DELETION\s+(?P<sa_del_pass>\d+)\s+(?P<sa_del_fail>\d+)$')
# DH PUBKEY GENERATED 11432 0
p9 = re.compile(r'^DH\s+PUBKEY\s+GENERATED\s+(?P<pubkey_gen_pass>\d+)\s+(?P<pubkey_gen_fail>\d+)$')
# DH SHARED SECKEY GENERATED 11432 0
p10 = re.compile(r'^DH\s+SHARED\s+SECKEY\s+GENERATED\s+(?P<secret_gen_pass>\d+)\s+(?P<secret_gen_fail>\d+)$')
# SIGNATURE SIGN 4000 0
p11 = re.compile(r'^SIGNATURE\s+SIGN\s+(?P<sign_pass>\d+)\s+(?P<sign_fail>\d+)$')
# SIGNATURE VERIFY 0 0
p12 = re.compile(r'^SIGNATURE VERIFY\s+(?P<sign_ver_pass>\d+)\s+(?P<sign_ver_fail>\d+)$')
# VERIFY CERTIFICATE 0 0
p13 = re.compile(r'^VERIFY CERTIFICATE\s+(?P<ver_cert_pass>\d+)\s+(?P<ver_cert_fail>\d+)$')
# FETCHING CERTIFICATE USING HTTP 0 0
p14 = re.compile(r'^FETCHING\s+CERTIFICATE\s+USING\s+HTTP\s+(?P<cert_http_pass>\d+)\s+(?P<cert_http_fail>\d+)$')
# FETCHING PEER CERTIFICATE USING HTTP 0 0
p15 = re.compile(r'^FETCHING\s+PEER\s+CERTIFICATE\s+USING\s+HTTP\s+(?P<peer_cert_http_pass>\d+)\s+(?P<peer_cert_http_fail>\d+)$')
# GET ISSUERS 13054 0
p16 = re.compile(r'^GET\s+ISSUERS\s+(?P<get_issuers_pass>\d+)\s+(?P<get_issuers_fail>\d+)$')
# GET CERTIFICATES FROM ISSUERS 6518 0
p17 = re.compile(r'^GET\s+CERTIFICATES\s+FROM\s+ISSUERS\s+(?P<get_cert_pass>\d+)\s+(?P<get_cert_fail>\d+)$')
# GET DN FROM CERT 0 0
p18 = re.compile(r'^GET\s+DN\s+FROM\s+CERT\s+(?P<get_dn_pass>\d+)\s+(?P<get_dn_fail>\d+)$')
# GET_POLICY 0 0
p19 = re.compile(r'^GET_POLICY\s+(?P<get_policy_pass>\d+)\s+(?P<get_policy_fail>\d+)$')
# SET_POLICY 0 0
p20 = re.compile(r'^SET_POLICY\s+(?P<set_policy_pass>\d+)\s+(?P<set_policy_fail>\d+)$')
# PPK GET CAP 0 0
p21 = re.compile(r'^PPK\s+GET\s+CAP\s+(?P<ppk_get_cap_pass>\d+)\s+(?P<ppk_get_cap_fail>\d+)$')
# PPK GET KEY 0 0
p22 = re.compile(r'^PPK\s+GET\s+KEY\s+(?P<ppk_get_key_pass>\d+)\s+(?P<ppk_get_key_fail>\d+)$')
# IKE PREROUTE IDB VERIFICATION 0 0
p23 = re.compile(r'^IKE\s+PREROUTE\s+IDB\s+VERIFICATION\s+(?P<idb_ver_pass>\d+)\s+(?P<idb_ver_fail>\d+)$')
# initial return dictionary
ret_dict = {}
for line in output.splitlines():
line = line.strip()
# RECEIVING PSKEY 0 0
m = p1.match(line)
if m:
ser_dict = ret_dict.setdefault('ikev2_stats', {})
aaa_dict = ser_dict.setdefault('aaa_operation', {})
aaa_dict.update ( { 'receive_pskey' : {
'passed': int(m.groupdict()['rec_ps_pass']),
'failed': int(m.groupdict()['rec_ps_fail'])
}
})
continue
# AUTHENTICATION USING EAP 23986 0
m = p2.match(line)
if m:
aaa_dict.update ( { 'eap_auth' : {
'passed': int(m.groupdict()['eap_auth_pass']),
'failed': int(m.groupdict()['eap_auth_fail'])
}
})
continue
# START ACCOUNTING 3990 0
m = p3.match(line)
if m:
aaa_dict.update ( { 'start_acc' : {
'passed': int(m.groupdict()['start_acc_pass']),
'failed': int(m.groupdict()['start_acc_fail'])
}
})
continue
# STOP ACCOUNTING 3186 0
m = p4.match(line)
if m:
aaa_dict.update ( { 'stop_acc' : {
'passed': int(m.groupdict()['stop_acc_pass']),
'failed': int(m.groupdict()['stop_acc_fail'])
}
})
continue
# AUTHORIZATION 0 0
m = p5.match(line)
if m:
aaa_dict.update ( { 'authorization' : {
'passed': int(m.groupdict()['auth_pass']),
'failed': int(m.groupdict()['auth_fail'])
}
})
continue
# IPSEC POLICY VERIFICATION 8895 0
m = p6.match(line)
if m:
ipsec_dict = ser_dict.setdefault('ipsec_operation', {})
ipsec_dict.update ( { 'ipsec_policy_verify' : {
'passed': int(m.groupdict()['policy_ver_pass']),
'failed': int(m.groupdict()['policy_ver_fail'])
}
})
continue
# SA CREATION 8895 0
m = p7.match(line)
if m:
ipsec_dict.update ( { 'sa_creation' : {
'passed': int(m.groupdict()['sa_creation_pass']),
'failed': int(m.groupdict()['sa_creation_fail'])
}
})
continue
# SA DELETION 16182 0
m = p8.match(line)
if m:
ipsec_dict.update ( { 'sa_deletion' : {
'passed': int(m.groupdict()['sa_del_pass']),
'failed': int(m.groupdict()['sa_del_fail'])
}
})
continue
# DH PUBKEY GENERATED 11432 0
m = p9.match(line)
if m:
crypto_dict = ser_dict.setdefault('crypto_engine_operation', {})
crypto_dict.update ( { 'dh_key_generated' : {
'passed': int(m.groupdict()['pubkey_gen_pass']),
'failed': int(m.groupdict()['pubkey_gen_fail'])
}
})
continue
# DH SHARED SECKEY GENERATED 11432 0
m = p10.match(line)
if m:
crypto_dict.update ( { 'secret_generated' : {
'passed': int(m.groupdict()['secret_gen_pass']),
'failed': int(m.groupdict()['secret_gen_fail'])
}
})
continue
# SIGNATURE SIGN 4000 0
m = p11.match(line)
if m:
crypto_dict.update ( { 'signature_sign' : {
'passed': int(m.groupdict()['sign_pass']),
'failed': int(m.groupdict()['sign_fail'])
}
})
continue
# SIGNATURE VERIFY 0 0
m = p12.match(line)
if m:
crypto_dict.update ( { 'signature_verify' : {
'passed': int(m.groupdict()['sign_ver_pass']),
'failed': int(m.groupdict()['sign_ver_fail'])
}
})
continue
# VERIFY CERTIFICATE 0 0
m = p13.match(line)
if m:
pki_dict = ser_dict.setdefault('pki_operation', {})
pki_dict.update ( { 'verify_cert' : {
'passed': int(m.groupdict()['ver_cert_pass']),
'failed': int(m.groupdict()['ver_cert_fail'])
}
})
continue
# FETCHING CERTIFICATE USING HTTP 0 0
m = p14.match(line)
if m:
pki_dict.update ( { 'cert_using_http' : {
'passed': int(m.groupdict()['cert_http_pass']),
'failed': int(m.groupdict()['cert_http_fail'])
}
})
continue
# FETCHING PEER CERTIFICATE USING HTTP 0 0
m = p15.match(line)
if m:
pki_dict.update ( { 'peer_cert_using_http' : {
'passed': int(m.groupdict()['peer_cert_http_pass']),
'failed': int(m.groupdict()['peer_cert_http_fail'])
}
})
continue
# GET ISSUERS 13054 0
m = p16.match(line)
if m:
pki_dict.update ( { 'get_issuers' : {
'passed': int(m.groupdict()['get_issuers_pass']),
'failed': int(m.groupdict()['get_issuers_fail'])
}
})
continue
# GET CERTIFICATES FROM ISSUERS 6518 0
m = p17.match(line)
if m:
pki_dict.update ( { 'get_cert_from_issuers' : {
'passed': int(m.groupdict()['get_cert_pass']),
'failed': int(m.groupdict()['get_cert_fail'])
}
})
continue
# GET DN FROM CERT 0 0
m = p18.match(line)
if m:
pki_dict.update ( { 'get_dn_from_cert' : {
'passed': int(m.groupdict()['get_dn_pass']),
'failed': int(m.groupdict()['get_dn_fail'])
}
})
continue
# GET_POLICY 0 0
m = p19.match(line)
if m:
gkm_dict = ser_dict.setdefault('gkm_operation', {})
gkm_dict.update ( { 'get_policy' : {
'passed': int(m.groupdict()['get_policy_pass']),
'failed': int(m.groupdict()['get_policy_fail'])
}
})
continue
# SET_POLICY 0 0
m = p20.match(line)
if m:
gkm_dict.update ( { 'set_policy' : {
'passed': int(m.groupdict()['set_policy_pass']),
'failed': int(m.groupdict()['set_policy_fail'])
}
})
continue
# PPK GET CAP 0 0
m = p21.match(line)
if m:
ppk_dict = ser_dict.setdefault('ppk_sks_operation', {})
ppk_dict.update ( { 'ppk_get_cap' : {
'passed': int(m.groupdict()['ppk_get_cap_pass']),
'failed': int(m.groupdict()['ppk_get_cap_fail'])
}
})
continue
# PPK GET KEY 0 0
m = | |
3]])
assert quantity.boundary_values.shape[0] == len(self.mesh4.boundary)
def test_set_values(self):
quantity = Quantity(self.mesh4)
# get referece to data arrays
centroid_values = quantity.centroid_values
vertex_values = quantity.vertex_values
quantity.set_values([[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]],
location='vertices')
assert num.allclose(quantity.vertex_values,
[[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]])
assert id(vertex_values) == id(quantity.vertex_values)
assert num.allclose(quantity.centroid_values, [
2., 5., 3., 0.]) # Centroid
assert num.allclose(quantity.edge_values, [[2.5, 2.0, 1.5],
[5., 5., 5.],
[4.5, 4.5, 0.],
[3.0, -1.5, -1.5]])
# Test default
quantity.set_values([[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]])
assert num.allclose(quantity.vertex_values,
[[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]])
assert num.allclose(quantity.centroid_values, [
2., 5., 3., 0.]) # Centroid
assert num.allclose(quantity.edge_values, [[2.5, 2.0, 1.5],
[5., 5., 5.],
[4.5, 4.5, 0.],
[3.0, -1.5, -1.5]])
# Test centroids
quantity.set_values([1, 2, 3, 4], location='centroids')
assert num.allclose(quantity.centroid_values, [
1., 2., 3., 4.]) # Centroid
# Test exceptions
try:
quantity.set_values([[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]],
location='bas kamel tuba')
except:
pass
try:
quantity.set_values([[1, 2, 3], [0, 0, 9]])
except ValueError:
pass
except:
raise Exception('should have raised ValueError')
def test_set_values_const(self):
quantity = Quantity(self.mesh4)
quantity.set_values(1.0, location='vertices')
assert num.allclose(quantity.vertex_values,
[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])
assert num.allclose(quantity.centroid_values, [1, 1, 1, 1]) # Centroid
assert num.allclose(quantity.edge_values, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
quantity.set_values(2.0, location='centroids')
assert num.allclose(quantity.centroid_values, [2, 2, 2, 2])
def test_set_values_func(self):
quantity = Quantity(self.mesh4)
def f(x, y):
return x+y
quantity.set_values(f, location='vertices')
#print "quantity.vertex_values",quantity.vertex_values
assert num.allclose(quantity.vertex_values,
[[2, 0, 2], [2, 2, 4], [4, 2, 4], [4, 2, 4]])
assert num.allclose(quantity.centroid_values,
[4.0/3, 8.0/3, 10.0/3, 10.0/3])
assert num.allclose(quantity.edge_values,
[[1, 2, 1], [3, 3, 2], [3, 4, 3], [3, 4, 3]])
quantity.set_values(f, location='centroids')
assert num.allclose(quantity.centroid_values,
[4.0/3, 8.0/3, 10.0/3, 10.0/3])
def test_integral(self):
quantity = Quantity(self.mesh4)
# Try constants first
const = 5
quantity.set_values(const, location='vertices')
#print 'Q', quantity.get_integral()
assert num.allclose(quantity.get_integral(),
self.mesh4.get_area() * const)
# Try with a linear function
def f(x, y):
return x+y
quantity.set_values(f, location='vertices')
ref_integral = (4.0/3 + 8.0/3 + 10.0/3 + 10.0/3) * 2
assert num.allclose(quantity.get_integral(), ref_integral)
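# The reference value above follows from integral = sum(centroid_value * area):
# the centroid values of f(x, y) = x + y on mesh4 are 4/3, 8/3, 10/3 and 10/3
# (see test_set_values_func), and each of the four triangles has area 2.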
def test_integral_with_region(self):
quantity = Quantity(self.mesh4)
# Try constants first
const = 5
quantity.set_values(const, location='vertices')
#print 'Q', quantity.get_integral()
assert num.allclose(quantity.get_integral(),
self.mesh4.get_area() * const)
# Try with a linear function
def f(x, y):
return x+y
quantity.set_values(f, location='vertices')
from anuga import Region
reg1 = Region(self.mesh4, indices=[2])
ref_integral = (10.0/3) * 2
assert num.allclose(quantity.get_integral(region=reg1), ref_integral)
reg2 = Region(self.mesh4, indices=[2, 3])
ref_integral = (10.0/3 + 10.0/3) * 2
assert num.allclose(quantity.get_integral(region=reg2), ref_integral)
id = [2, 3]
ref_integral = (10.0/3 + 10.0/3) * 2
assert num.allclose(quantity.get_integral(indices=id), ref_integral)
def test_set_vertex_values(self):
quantity = Quantity(self.mesh4)
quantity.set_vertex_values([0, 1, 2, 3, 4, 5])
assert num.allclose(quantity.vertex_values,
[[1, 0, 2], [1, 2, 4], [4, 2, 5], [3, 1, 4]])
assert num.allclose(quantity.centroid_values,
[1., 7./3, 11./3, 8./3]) # Centroid
assert num.allclose(quantity.edge_values, [[1., 1.5, 0.5],
[3., 2.5, 1.5],
[3.5, 4.5, 3.],
[2.5, 3.5, 2]])
def test_set_vertex_values_subset(self):
quantity = Quantity(self.mesh4)
quantity.set_vertex_values([0, 1, 2, 3, 4, 5])
quantity.set_vertex_values([0, 20, 30, 50], indices=[0, 2, 3, 5])
assert num.allclose(quantity.vertex_values,
[[1, 0, 20], [1, 20, 4], [4, 20, 50], [30, 1, 4]])
def test_set_vertex_values_using_general_interface(self):
quantity = Quantity(self.mesh4)
quantity.set_values([0, 1, 2, 3, 4, 5])
assert num.allclose(quantity.vertex_values,
[[1, 0, 2], [1, 2, 4], [4, 2, 5], [3, 1, 4]])
#Centroid
assert num.allclose(quantity.centroid_values, [1., 7./3, 11./3, 8./3])
assert num.allclose(quantity.edge_values, [[1., 1.5, 0.5],
[3., 2.5, 1.5],
[3.5, 4.5, 3.],
[2.5, 3.5, 2]])
def test_set_vertex_values_using_general_interface_with_subset(self):
"""test_set_vertex_values_using_general_interface_with_subset(self):
Test that indices and polygon works (for constants values)
"""
quantity = Quantity(self.mesh4)
quantity.set_values([0, 2, 3, 5], indices=[0, 2, 3, 5])
assert num.allclose(quantity.vertex_values,
[[0, 0, 2], [0, 2, 0], [0, 2, 5], [3, 0, 0]])
# Constant
quantity.set_values(0.0)
quantity.set_values(3.14, indices=[0, 2], location='vertices')
# Indices refer to triangle numbers
assert num.allclose(quantity.vertex_values,
[[3.14, 3.14, 3.14], [0, 0, 0],
[3.14, 3.14, 3.14], [0, 0, 0]])
# Now try with polygon (pick points where y>2)
polygon = [[0, 2.1], [4, 2.1], [4, 7], [0, 7]]
quantity.set_values(0.0)
quantity.set_values(3.14, polygon=polygon)
assert num.allclose(quantity.vertex_values,
[[0, 0, 0], [0, 0, 0], [0, 0, 0],
[3.14, 3.14, 3.14]])
# Another polygon (pick triangle 1 and 2 (rightmost triangles)
# using centroids
polygon = [[2.1, 0.0], [3.5, 0.1], [2, 2.2], [0.2, 2]]
quantity.set_values(0.0)
quantity.set_values(3.14, location='centroids', polygon=polygon)
assert num.allclose(quantity.vertex_values,
[[0, 0, 0],
[3.14, 3.14, 3.14],
[3.14, 3.14, 3.14],
[0, 0, 0]])
# Same polygon now use vertices (default)
polygon = [[2.1, 0.0], [3.5, 0.1], [2, 2.2], [0.2, 2]]
quantity.set_values(0.0)
#print 'Here 2'
quantity.set_values(3.14, polygon=polygon)
assert num.allclose(quantity.vertex_values,
[[0, 0, 0],
[3.14, 3.14, 3.14],
[3.14, 3.14, 3.14],
[0, 0, 0]])
# Test input checking
try:
quantity.set_values(3.14, polygon=polygon, indices=[0, 2])
except:
pass
else:
msg = 'Should have caught this'
raise Exception(msg)
def test_set_vertex_values_using_general_interface_subset_and_geo(self):
"""test_set_vertex_values_using_general_interface_with_subset(self):
Test that indices and polygon works using georeferencing
"""
quantity = Quantity(self.mesh4)
G = Geo_reference(56, 10, 100)
quantity.domain.set_georeference(G)
# Constant
quantity.set_values(0.0)
quantity.set_values(3.14, indices=[0, 2], location='vertices')
# Indices refer to triangle numbers here - not vertices (why?)
assert num.allclose(quantity.vertex_values,
[[3.14, 3.14, 3.14], [0, 0, 0],
[3.14, 3.14, 3.14], [0, 0, 0]])
# Now try with polygon (pick points where y>2)
polygon = num.array([[0, 2.1], [4, 2.1], [4, 7], [0, 7]])
polygon += [G.xllcorner, G.yllcorner]
quantity.set_values(0.0)
quantity.set_values(3.14, polygon=polygon, location='centroids')
assert num.allclose(quantity.vertex_values,
[[0, 0, 0], [0, 0, 0], [0, 0, 0],
[3.14, 3.14, 3.14]])
# Another polygon (pick triangle 1 and 2 (rightmost triangles)
polygon = num.array([[2.1, 0.0], [3.5, 0.1], [2, 2.2], [0.2, 2]])
polygon += [G.xllcorner, G.yllcorner]
quantity.set_values(0.0)
quantity.set_values(3.14, polygon=polygon)
msg = ('quantity.vertex_values=\n%s\nshould be close to\n'
'[[0,0,0],\n'
' [3.14,3.14,3.14],\n'
' [3.14,3.14,3.14],\n'
' [0,0,0]]' % str(quantity.vertex_values))
assert num.allclose(quantity.vertex_values,
[[0, 0, 0],
[3.14, 3.14, 3.14],
[3.14, 3.14, 3.14],
[0, 0, 0]]), msg
def test_set_values_using_fit(self):
quantity = Quantity(self.mesh4)
#Get (enough) datapoints
data_points = [[0.66666667, 0.66666667],
[1.33333333, 1.33333333],
[2.66666667, 0.66666667],
[0.66666667, 2.66666667],
[0.0, 1.0],
[0.0, 3.0],
[1.0, 0.0],
[1.0, 1.0],
[1.0, 2.0],
[1.0, 3.0],
[2.0, 1.0],
[3.0, 0.0],
[3.0, 1.0]]
z = linear_function(data_points)
#Use built-in fit_interpolate.fit
quantity.set_values(Geospatial_data(data_points, z), alpha=0)
#quantity.set_values(points = data_points, values = z, alpha = 0)
answer = linear_function(quantity.domain.get_vertex_coordinates())
#print quantity.vertex_values, answer
assert num.allclose(quantity.vertex_values.flat, answer)
#Now try by setting the same values directly
vertex_attributes = fit_to_mesh(data_points,
quantity.domain.get_nodes(),
quantity.domain.get_triangles(),
point_attributes=z,
alpha=0,
verbose=False)
#print vertex_attributes
quantity.set_values(vertex_attributes)
assert num.allclose(quantity.vertex_values.flat, answer)
def test_test_set_values_using_fit_w_geo(self):
#Mesh
vertex_coordinates = [[0.76, 0.76],
[0.76, 5.76],
[5.76, 0.76]]
triangles = [[0, 2, 1]]
mesh_georef = Geo_reference(56, -0.76, -0.76)
mesh1 = Generic_Domain(vertex_coordinates, triangles,
geo_reference=mesh_georef)
mesh1.check_integrity()
#Quantity
quantity = Quantity(mesh1)
#Data
data_points = [[201.0, 401.0],
[201.0, 403.0],
[203.0, 401.0]]
z = [2, 4, 4]
data_georef = Geo_reference(56, -200, -400)
#Reference
ref = fit_to_mesh(data_points, vertex_coordinates, triangles,
point_attributes=z,
data_origin=data_georef.get_origin(),
mesh_origin=mesh_georef.get_origin(),
alpha=0)
assert num.allclose(ref, [0, 5, 5])
#Test set_values
quantity.set_values(Geospatial_data(
data_points, z, data_georef), alpha=0)
#quantity.set_values(points = data_points,
# values = z,
# data_georef = data_georef,
# alpha = 0)
#quantity.set_values(points = data_points,
# values = z,
# data_georef = data_georef,
# alpha = 0)
assert num.allclose(quantity.vertex_values.flat, ref)
#Test set_values using geospatial data object
quantity.vertex_values[:] = 0.0
geo = Geospatial_data(data_points, z, data_georef)
quantity.set_values(geospatial_data=geo, alpha=0)
assert num.allclose(quantity.vertex_values.flat, ref)
def test_set_values_from_file1(self):
quantity = Quantity(self.mesh4)
#Get (enough) datapoints
data_points = [[0.66666667, 0.66666667],
[1.33333333, 1.33333333],
[2.66666667, 0.66666667],
[0.66666667, 2.66666667],
[0.0, 1.0],
[0.0, 3.0],
[1.0, 0.0],
[1.0, 1.0],
[1.0, 2.0],
[1.0, 3.0],
[2.0, 1.0],
[3.0, 0.0],
[3.0, 1.0]]
data_geo_spatial = Geospatial_data(data_points,
geo_reference=Geo_reference(56, 0, 0))
data_points_absolute = data_geo_spatial.get_data_points(absolute=True)
attributes = linear_function(data_points_absolute)
att = 'spam_and_eggs'
#Create .txt file
ptsfile = tempfile.mktemp(".txt")
file = open(ptsfile, "w")
file.write(" x,y," + att + " \n")
for data_point, attribute in zip(data_points_absolute, attributes):
row = str(data_point[0]) + ',' + str(data_point[1]) \
+ ',' + str(attribute)
file.write(row + "\n")
file.close()
#Check that values can be set from file
quantity.set_values(filename=ptsfile,
attribute_name=att, alpha=0)
answer = linear_function(quantity.domain.get_vertex_coordinates())
#print quantity.vertex_values.flat
#print answer
assert num.allclose(quantity.vertex_values.flat, answer)
#Check that values can be set from file using default attribute
quantity.set_values(filename=ptsfile, alpha=0)
assert num.allclose(quantity.vertex_values.flat, answer)
#Cleanup
import os
os.remove(ptsfile)
def Xtest_set_values_from_file_using_polygon(self):
"""test_set_values_from_file_using_polygon(self):
Test that polygon restriction works for general points data
"""
quantity = Quantity(self.mesh4)
#Get (enough) datapoints
data_points = [[0.66666667, 0.66666667],
[1.33333333, 1.33333333],
[2.66666667, 0.66666667],
[0.66666667, 2.66666667],
[0.0, 1.0],
[0.0, 3.0],
[1.0, 0.0],
[1.0, 1.0],
[1.0, 2.0],
[1.0, 3.0],
[2.0, 1.0],
[3.0, 0.0],
[3.0, | |
"""
PyAltmetric
This is a python wrapper for the Altmetric API.
For more information on the Altmetric API visit http://api.altmetric.com/.
Some pieces of this library were inspired by or derived from the altmetric api
wrapper altmetric.py which is licensed under the MIT open source license.
If you display Altmetric data please attribute Altmetric somewhere on your
page.
"""
import requests
import datetime
import warnings
import json
class AltmetricException(Exception):
"""Base class for any pyaltmetric error."""
pass
class JSONParseException(AltmetricException):
"""
Failed to turn HTTP Response into JSON.
Site is probably in the wrong format.
"""
pass
class AltmetricHTTPException(AltmetricException):
"""A query argument or setting was formatted incorrectly."""
def __init__(self, status_code):
response_codes = {
403:"You are not authorized for this call.",
420:"Rate Limit Reached",
502:"API is down.",
}
super(AltmetricHTTPException, self).__init__(
response_codes.get(status_code, status_code)
)
class IncorrectInput(AltmetricException):
"""Informing the user that their query is incorrect."""
def __init__(self, msg):
super(IncorrectInput, self).__init__(msg)
class Altmetric(object):
def __init__(self, api_key = None, api_version = 'v1'):
"""Cache API key and version."""
self._api_version = api_version
if self._api_version != 'v1':
warnings.warn("This wrapper has only been tested with API v1."
"If you try another version it will probably break.")
self._api_url = "http://api.altmetric.com/%s/" % self.api_version
self._api_key = {}
if api_key:
self._api_key = {'key': api_key}
#Make articles
def article_from_doi(self, doi):
"""Create an Article object using DOI."""
raw_json = self._get_altmetrics('doi', doi)
return self._create_article(raw_json)
def article_from_pmid(self, pmid):
"""Create an Article object using PMID."""
raw_json = self._get_altmetrics('pmid', pmid)
return self._create_article(raw_json)
def article_from_altmetric(self, altmetric_id):
"""Create an Article object using Altmetric ID."""
warnings.warn("Altmetric ID's are subject to change.")
raw_json = self._get_altmetrics('id', altmetric_id)
return self._create_article(raw_json)
def article_from_ads(self, ads_bibcode):
"""Create an Article object using ADS Bibcode."""
raw_json = self._get_altmetrics('ads', ads_bibcode)
return self._create_article(raw_json)
def article_from_arxiv(self, arxiv_id):
"""Create an Article object using arXiv ID."""
raw_json = self._get_altmetrics('arxiv', arxiv_id)
return self._create_article(raw_json)
def articles_from_timeframe(self, timeframe, page = 1, num_results = 100,
doi_prefix = None, nlmid = None, subjects = None, cited_in = None):
"""
Return articles with mentions within a certain timeframe; keyword
arguments can further limit the search.
:param timeframe: Argument for past x days/months/years. In format:
1d, 1m, 1y...
:param page: Integer. Which page of results you are on.
:param num_results: 1-100. Number of results per page.
:param doi_prefix: Limits results to those with this doi prefix.
:param nlmid: List of journal NLM IDs.
:param subjects: List of slugified journal subjects, accepts NLM
subject ontology term(s).
:param cited_in: Options of facebook, blogs, linkedin, video,
pinterest, gplus,twitter, reddit, news, f1000, rh, qna,
forum, peerreview.
"""
timeframe = self._check_timeframe(timeframe)
while(1):
raw_json = self._get_altmetrics('citations', timeframe,
page = page, num_results = num_results,
doi_prefix = doi_prefix, nlmid = nlmid,
subjects = subjects, cited_in = cited_in)
page += 1
if not raw_json:
break
for result in raw_json.get('results', []):
yield self._create_article(result)
def _get_altmetrics(self, method, *args, **kwargs):
"""
Request information from Altmetric. Return a dictionary.
"""
request_url = self.api_url + method + "/" + "/".join([a for a in args])
params = kwargs or {}
params.update(self.api_key)
response = requests.get(request_url, params = params)
if response.status_code == 200:
try:
return response.json()
except ValueError as e:
raise JSONParseException(e.message)
elif response.status_code in (404, 400):
return {}
else:
raise AltmetricHTTPException(response.status_code)
def _create_article(self, json):
"""Return an article object."""
try:
return Article(json)
except AttributeError:
return None
def _check_timeframe(self, timeframe):
if len(timeframe) > 2:
if timeframe == 'all time':
timeframe = 'at'
else:
timeframe = timeframe[0]+timeframe[2]
if timeframe not in [
'at','1d','2d','3d','4d','5d','6d','1w','1m','3m','6m','1y']:
raise IncorrectInput("Invalid timeframe entered.")
return timeframe
@property
def api_version(self):
return self._api_version
@property
def api_url(self):
return self._api_url
@property
def api_key(self):
return self._api_key
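# Minimal usage sketch (assumes a valid Altmetric API key and network access;
# the DOI below is only a placeholder):
def _pyaltmetric_usage_example(api_key):
    client = Altmetric(api_key=api_key)
    article = client.article_from_doi("10.1038/480426a")
    if article is not None:
        return article.title, article.doi
    return None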
class Article():
def __init__(self, raw_dict):
"""
Create an article object. Get raw dictionary from
Altmetrics JSON. Parse dictionary into attributes.
"""
if raw_dict:
self._raw = raw_dict
self._parse_raw()
else:
raise AttributeError
@classmethod
def from_json_file(cls, filename):
"""Return article from filename or path."""
try:
with open(filename) as fi:
raw = json.load(fi)
obj = Article(raw)
return obj
except ValueError as e:
raise JSONParseException(e.message)
@classmethod
def from_json(cls, file_):
"""Return an article from file."""
try:
raw = json.load(file_)
obj = Article(raw)
return obj
except ValueError as e:
raise JSONParseException(e.message)
def _parse_raw(self):
"""Extract all attributes from raw dictionary."""
#Article Info
self._title = self._raw.get('title')
self._abstract = self._raw.get('abstract')
self._abstract_source = self._raw.get('abstract_source')
self._journal = self._raw.get('journal')
self._subjects = self._raw.get('subjects', [])
self._added_on = self._convert_to_datetime(self._raw.get('added_on'))
self._published_on = self._convert_to_datetime(
self._raw.get('published_on'))
self._url = self._raw.get('url')
self._is_open_access = self._raw.get('is_oa')
self._scopus_subjects = self._raw.get('scopus_subjects', [])
self._publisher_subjects = self._parse_publisher_subjects\
(self._raw.get('publisher_subjects',[]))
self._taglines = self._raw.get('tq', [])
#Various ID's
self._doi = self._raw.get('doi')
self._nlmid = self._raw.get('nlmid')
self._pmid = self._raw.get('pmid')
self._altmetric_id = str(self._raw.get('altmetric_id', ""))
self._arxiv_id = self._raw.get('arxiv_id')
self._ads_id = self._raw.get('ads_id')
self._issns = self._raw.get('issns', [])
#Altmetrics
self._score = self._raw.get('score')
self._score_history = self._parse_score_history(
self._raw.get('history', {}))
self._score_context = self._parse_score_context(
self._raw.get('context', {}))
self._last_updated = self._convert_to_datetime(
self._raw.get('last_updated'))
self._schema = self._raw.get('schema')#schema for what?
self._cited_by_facebook_walls_count = self._raw.get(
'cited_by_fbwalls_count')
self._cited_by_redits_count = self._raw.get('cited_by_rdts_count')
self._cited_by_tweeters_count = self._raw.get(
'cited_by_tweeters_count')
self._cited_by_google_plus_count = self._raw.get(
'cited_by_gplus_count')
self._cited_by_msm_count = self._raw.get('cited_by_msm_count')
self._cited_by_delicious_count = self._raw.get('cited_by_delicious_count')
self._cited_by_qs_count = self._raw.get('cited_by_qs_count')
self._cited_by_posts_count = self._raw.get('cited_by_posts_count')
self._cited_by_accounts_count = (
self._raw.get('cited_by_accounts_count')
or self._raw.get('by_accounts_count')
)
self._cited_by_forums_count = self._raw.get('cited_by_forums_count')
self._cited_by_peer_review_sites_count = self._raw.get(
'cited_by_peer_review_sites_count')
self._cited_by_feeds_count = self._raw.get('cited_by_feeds_count')
self._cited_by_videos_count = self._raw.get('cited_by_videos_count')
self._cohorts = self._raw.get('cohorts', {})
self._readers_count = self._raw.get('readers_count')
self._readers = self._raw.get('readers', {})
        self._altmetric_details_url = self._raw.get('details_url')
self._altmetric_images = self._raw.get('images', {})
def _parse_score_history(self, history):
"""Make the score_history dictionary a little more readable."""
new_dictionary = {}
if history:
change = {'d':'day','m':'month','w':'week','y':'year'}
for item in history:
if item == 'at':
date = "all time"
else:
if item[0] == '1':
date = "past " + change[item[1]]
else:
date = "past " + item[0]+ " " + change[item[1]]+"s"
new_dictionary[date] = history[item]
return new_dictionary
def _convert_to_datetime(self, unix_time):
"""Convert UNIX timestamp to a datetime object."""
if isinstance(unix_time, int):
return datetime.datetime.fromtimestamp(unix_time)
def _parse_publisher_subjects(self, subjects):
"""
Turns the publisher_subjects list of dictionaries into a list of
subjects.
"""
new_subjects = []
if subjects:
for item in subjects:
new_subjects.append(item['name'])
return new_subjects
def _parse_score_context(self, context):
"""
Change the names of the dictionaries in context to make more sense.
"""
new_context = {}
if context:
new_context['all'] = context.get(
'all', {})
new_context['journal age'] = context.get(
'similar_age_journal_3m', {})
new_context['context age'] = context.get(
'similar_age_3m', {})
new_context['journal'] = context.get('journal', {})
return new_context
    def __repr__(self):
        # Return text rather than encoded bytes so this also works on Python 3.
        return self.title[:12] if self.title else ""
    def __str__(self):
        return u"\n".join(
            u"{}: {}".format(key, value) for key, value in self._raw.items())
#Basic info
@property
def raw_dictionary(self):
return self._raw
@property
def title(self):
return self._title
@property
def abstract(self):
return self._abstract
@property
def abstract_source(self):
return self._abstract_source
@property
def journal(self):
return self._journal
@property
def subjects(self):
"""Return a list of realted subjects"""
return self._subjects
@property
def scopus_subjects(self):
"""Return a list of Scopus subjects"""
return self._scopus_subjects
@property
def publisher_subjects(self):
"""Return a list of related subjects."""
return self._publisher_subjects
@property
def added_on(self):
return self._added_on
@property
def published_on(self):
return self._published_on
@property
def url(self):
return self._url
@property
def is_open_access(self):
return self._is_open_access
@property
def taglines(self):
"""Return a list of related phrases"""
return self._taglines
#Various ID's
@property
def doi(self):
return self._doi
@property
def nlmid(self):
return self._nlmid
@property
def pmid(self):
return self._pmid
@property
def altmetric_id(self):
return self._altmetric_id
@property
def arxiv_id(self):
return self._arxiv_id
@property
def ads_id(self):
return self._ads_id
@property
def issns(self):
"""A list of issns."""
return self._issns
#Altmetrics
@property
def score(self):
return self._score
@property
def score_history(self):
"""
Return dictionry of Altmetric scores for time periods
such as 'past day', 'past 3 days', 'past month', 'past year',
and 'all time' looking only at that time period.
"""
return self._score_history
@property
def last_updated(self):
"""Return when the Altmetrics were last updated."""
return self._last_updated
@property
def score_context(self):
"""
Return a dictionary that allows you to compare an article's popularity
to articles of a 'similar age'(published within 6 weeks on either
side), articles in journals of a 'similar age', and other articles in
the same 'journal'.
"""
return self._score_context
#Cited by
    #Counts of posts/unique accounts citing the article on various media.
@property
def cited_by_facebook_walls_count(self):
"""
Return number of posts made on public facebook walls mentioning chosen
article.
"""
return self._cited_by_facebook_walls_count
@property
def cited_by_redits_count(self):
return self._cited_by_redits_count
@property
def cited_by_tweeters_count(self):
return self._cited_by_tweeters_count
@property
def cited_by_google_plus_count(self):
return self._cited_by_google_plus_count
@property
def cited_by_msm_count(self):
"""Return number | |
str(i + 1)
hostname = self._input("Hostname%s" % dns_message, default,
datatype="hostname")
if dns_domain is not None and not hostname.endswith(dns_domain):
hostname += "." + dns_domain
component["hostname"] = hostname
return hostname
def _setup_ip_addresses(self, deployment, i, hostname, is_vsc):
component = deployment[i]
mgmt_ip = self._setup_mgmt_address(component, hostname)
self._setup_mgmt_prefix(component)
self._setup_mgmt_gateway(component, mgmt_ip)
if is_vsc:
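            # VSCs need control-plane and system (routing) addresses in
            # addition to the management interface configured above.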
default = self._get_value(component, "ctrl_ip")
ctrl_ip = self._input("Control IP address for data", default,
datatype="ipaddr")
component["ctrl_ip"] = ctrl_ip
default = "24"
if "ctrl_ip_prefix" in component:
default = str(component["ctrl_ip_prefix"])
ctrl_ip_prefix = self._input("Control IP address prefix length",
default, datatype="int")
component["ctrl_ip_prefix"] = ctrl_ip_prefix
default = self._get_value(component, "system_ip")
system_ip = self._input("System IP address for routing", default,
datatype="ipaddr")
component["system_ip"] = system_ip
self._setup_target_server(component)
def _setup_mgmt_address(self, component, hostname):
default = self._resolve_hostname(hostname)
if (default is None and "mgmt_ip" in component and
component["mgmt_ip"] != ""):
default = component["mgmt_ip"]
mgmt_ip = self._input("Management IP address", default,
datatype="ipaddr")
component["mgmt_ip"] = mgmt_ip
return mgmt_ip
def _setup_mgmt_prefix(self, component):
default = "24"
if "mgmt_ip_prefix" in component:
default = str(component["mgmt_ip_prefix"])
mgmt_ip_prefix = self._input("Management IP address prefix length",
default, datatype="int")
component["mgmt_ip_prefix"] = mgmt_ip_prefix
return mgmt_ip_prefix
def _setup_mgmt_gateway(self, component, mgmt_ip):
if "mgmt_gateway" in self.state:
default = self.state["mgmt_gateway"]
elif "mgmt_gateway" in component and component["mgmt_gateway"] != "":
default = component["mgmt_gateway"]
else:
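            # No gateway known yet: default to the .1 address on the
            # management IP's network (last octet replaced with 1).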
octets = mgmt_ip.split(".")
octets.pop()
octets.append("1")
default = ".".join(octets)
mgmt_gateway = self._input("Management IP gateway", default,
datatype="ipaddr")
component["mgmt_gateway"] = mgmt_gateway
self.state["mgmt_gateway"] = mgmt_gateway
return mgmt_gateway
def _setup_target_server(self, component):
if "target_server" in self.state:
default = self.state["target_server"]
elif "target_server" in component and component["target_server"] != "":
default = component["target_server"]
else:
default = None
target_server = self._input("Target server (hypervisor) IP", default,
datatype="hostname")
component["target_server"] = target_server
self.state["target_server"] = target_server
self._append_target_server(target_server)
return target_server
def _append_target_server(self, target_server):
if "all_target_servers" not in self.state:
self.state["all_target_servers"] = list()
if target_server not in self.state["all_target_servers"]:
self.state["all_target_servers"].append(target_server)
def _resolve_hostname(self, hostname):
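        # getent consults the system resolver (including /etc/hosts), not
        # just DNS, so locally defined hosts also resolve.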
try:
rc, output_lines = self._run_shell(
"getent hosts %s" % hostname)
if rc == 0:
self._unrecord_problem("dns_resolve")
self._print("")
return output_lines[0].split(" ")[0]
else:
self._record_problem(
"dns_resolve", "Could not resolve hostnames with DNS")
self._print(
u"\nCould not resolve %s to an IP address, this is "
u"required for MetroAE to operate. Is the hostname "
u"defined in DNS?" % hostname)
except Exception as e:
self._record_problem(
"dns_resolve", "Error while resolving hostnames with DNS")
self._print("\nAn error occurred while resolving hostname: " +
str(e))
self._print("Please contact: " + METROAE_CONTACT)
return None
def _setup_vmname(self, deployment, i, hostname, with_upgrade):
component = deployment[i]
dns_domain = None
if "dns_domain" in self.state:
dns_domain = self.state["dns_domain"]
if "vmname" in component:
default = component["vmname"]
elif dns_domain is not None and hostname.endswith(dns_domain):
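            # Strip the DNS domain (and its leading dot) so the default VM
            # name is just the short hostname.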
default = hostname[0:-len(dns_domain) - 1]
else:
default = hostname
vmname = self._input("VM name", default)
component["vmname"] = vmname
if with_upgrade:
default = "new-" + vmname
upgrade_vmname = self._input("Upgrade VM name", default)
component["upgrade_vmname"] = upgrade_vmname
def _setup_vnsutils(self, component, i):
dns_domain = None
dns_message = ""
if "dns_domain" in self.state:
dns_domain = self.state["dns_domain"]
dns_message = " (we'll add .%s)" % dns_domain
default = None
if "data_fqdn" in component and component["data_fqdn"] != "":
default = component["data_fqdn"]
else:
default = "vnsutil%d.data" % (i + 1)
hostname = self._input("Data network FQDN%s" % dns_message, default,
datatype="hostname")
if dns_domain is not None and not hostname.endswith(dns_domain):
hostname += "." + dns_domain
component["data_fqdn"] = hostname
default = self._resolve_hostname(hostname)
if (default is None and "data_ip" in component and
component["data_ip"] != ""):
default = component["data_ip"]
data_ip = self._input("Data network IP address", default,
datatype="ipaddr")
component["data_ip"] = data_ip
default = self._get_value(component, "data_netmask")
if default is None:
default = "255.255.255.0"
address = self._input("Data network subnet mask", default,
datatype="ipaddr")
component["data_netmask"] = address
choice = self._input("Will you use DHCP on the VNSUtil?", 0,
["(Y)es", "(n)o"])
if choice == 1:
if "data_subnet" in component:
del component["data_subnet"]
if "nsgv_gateway" in component:
del component["nsgv_gateway"]
return
default = self._get_value(component, "data_subnet")
if default is None:
octets = data_ip.split(".")
octets.pop()
octets.append("0")
default = ".".join(octets)
data_subnet = self._input("Data IP subnet for DHCP",
default, datatype="ipaddr")
component["data_subnet"] = data_subnet
self.state["data_subnet"] = data_subnet
default = self._get_value(component, "nsgv_gateway")
if default is None:
octets = data_ip.split(".")
octets.pop()
octets.append("1")
default = ".".join(octets)
nsgv_gateway = self._input("Data IP gateway given by DHCP",
default, datatype="ipaddr")
component["nsgv_gateway"] = nsgv_gateway
self.state["nsgv_gateway"] = nsgv_gateway
def _setup_nsgv_component(self, component):
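        # Default to the MetroAE-driven (zfb_metro) bootstrap unless the
        # deployment already records a bootstrap_method.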
default = 1
if "bootstrap_method" in component:
if component["bootstrap_method"] == "none":
default = 0
elif component["bootstrap_method"] == "zfb_metro":
default = 1
elif component["bootstrap_method"] == "zfb_external":
default = 2
elif component["bootstrap_method"] == "activation_link":
default = 3
choice = self._input("Bootstrap method", default, [
"(n)one - Do not bootstrap",
"(M)etro - MetroAE will perform the bootstrap",
"(e)xternal - Use an external ISO file to bootstrap",
"(a)ctivation - Use an activation link for bootstrap"])
if choice == 0:
component["bootstrap_method"] = "none"
elif choice == 1:
component["bootstrap_method"] = "zfb_metro"
self.state["metro_bootstrap"] = True
self._bootstrap_component_metro(component)
elif choice == 2:
component["bootstrap_method"] = "zfb_external"
self._bootstrap_component_external(component)
elif choice == 3:
component["bootstrap_method"] = "activation_link"
def _bootstrap_component_metro(self, component):
default = self._get_value(component, "nsg_name")
if default is None:
default = self._get_value(component, "vmname")
name = self._input("NSGv name on VSD", default)
component["nsg_name"] = name
default = self._get_value(component, "nsgv_ip")
address = self._input("IP address for NSGv", default,
datatype="ipaddr")
component["nsgv_ip"] = address
component["match_type"] = "ip_address"
component["match_value"] = address
default = self._get_value(component, "nsgv_mac")
mac = self._input("MAC address for NSGv", default)
component["nsgv_mac"] = mac
default = self._get_value(component, "network_port_name")
network_port = self._input("Name for network port", default)
component["network_port_name"] = network_port
default = self._get_value(component, "access_port_name")
access_port = self._input("Name for access port", default)
component["access_port_name"] = access_port
default = self._get_value(component, "access_port_vlan_range")
vlan_range = self._input("VLAN range for access port "
"(format: <start>-<end>)", default)
component["access_port_vlan_range"] = vlan_range
default = self._get_value(component, "access_port_vlan_number")
vlan = self._input("Vlan number for access port", default,
datatype="int")
component["access_port_vlan_number"] = vlan
def _bootstrap_component_external(self, component):
default = None
default_path = component.get("iso_path")
default_file = component.get("iso_file")
if default_path is not None and default_file is not None:
default = os.path.join(default_path, default_file)
path_file = ""
while "/" not in path_file:
path_file = self._input("Full path to ISO file", default)
path, file = os.path.split(path_file)
component["iso_path"] = path
component["iso_file"] = file
def _setup_bootstrap(self, deployment, data):
default = self._get_value(deployment, "nsgv_organization")
org = self._input("Enterprise for NSGvs", default)
deployment["nsgv_organization"] = org
default = self._get_value(deployment, "proxy_user_first_name")
first_name = self._input("First name for proxy user", default)
deployment["proxy_user_first_name"] = first_name
default = self._get_value(deployment, "proxy_user_last_name")
last_name = self._input("Last name for proxy user", default)
deployment["proxy_user_last_name"] = last_name
default = self._get_value(deployment, "proxy_user_email")
email = self._input("Email address for proxy user", default)
deployment["proxy_user_email"] = email
default = self._get_value(deployment, "nsg_infra_profile_name")
profile_name = self._input("Name of NSG infrastructure profile",
default)
deployment["nsg_infra_profile_name"] = profile_name
default = self._get_value(deployment, "nsg_template_name")
template_name = self._input("Name of NSG template", default)
deployment["nsg_template_name"] = template_name
default = self._get_value(deployment, "proxy_dns_name")
dns_name = self._input("DNS name of proxy (on data network)", default)
deployment["proxy_dns_name"] = dns_name
default = self._get_value(deployment, "vsc_infra_profile_name")
profile_name = self._input("Name of VSC infrastructure profile",
default)
deployment["vsc_infra_profile_name"] = profile_name
default = self._get_value(deployment, "first_controller_address")
address = self._input("IP address of primary VSC controller"
" (on data network)", default,
datatype="ipaddr")
deployment["first_controller_address"] = address
choice = self._input("Do you have a secondary VSC controller?", 0,
["(Y)es", "(n)o"])
if choice == 1:
if "second_controller_address" in deployment:
del deployment["second_controller_address"]
return
default = self._get_value(deployment, "second_controller_address")
address = self._input("IP address of secondary VSC controller"
" (on data network)",
default, datatype="ipaddr")
deployment["second_controller_address"] = address
def _setup_ssh(self, username, hostname):
self._print("Adding SSH keys for %s@%s, may ask for password" % (
username, hostname))
try:
options = ""
if self.in_container:
options = "-i /source/id_rsa.pub -o StrictHostKeyChecking=no "
else:
self._setup_ssh_key()
rc, output_lines = self._run_shell(
"ssh-copy-id %s%s@%s" % (options, username, hostname))
if rc == 0:
self._unrecord_problem("ssh_keys")
self._print("\nSuccessfully setup SSH on host %s" % hostname)
return True
else:
self._record_problem(
"ssh_keys", "Could not setup password-less SSH")
self._print("\n".join(output_lines))
self._print(
u"\nCould not add SSH keys for %s@%s, this is required"
u" for MetroAE to operate." % (username, hostname))
except Exception as e:
self._record_problem(
"ssh_keys", "Error while setting up password-less SSH")
self._print("\nAn error occurred while setting up SSH: " +
str(e))
self._print("Please contact: " + METROAE_CONTACT)
return False
def _setup_ssh_key(self):
rc, output_lines = self._run_shell("stat ~/.ssh/id_rsa.pub")
if rc != 0:
self._print("\nCould not find your SSH public key "
"~/.ssh/id_rsa.pub\n")
choice = self._input("Do you wish to generate a new SSH keypair?",
0, ["(Y)es", "(n)o"])
if choice == 0:
rc, output_lines = self._run_shell('ssh-keygen -P "" '
'-f ~/.ssh/id_rsa')
if rc == 0:
self._print("\nSuccessfully generated an SSH keypair")
return True
else:
self._record_problem(
"ssh_keys", "Could not generate SSH | |
'_')
s = s.replace('\xc0', '_')
s = s.replace('\xd9', '_')
s = s.replace('\xda', '_')
s = s.replace('\xf8', '_')
return s
########################################################################
# handlers.
########################################################################
def _handler_command_run_test_200(self, *args, **kwargs):
"""
run test PT200
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.RUN_TEST_200, *args, **kwargs)
return (next_state, result)
def _handler_command_get_instrument_transform_matrix(self, *args, **kwargs):
"""
        get the instrument transform matrix.
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.GET_INSTRUMENT_TRANSFORM_MATRIX, *args, **kwargs)
return (next_state, result)
def _handler_command_save_setup_to_ram(self, *args, **kwargs):
"""
save setup to ram.
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.SAVE_SETUP_TO_RAM, *args, **kwargs)
return (next_state, result)
def _handler_command_clear_error_status_word(self, *args, **kwargs):
"""
clear the error status word
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.CLEAR_ERROR_STATUS_WORD, *args, **kwargs)
return (next_state, result)
def _handler_command_acquire_error_status_word(self, *args, **kwargs):
"""
read the error status word
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.DISPLAY_ERROR_STATUS_WORD, *args, **kwargs)
return (next_state, result)
def _handler_command_display_fault_log(self, *args, **kwargs):
"""
display the error log.
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.GET_FAULT_LOG, *args, **kwargs)
return (next_state, result)
def _handler_command_clear_fault_log(self, *args, **kwargs):
"""
clear the error log.
"""
next_state = None
kwargs['timeout'] = 30
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
result = self._do_cmd_resp(TeledyneInstrumentCmds.CLEAR_FAULT_LOG, *args, **kwargs)
return (next_state, result)
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
@throws InstrumentTimeoutException if the device cannot be woken.
        @throws InstrumentProtocolException if the update commands are not recognized.
"""
# Command device to initialize parameters and send a config change event.
self._protocol_fsm.on_event(TeledyneProtocolEvent.INIT_PARAMS)
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        log.trace("in _handler_command_enter()")
        #self._update_params()
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
######################################################
# #
######################################################
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; can be COMMAND or AUTOSAMPLE.
        @retval (next_state, result), (TeledyneProtocolState.COMMAND or
        TeledyneProtocolState.AUTOSAMPLE, None) if successful.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentStateException if the device response does not correspond to
an expected state.
"""
(protocol_state, agent_state) = self._discover()
if(protocol_state == TeledyneProtocolState.COMMAND):
agent_state = ResourceAgentState.IDLE
return (protocol_state, agent_state)
######################################################
# #
######################################################
def _handler_command_init_params(self, *args, **kwargs):
"""
initialize parameters
"""
next_state = None
result = None
self._init_params()
return (next_state, result)
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state.
"""
self._protocol_fsm.on_event(TeledyneProtocolEvent.INIT_PARAMS)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
Exit autosample state.
"""
log.trace("IN _handler_autosample_exit")
pass
def _handler_autosample_init_params(self, *args, **kwargs):
"""
initialize parameters. For this instrument we need to
put the instrument into command mode, apply the changes
then put it back.
"""
log.debug("in _handler_autosample_init_params")
next_state = None
result = None
error = None
try:
log.debug("stopping logging without checking")
self._stop_logging()
self._init_params()
# Catch all error so we can put ourself back into
# streaming. Then rethrow the error
except Exception as e:
error = e
finally:
# Switch back to streaming
log.debug("starting logging")
self._start_logging()
#self._do_cmd_no_resp(TeledyneInstrumentCmds.START_LOGGING)
if (error):
log.error("Error in apply_startup_params: %s", error)
raise error
return (next_state, result)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
Switch into autosample mode.
@retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
result = None
kwargs['expected_prompt'] = TeledynePrompt.COMMAND
kwargs['timeout'] = 30
log.info("SYNCING TIME WITH SENSOR.")
resp = self._do_cmd_resp(TeledyneInstrumentCmds.SET, TeledyneParameter.TIME, get_timestamp_delayed("%Y/%m/%d, %H:%M:%S"), **kwargs)
# Save setup to nvram and switch to autosample if successful.
resp = self._do_cmd_resp(TeledyneInstrumentCmds.SAVE_SETUP_TO_RAM, *args, **kwargs)
# Issue start command and switch to autosample if successful.
self._start_logging()
next_state = TeledyneProtocolState.AUTOSAMPLE
next_agent_state = ResourceAgentState.STREAMING
return (next_state, (next_agent_state, result))
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample and switch back to command mode.
@retval (next_state, result) tuple, (ProtocolState.COMMAND,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command misunderstood or
incorrect prompt received.
"""
next_state = None
result = None
# Wake up the device, continuing until autosample prompt seen.
timeout = kwargs.get('timeout', TIMEOUT)
#if (self._is_logging(timeout)):
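        # The _is_logging check above is commented out, so logging is stopped
        # unconditionally before returning to command mode.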
self._stop_logging(timeout)
next_state = TeledyneProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
return (next_state, (next_agent_state, result))
def _handler_command_get(self, *args, **kwargs):
"""
Get device parameters from the parameter dict.
@param args[0] list of parameters to retrieve, or DriverParameter.ALL.
@throws InstrumentParameterException if missing or invalid parameter.
"""
log.trace("in _handler_command_get")
next_state = None
result = None
error = None
        # Grab a baseline time for calculating expiration time. It is assumed
        # that all data is valid if acquired after this time.
expire_time = self._param_dict.get_current_timestamp()
log.trace("expire_time = " + str(expire_time))
# build a list of parameters we need to get
param_list = self._get_param_list(*args, **kwargs)
try:
# Take a first pass at getting parameters. If they are
# expired an exception will be raised.
result = self._get_param_result(param_list, expire_time)
except InstrumentParameterExpirationException as e:
# In the second pass we need to update parameters, it is assumed
# that _update_params does everything required to refresh all
# parameters or at least those that would expire.
log.trace("in _handler_command_get Parameter expired, refreshing, %s", e)
if self._is_logging():
log.trace("I am logging")
try:
# Switch to command mode,
self._stop_logging()
self._update_params()
                # Take a second pass at getting values; this time it should
                # have all fresh values.
log.trace("Fetching parameters for the second time")
result = self._get_param_result(param_list, expire_time)
# Catch all error so we can put ourself back into
# streaming. Then rethrow the error
except Exception as e:
error = e
finally:
# Switch back to streaming
self._start_logging()
if(error):
raise error
else:
log.trace("I am not logging")
self._update_params()
            # Take a second pass at getting values; this time it should
            # have all fresh values.
log.trace("Fetching parameters for the second time")
result = self._get_param_result(param_list, expire_time)
return (next_state, result)
def _handler_autosample_get_calibration(self, *args, **kwargs):
"""
execute a get calibration from autosample mode.
For this command we have to move the instrument
into command mode, get calibration, then switch back. If an
exception is thrown we will try to get ourselves back into
streaming and then raise that exception.
@retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
error = None
try:
# Switch to command mode,
self._stop_logging(*args, **kwargs)
kwargs['timeout'] = 120
output = self._do_cmd_resp(TeledyneInstrumentCmds.OUTPUT_CALIBRATION_DATA, *args, **kwargs)
# Catch all error so we can put ourself back into
# streaming. Then rethrow the error
except Exception as e:
error = e
finally:
# Switch back to streaming
self._start_logging()
if(error):
raise error
result = self._sanitize(base64.b64decode(output))
return (next_state, (next_agent_state, result))
#return (next_state, (next_agent_state, {'result': result}))
def _handler_autosample_get_configuration(self, *args, **kwargs):
"""
execute a get configuration from autosample mode.
For this command we have to move the instrument
into command mode, get configuration, then switch back. If an
exception is thrown we will try to get ourselves back into
streaming and then raise that exception.
@retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
error = None
try:
# Switch to command mode,
self._stop_logging(*args, **kwargs)
# Sync the clock
timeout = kwargs.get('timeout', TIMEOUT)
output = self._do_cmd_resp(TeledyneInstrumentCmds.GET_SYSTEM_CONFIGURATION, *args, **kwargs)
# Catch all error so we can put ourself back into
# streaming. Then rethrow the error
except Exception as e:
error = e
finally:
# Switch back to streaming
self._start_logging()
if(error):
raise error
result = self._sanitize(base64.b64decode(output))
| |
of all images, replace ``10%`` of all pixels with either the
value ``0`` or the value ``255`` (same as in the previous example). For
the other ``50%`` of all images, replace *channelwise* ``10%`` of all
pixels with either the value ``0`` or the value ``255``. So, it will be
very rare for each pixel to have all channels replaced by ``255`` or
``0``.
>>> import imgaug.augmenters as iaa
>>> import imgaug.parameters as iap
>>> aug = ReplaceElementwise(0.1, iap.Normal(128, 0.4*128), per_channel=0.5)
Replace ``10%`` of all pixels by gaussian noise centered around ``128``.
Both the replacement mask and the gaussian noise are sampled channelwise
for ``50%`` of all images.
>>> import imgaug.augmenters as iaa
>>> import imgaug.parameters as iap
>>> aug = ReplaceElementwise(
>>> iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
>>> iap.Normal(128, 0.4*128),
>>> per_channel=0.5)
Replace ``10%`` of all pixels by gaussian noise centered around ``128``.
Sample the replacement mask at a lower resolution (``8x8`` pixels) and
upscale it to the image size, resulting in coarse areas being replaced by
gaussian noise.
"""
def __init__(self, mask, replacement, per_channel=False,
name=None, deterministic=False, random_state=None):
super(ReplaceElementwise, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.mask = iap.handle_probability_param(
mask, "mask", tuple_to_uniform=True, list_to_choice=True)
self.replacement = iap.handle_continuous_param(replacement,
"replacement")
self.per_channel = iap.handle_probability_param(per_channel,
"per_channel")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
rss = random_state.duplicate(1+2*nb_images)
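        # One RNG for the per_channel draw plus two per image: rss[1::2] for
        # the mask samples and rss[2::2] for the replacement samples.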
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
gen = enumerate(zip(images, per_channel_samples, rss[1::2], rss[2::2]))
for i, (image, per_channel_i, rs_mask, rs_replacement) in gen:
height, width, nb_channels = image.shape
sampling_shape = (height,
width,
nb_channels if per_channel_i > 0.5 else 1)
mask_samples = self.mask.draw_samples(sampling_shape,
random_state=rs_mask)
# TODO add separate per_channels for mask and replacement
# TODO add test that replacement with per_channel=False is not
# sampled per channel
if per_channel_i <= 0.5:
nb_channels = image.shape[-1]
replacement_samples = self.replacement.draw_samples(
(int(np.sum(mask_samples[:, :, 0])),),
random_state=rs_replacement)
# important here to use repeat instead of tile. repeat
# converts e.g. [0, 1, 2] to [0, 0, 1, 1, 2, 2], while tile
# leads to [0, 1, 2, 0, 1, 2]. The assignment below iterates
# over each channel and pixel simultaneously, *not* first
# over all pixels of channel 0, then all pixels in
# channel 1, ...
replacement_samples = np.repeat(replacement_samples,
nb_channels)
else:
replacement_samples = self.replacement.draw_samples(
(int(np.sum(mask_samples)),), random_state=rs_replacement)
batch.images[i] = replace_elementwise_(image, mask_samples,
replacement_samples)
return batch
def get_parameters(self):
return [self.mask, self.replacement, self.per_channel]
class SaltAndPepper(ReplaceElementwise):
"""
Replace pixels in images with salt/pepper noise (white/black-ish colors).
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of replacing a pixel with salt/pepper noise.
* If a float, then that value will always be used as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled
uniformly per image from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
            * If a ``StochasticParameter``, then an image-sized mask will be
sampled from that parameter per image. Any value ``>0.5`` in
that mask will be replaced with salt and pepper noise.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.SaltAndPepper(0.05)
Replace ``5%`` of all pixels with salt and pepper noise.
>>> import imgaug.augmenters as iaa
>>> aug = iaa.SaltAndPepper(0.05, per_channel=True)
Replace *channelwise* ``5%`` of all pixels with salt and pepper
noise.
"""
def __init__(self, p=0, per_channel=False,
name=None, deterministic=False, random_state=None):
super(SaltAndPepper, self).__init__(
mask=p,
replacement=iap.Beta(0.5, 0.5) * 255,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
class ImpulseNoise(SaltAndPepper):
"""
Add impulse noise to images.
This is identical to ``SaltAndPepper``, except that `per_channel` is
always set to ``True``.
dtype support::
See ``imgaug.augmenters.arithmetic.SaltAndPepper``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of replacing a pixel with impulse noise.
* If a float, then that value will always be used as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled
uniformly per image from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
            * If a ``StochasticParameter``, then an image-sized mask will be
            sampled from that parameter per image. Any value ``>0.5`` in
            that mask will be replaced with impulse noise.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.ImpulseNoise(0.1)
Replace ``10%`` of all pixels with impulse noise.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(ImpulseNoise, self).__init__(
p=p,
per_channel=True,
name=name,
deterministic=deterministic,
random_state=random_state)
class CoarseSaltAndPepper(ReplaceElementwise):
"""
Replace rectangular areas in images with white/black-ish pixel noise.
This adds salt and pepper noise (noisy white-ish and black-ish pixels) to
rectangular areas within the image. Note that this means that within these
rectangular areas the color varies instead of each rectangle having only
one color.
See also the similar ``CoarseDropout``.
TODO replace dtype support with uint8 only, because replacement is
geared towards that value range
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to salt/pepper noise.
* If a float, then that value will always be used as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled
uniformly per image from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then a lower-resolution mask will
be sampled from that parameter per image. Any value ``>0.5`` in
that mask will denote a spatial location that is to be replaced
by salt and pepper noise.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the
replacement mask in absolute pixel dimensions.
Note that this means that *lower* values of this parameter lead to
*larger* areas being replaced (as any pixel in the lower resolution
image will correspond to a larger area at the original resolution).
* If ``None`` then `size_percent` must be set.
* If an integer, then that size will always be used for both height
and width. E.g. a value of ``3`` would lead to a ``3x3`` mask,
          which is then upsampled to ``HxW``, where ``H`` is the image height
and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be
sampled from the discrete interval ``[a..b]``. The mask
will then be generated at size ``MxN`` and upsampled to ``HxW``.
* If a ``StochasticParameter``, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the
replacement mask *in percent* of the input image.
Note that this means that *lower* values of this parameter lead to
*larger* areas being replaced (as any pixel in the lower resolution
image will | |
int
        :param RulePaths: List of URL paths that require authentication.
:type RulePaths: list of str
"""
self.KeyAlpha = None
self.KeyBeta = None
self.KeyGamma = None
self.SignParam = None
self.TimeParam = None
self.ExpireTime = None
self.TimeFormat = None
self.FailCode = None
self.ExpireCode = None
self.RulePaths = None
def _deserialize(self, params):
self.KeyAlpha = params.get("KeyAlpha")
self.KeyBeta = params.get("KeyBeta")
self.KeyGamma = params.get("KeyGamma")
self.SignParam = params.get("SignParam")
self.TimeParam = params.get("TimeParam")
self.ExpireTime = params.get("ExpireTime")
self.TimeFormat = params.get("TimeFormat")
self.FailCode = params.get("FailCode")
self.ExpireCode = params.get("ExpireCode")
self.RulePaths = params.get("RulePaths")
class AdvancedAuthenticationTypeC(AbstractModel):
"""时间戳防盗链高级版模式C配置。
"""
def __init__(self):
"""
        :param AccessKey: Access key.
        :type AccessKey: str
        :param SecretKey: Authentication key.
        :type SecretKey: str
"""
self.AccessKey = None
self.SecretKey = None
def _deserialize(self, params):
self.AccessKey = params.get("AccessKey")
self.SecretKey = params.get("SecretKey")
class AdvancedAuthenticationTypeD(AbstractModel):
"""时间戳防盗链高级版模式D配置。
"""
def __init__(self):
"""
        :param SecretKey: Key used to compute the signature; letters and digits only, 6-32 bytes long.
        :type SecretKey: str
        :param BackupSecretKey: Backup key, used to re-authenticate when authentication with SecretKey fails.
        :type BackupSecretKey: str
        :param SignParam: Name of the signature field in the URI string; letters, digits or underscores only, and it must start with a letter.
        :type SignParam: str
        :param TimeParam: Name of the time field in the URI string; letters, digits or underscores only, and it must start with a letter.
        :type TimeParam: str
        :param ExpireTime: Expiration time, in seconds.
        :type ExpireTime: int
        :param TimeFormat: Time format; dec for decimal, hex for hexadecimal.
        :type TimeFormat: str
"""
self.SecretKey = None
self.BackupSecretKey = None
self.SignParam = None
self.TimeParam = None
self.ExpireTime = None
self.TimeFormat = None
def _deserialize(self, params):
self.SecretKey = params.get("SecretKey")
self.BackupSecretKey = params.get("BackupSecretKey")
self.SignParam = params.get("SignParam")
self.TimeParam = params.get("TimeParam")
self.ExpireTime = params.get("ExpireTime")
self.TimeFormat = params.get("TimeFormat")
class AdvancedAuthenticationTypeE(AbstractModel):
"""时间戳防盗链高级版模式E配置。
"""
def __init__(self):
"""
        :param SecretKey: Key used to compute the signature; letters and digits only, 6-32 bytes long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
        :param SignParam: Name of the signature field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SignParam: str
        :param AclSignParam: Name of the ACL signature field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type AclSignParam: str
        :param StartTimeParam: Name of the start-time field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type StartTimeParam: str
        :param ExpireTimeParam: Name of the expiration-time field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type ExpireTimeParam: str
        :param TimeFormat: Time format, dec (decimal).
        Note: this field may return null, indicating that no valid value was obtained.
        :type TimeFormat: str
"""
self.SecretKey = None
self.SignParam = None
self.AclSignParam = None
self.StartTimeParam = None
self.ExpireTimeParam = None
self.TimeFormat = None
def _deserialize(self, params):
self.SecretKey = params.get("SecretKey")
self.SignParam = params.get("SignParam")
self.AclSignParam = params.get("AclSignParam")
self.StartTimeParam = params.get("StartTimeParam")
self.ExpireTimeParam = params.get("ExpireTimeParam")
self.TimeFormat = params.get("TimeFormat")
class AdvancedAuthenticationTypeF(AbstractModel):
"""时间戳防盗链高级鉴权模式TypeF配置
"""
def __init__(self):
"""
        :param SignParam: Name of the signature field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SignParam: str
        :param TimeParam: Name of the time field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type TimeParam: str
        :param TransactionParam: Name of the Transaction field in the URI string; letters, digits or underscores only, and it must start with a letter.
        Note: this field may return null, indicating that no valid value was obtained.
        :type TransactionParam: str
        :param SecretKey: Primary key used to compute the signature; letters and digits only, 6-32 bytes long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
        :param BackupSecretKey: Backup key used to compute the signature, tried when validation with the primary key fails; letters and digits only, 6-32 bytes long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type BackupSecretKey: str
"""
self.SignParam = None
self.TimeParam = None
self.TransactionParam = None
self.SecretKey = None
self.BackupSecretKey = None
def _deserialize(self, params):
self.SignParam = params.get("SignParam")
self.TimeParam = params.get("TimeParam")
self.TransactionParam = params.get("TransactionParam")
self.SecretKey = params.get("SecretKey")
self.BackupSecretKey = params.get("BackupSecretKey")
class AdvancedCache(AbstractModel):
"""缓存过期配置高级版(功能灰度中,尚未全量)
注意:该版本不支持设置首页缓存规则
"""
def __init__(self):
"""
        :param CacheRules: Cache expiration rules.
        Note: this field may return null, indicating that no valid value was obtained.
        :type CacheRules: list of AdvanceCacheRule
        :param IgnoreCacheControl: Forced caching configuration.
        on: enabled
        off: disabled
        When enabled, edge nodes still cache according to the cache expiration rules even if the origin returns no-cache or no-store headers.
        Disabled by default.
        Note: this field may return null, indicating that no valid value was obtained.
        :type IgnoreCacheControl: str
        :param IgnoreSetCookie: Ignore the Set-Cookie header from the origin server.
        on: enabled
        off: disabled
        Disabled by default.
        Note: this field may return null, indicating that no valid value was obtained.
        :type IgnoreSetCookie: str
"""
self.CacheRules = None
self.IgnoreCacheControl = None
self.IgnoreSetCookie = None
def _deserialize(self, params):
if params.get("CacheRules") is not None:
self.CacheRules = []
for item in params.get("CacheRules"):
obj = AdvanceCacheRule()
obj._deserialize(item)
self.CacheRules.append(obj)
self.IgnoreCacheControl = params.get("IgnoreCacheControl")
self.IgnoreSetCookie = params.get("IgnoreSetCookie")
class Authentication(AbstractModel):
"""时间戳防盗链配置
"""
def __init__(self):
"""
        :param Switch: Hotlink protection configuration switch.
        on: enabled
        off: disabled
        When enabled, exactly one mode must be configured; the other modes must be set to null.
        :type Switch: str
        :param TypeA: Timestamp hotlink protection mode A configuration.
        Note: this field may return null, indicating that no valid value was obtained.
        :type TypeA: :class:`tencentcloud.cdn.v20180606.models.AuthenticationTypeA`
        :param TypeB: Timestamp hotlink protection mode B configuration (mode B is being upgraded on the backend and cannot be configured for now).
        Note: this field may return null, indicating that no valid value was obtained.
        :type TypeB: :class:`tencentcloud.cdn.v20180606.models.AuthenticationTypeB`
        :param TypeC: Timestamp hotlink protection mode C configuration.
        Note: this field may return null, indicating that no valid value was obtained.
        :type TypeC: :class:`tencentcloud.cdn.v20180606.models.AuthenticationTypeC`
        :param TypeD: Timestamp hotlink protection mode D configuration.
        Note: this field may return null, indicating that no valid value was obtained.
        :type TypeD: :class:`tencentcloud.cdn.v20180606.models.AuthenticationTypeD`
"""
self.Switch = None
self.TypeA = None
self.TypeB = None
self.TypeC = None
self.TypeD = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
if params.get("TypeA") is not None:
self.TypeA = AuthenticationTypeA()
self.TypeA._deserialize(params.get("TypeA"))
if params.get("TypeB") is not None:
self.TypeB = AuthenticationTypeB()
self.TypeB._deserialize(params.get("TypeB"))
if params.get("TypeC") is not None:
self.TypeC = AuthenticationTypeC()
self.TypeC._deserialize(params.get("TypeC"))
if params.get("TypeD") is not None:
self.TypeD = AuthenticationTypeD()
self.TypeD._deserialize(params.get("TypeD"))
class AuthenticationTypeA(AbstractModel):
"""时间戳防盗链模式 A 配置
时间戳防盗链模式 A 的访问 URL 格式为:http://DomainName/Filename?sign=timestamp-rand-uid-md5hash
其中 timestamp 为十进制 UNIX 时间戳;
rand 为随机字符串,0 ~ 100 位大小写字母与数字组成;
uid 为 0;
md5hash:MD5(文件路径-timestamp-rand-uid-自定义密钥)
"""
def __init__(self):
"""
        :param SecretKey: Key used to compute the signature.
        Upper/lowercase letters and digits only, 6-32 characters long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
        :param SignParam: Signature parameter name.
        Upper/lowercase letters, digits or underscores only, 1-100 characters long, and it cannot start with a digit.
        :type SignParam: str
        :param ExpireTime: Signature expiration time.
        In seconds; the maximum value is 31536000.
        :type ExpireTime: int
        :param FileExtensions: List of file extensions that are (or are not) authenticated.
        If the list contains the character *, it means all files.
        :type FileExtensions: list of str
        :param FilterType: whitelist: authenticate all types except those in the FileExtensions list.
        blacklist: authenticate only the types listed in FileExtensions.
        :type FilterType: str
"""
self.SecretKey = None
self.SignParam = None
self.ExpireTime = None
self.FileExtensions = None
self.FilterType = None
def _deserialize(self, params):
self.SecretKey = params.get("SecretKey")
self.SignParam = params.get("SignParam")
self.ExpireTime = params.get("ExpireTime")
self.FileExtensions = params.get("FileExtensions")
self.FilterType = params.get("FilterType")
class AuthenticationTypeB(AbstractModel):
"""时间戳防盗链模式 B 配置(B 模式正在进行平台升级,暂不支持配置)
"""
def __init__(self):
"""
        :param SecretKey: Key used to compute the signature.
        Upper/lowercase letters and digits only, 6-32 characters long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
        :param ExpireTime: Signature expiration time.
        In seconds; the maximum value is 31536000.
        :type ExpireTime: int
        :param FileExtensions: List of file extensions that are (or are not) authenticated.
        If the list contains the character *, it means all files.
        :type FileExtensions: list of str
        :param FilterType: whitelist: authenticate all types except those in the FileExtensions list.
        blacklist: authenticate only the types listed in FileExtensions.
        :type FilterType: str
"""
self.SecretKey = None
self.ExpireTime = None
self.FileExtensions = None
self.FilterType = None
def _deserialize(self, params):
self.SecretKey = params.get("SecretKey")
self.ExpireTime = params.get("ExpireTime")
self.FileExtensions = params.get("FileExtensions")
self.FilterType = params.get("FilterType")
class AuthenticationTypeC(AbstractModel):
"""时间戳防盗链模式 C 配置
时间戳防盗链模式 C 的访问 URL 格式为:http://DomainName/md5hash/timestamp/FileName
其中 timestamp 为十六进制 UNIX 时间戳;
md5hash:MD5(自定义密钥 + 文件路径 + timestamp)
"""
def __init__(self):
"""
        :param SecretKey: Key used to compute the signature.
        Upper/lowercase letters and digits only, 6-32 characters long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
        :param ExpireTime: Signature expiration time.
        In seconds; the maximum value is 31536000.
        :type ExpireTime: int
        :param FileExtensions: List of file extensions that are (or are not) authenticated.
        If the list contains the character *, it means all files.
        :type FileExtensions: list of str
        :param FilterType: whitelist: authenticate all types except those in the FileExtensions list.
        blacklist: authenticate only the types listed in FileExtensions.
        :type FilterType: str
        :param TimeFormat: Timestamp radix.
        dec: decimal
        hex: hexadecimal
        Note: this field may return null, indicating that no valid value was obtained.
        :type TimeFormat: str
"""
self.SecretKey = None
self.ExpireTime = None
self.FileExtensions = None
self.FilterType = None
self.TimeFormat = None
def _deserialize(self, params):
self.SecretKey = params.get("SecretKey")
self.ExpireTime = params.get("ExpireTime")
self.FileExtensions = params.get("FileExtensions")
self.FilterType = params.get("FilterType")
self.TimeFormat = params.get("TimeFormat")
class AuthenticationTypeD(AbstractModel):
"""时间戳防盗链模式 D 配置
时间戳防盗链模式 D 的访问 URL 格式为:http://DomainName/FileName?sign=md5hash&t=timestamp
其中 timestamp 为十进制或十六进制 UNIX 时间戳;
md5hash:MD5(自定义密钥 + 文件路径 + timestamp)
"""
def __init__(self):
"""
        :param SecretKey: Key used to compute the signature.
        Upper/lowercase letters and digits only, 6-32 characters long.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
        :param ExpireTime: Signature expiration time.
        In seconds; the maximum value is 31536000.
        :type ExpireTime: int
        :param FileExtensions: List of file extensions that are (or are not) authenticated.
        If the list contains the character *, it means all files.
        :type FileExtensions: list of str
        :param FilterType: whitelist: authenticate all types except those in the FileExtensions list.
        blacklist: authenticate only the types listed in FileExtensions.
        :type FilterType: str
        :param SignParam: Signature parameter name.
        Upper/lowercase letters, digits or underscores only, 1-100 characters long, and it cannot start with a digit.
        :type SignParam: str
        :param TimeParam: Timestamp parameter name.
        Upper/lowercase letters, digits or underscores only, 1-100 characters long, and it cannot start with a digit.
        :type TimeParam: str
        :param TimeFormat: Timestamp radix.
        dec: decimal
        hex: hexadecimal
        :type TimeFormat: str
"""
self.SecretKey = None
self.ExpireTime = None
self.FileExtensions = None
self.FilterType = None
self.SignParam = None
self.TimeParam = None
self.TimeFormat = None
def _deserialize(self, params):
self.SecretKey = params.get("SecretKey")
self.ExpireTime = params.get("ExpireTime")
self.FileExtensions = params.get("FileExtensions")
self.FilterType = params.get("FilterType")
self.SignParam = params.get("SignParam")
self.TimeParam = params.get("TimeParam")
self.TimeFormat = params.get("TimeFormat")
class AwsPrivateAccess(AbstractModel):
"""s3源站回源鉴权。
"""
def __init__(self):
"""
        :param Switch: Switch, on/off.
        :type Switch: str
        :param AccessKey: Access ID.
        Note: this field may return null, indicating that no valid value was obtained.
        :type AccessKey: str
        :param SecretKey: Secret key.
        Note: this field may return null, indicating that no valid value was obtained.
        :type SecretKey: str
"""
self.Switch = None
self.AccessKey = None
self.SecretKey = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.AccessKey = params.get("AccessKey")
self.SecretKey = params.get("SecretKey")
class BandwidthAlert(AbstractModel):
"""带宽封顶配置,默认为关闭状态
"""
def __init__(self):
"""
        :param Switch: Bandwidth cap configuration switch.
        on: enabled
        off: disabled
        :type Switch: str
        :param BpsThreshold: Bandwidth cap threshold, in bps.
        Note: this field may return null, indicating that no valid value was obtained.
        :type BpsThreshold: int
        :param CounterMeasure: Action taken once the threshold is reached.
        RESOLVE_DNS_TO_ORIGIN: resolve directly to the origin; supported only for domains with a self-owned origin.
        RETURN_404: return 404 for all requests.
        Note: this field may return null, indicating that no valid value was obtained.
        :type CounterMeasure: str
        :param LastTriggerTime: The last time the bandwidth cap threshold was triggered.
        Note: this field may return null, indicating that no valid value was obtained.
        :type LastTriggerTime: str
"""
self.Switch = None
self.BpsThreshold = None
self.CounterMeasure = None
self.LastTriggerTime = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.BpsThreshold = params.get("BpsThreshold")
self.CounterMeasure = params.get("CounterMeasure")
self.LastTriggerTime = params.get("LastTriggerTime")
class BotCookie(AbstractModel):
"""Bot cookie策略
"""
def __init__(self):
"""
:param Switch: on|off
:type Switch: str
        :param RuleType: Rule type; currently only "all" is supported.
        :type RuleType: str
        :param RuleValue: Rule value, ['*'].
        :type RuleValue: list of str
        :param Action: Action to take, monitor|intercept|redirect|captcha.
        :type Action: str
        :param RedirectUrl: Redirect page used when the action is redirect.
        Note: this field may return null, indicating that no valid value was obtained.
        :type RedirectUrl: str
        :param UpdateTime: Update time.
        Note: this field may return null, indicating that no valid value was obtained.
        :type UpdateTime: str
"""
self.Switch = None
self.RuleType = None
self.RuleValue = None
self.Action = None
self.RedirectUrl = None
self.UpdateTime = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.RuleType = params.get("RuleType")
self.RuleValue = params.get("RuleValue")
self.Action = params.get("Action")
self.RedirectUrl = params.get("RedirectUrl")
self.UpdateTime = params.get("UpdateTime")
class BotJavaScript(AbstractModel):
"""Bot js策略
"""
def __init__(self):
"""
:param Switch: on|off
:type Switch: str
        :param RuleType: Rule type; currently only "file" is supported.
        :type RuleType: str
        :param RuleValue: Rule value, ['html', 'htm'].
        :type RuleValue: list of str
        :param Action: Action to take, monitor|intercept|redirect|captcha.
        :type Action: str
        :param RedirectUrl: Redirect page used when the action is redirect.
        Note: this field may return null, indicating that no valid value was obtained.
        :type RedirectUrl: str
        :param UpdateTime: Update time.
        Note: this field may return null, indicating that no valid value was obtained.
        :type UpdateTime: str
"""
self.Switch = None
self.RuleType = None
self.RuleValue = None
self.Action = None
self.RedirectUrl = None
self.UpdateTime = None
def _deserialize(self, params):
self.Switch = params.get("Switch")
self.RuleType = params.get("RuleType")
self.RuleValue = params.get("RuleValue")
self.Action = params.get("Action")
self.RedirectUrl = params.get("RedirectUrl")
self.UpdateTime = params.get("UpdateTime")
class BriefDomain(AbstractModel):
"""域名基础配置信息,含 CNAME、状态、业务类型、加速区域、创建时间、更新时间、源站配置等。
"""
def __init__(self):
"""
        :param ResourceId: Domain ID.
        :type ResourceId: str
        :param AppId: Tencent Cloud account ID.
        :type AppId: int
        :param Domain: Accelerated domain name.
        :type Domain: str
        :param Cname: CNAME address corresponding to the domain.
        :type Cname: str
        :param Status: Acceleration service status.
        rejected: the domain failed review, or its ICP filing has expired or been revoked.
        processing: being deployed.
        online: enabled.
        offline: disabled.
        :type Status: str
        :param ProjectId: Project ID, viewable on the Tencent Cloud project management page.
        :type ProjectId: int
        :param ServiceType: Domain service type.
        web: static acceleration.
        download: download acceleration.
        media: streaming VOD acceleration.
        :type ServiceType: str
        :param CreateTime: Domain creation time.
:type CreateTime: | |
import os
import glob
import scipy
import paddle
import random
import numpy as np
import json
import paddle.vision.transforms.functional as F
from paddle.io import DataLoader,Dataset
from paddle.vision import transforms
from PIL import Image
# from scipy.misc import imread
import skimage
from skimage.feature import canny
from skimage.color import rgb2gray, gray2rgb
from .utils import create_mask, mask_generation_with_BB, imread, random_size
from scipy import ndimage
Image.MAX_IMAGE_PIXELS = 1000000000
class Dataset(paddle.io.Dataset):
def __init__(self, config, afile, augment=True, training=True):
"""
Args:
gt_list: groundtruth list
"""
super(Dataset, self).__init__()
self.augment = augment
self.training = training
self.data = self.load_data(afile, config.WORD_BB_PERCENT_THRESHOLD)
self._mask_pad = config.MASK_PAD
self._mask_safe_pad = config.MASK_SAFE_PAD
self._mask_pad_update_step = config.MASK_PAD_UPDATE_STEP
self.input_size = config.INPUT_SIZE
self.sigma = config.SIGMA
self.mask = config.MASK
self.mask_threshold = config.MASK_THRESHOLD
self.nms = config.NMS
self._count = 0
self.backup_item = None
# in test mode, there's a one-to-one relationship between mask and image
        # masks are loaded non-randomly
if config.MODE == 2:
self.mask = 7
def __len__(self):
return len(self.data)
def __getitem__(self, index):
item = self.load_item(index)
'''
try:
item = self.load_item(index)
#item = self.load_item_SCUT(index)
#item = self.load_item_raindrop(index)
self.backup_item = item
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
#print(e.__traceback__)
"""Handling errors introduced by random mask generation step introduced in dataloader."""
print('loading error: item ' + str(index))
# item = self.__getitem__(index+1)
if self.backup_item is not None:
item = self.backup_item
else:
item = self.__getitem__(index + 1)
'''
return item
def load_name(self, index):
name = self.data[index]['dir']
return os.path.basename(name)
def load_item(self, index):
self._count += 1
size = self.input_size
# load image
img = imread(self.data[index]['dir'])
if os.path.exists(self.data[index]['gt_dir']):
img_gt = imread(self.data[index]['gt_dir'])
else:
img_gt = imread(self.data[index]['gt_dir'].split(".")[0] + '.png')
#print(img.shape,img_gt.shape)
# gray to rgb
if len(img.shape) < 3:
img = gray2rgb(img)
if len(img_gt.shape) < 3:
img_gt = gray2rgb(img_gt)
# load mask
masks_pad, masks_gt = self.load_mask(img, index)
#print(mask_pad.shape,mask_gt.shape)
# resize/crop if needed
if size != 0:
img = self.resize(img.astype(np.uint8), size, size)
img_gt = self.resize(img_gt.astype(np.uint8), size, size)
masks_pad = self.resize(masks_pad.astype(np.uint8), size, size)
masks_gt = self.resize(masks_gt.astype(np.uint8), size, size)
        if np.mean(masks_pad) == 0 or np.mean(masks_gt) == 0:
            # A bare ``raise`` has no active exception here; raise an explicit
            # error so empty masks are reported (and can be caught upstream).
            raise ValueError('empty mask generated for item %d' % index)
if img.shape != img_gt.shape:
img_gt = self.resize(img_gt.astype(np.uint8), img.shape[0], img.shape[1])
# print(masks_pad.shape, img_gt.shape, img.shape)
# augment data: horizontal flip
if self.augment and np.random.binomial(1, 0.5) > 0:
img = img[:, ::-1, ...]
img_gt = img_gt[:, ::-1, ...]
masks_pad = masks_pad[:, ::-1, ...]
masks_gt = masks_gt[:, ::-1, ...]
#print(img.shape)
img=self.normalize(img)
img_gt=self.normalize(img_gt)
masks_pad=self.normalize(masks_pad)
masks_gt=self.normalize(masks_gt)
# !!! Has to change the type, UINT8 subtraction is different
masks_refine_gt = np.greater(np.mean(np.abs(img.astype(np.float32) - img_gt.astype(np.float32)), axis=-1),
self.mask_threshold).astype(np.uint8)
masks_refine_gt = np.multiply(masks_refine_gt, masks_gt)
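        # Composite ground truth: keep the input image outside the text mask
        # and the clean ground-truth pixels inside it.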
img_gt = np.multiply(np.expand_dims((1-masks_gt), -1), img) + np.multiply(np.expand_dims(masks_gt, -1), img_gt)
#print(img_gt.shape)
#print(img.shape,img_gt.shape,masks_pad.shape,masks_gt.shape,masks_refine_gt.shape)
return self.to_tensor(img), self.to_tensor(img_gt), \
self.to_tensor((masks_pad).astype(np.float32)), self.to_tensor((masks_gt).astype(np.float32)), \
self.to_tensor((masks_refine_gt).astype(np.float32))
# def load_item_raindrop(self, index):
# self._count += 1
# size = self.input_size
#
# # load image
# img = imread(self.data[index]['dir'])
# if os.path.exists(self.data[index]['gt_dir']):
# img_gt = imread(self.data[index]['gt_dir'])
# else:
# img_gt = imread(self.data[index]['gt_dir'].split(".")[0] + '.png', mode='RGB')
#
# # random crop
# left, rigth = random_size(img.shape[0], 0.8)
# top, bottom = random_size(img.shape[1], 0.8)
# img = img[left:rigth, top: bottom, :]
# img_gt = img_gt[left:rigth, top: bottom, :]
# #
#
# # gray to rgb
# if len(img.shape) < 3:
# img = gray2rgb(img)
#
# if len(img_gt.shape) < 3:
# img_gt = gray2rgb(img_gt)
#
# # resize/crop if needed
# if size != 0:
# img = self.resize(img.astype(np.uint8), size, size)
# img_gt = self.resize(img_gt.astype(np.uint8), size, size)
#
# # load mask
# masks_pad, masks_gt = self.load_mask(img, index)
# masks_pad = masks_pad / 255
# masks_gt = masks_gt / 255
#
# if np.mean(masks_pad) == 0 or np.mean(masks_gt) ==0:
# raise
#
# if img.shape != img_gt.shape:
# img_gt = self.resize(img_gt.astype(np.uint8), img.shape[0], img.shape[1])
# # print(masks_pad.shape, img_gt.shape, img.shape)
#
# # augment data: horizontal flip
# if self.augment and np.random.binomial(1, 0.5) > 0:
# img = img[:, ::-1, ...]
# img_gt = img_gt[:, ::-1, ...]
# masks_pad = masks_pad[:, ::-1, ...]
# masks_gt = masks_gt[:, ::-1, ...]
#
# # augment data: vertical flip
# if self.augment and np.random.binomial(1, 0.5) > 0:
# img = img[::-1, ...]
# img_gt = img_gt[::-1, ...]
# masks_pad = masks_pad[::-1, ...]
# masks_gt = masks_gt[::-1, ...]
#
# # # !!! Has to change the type, UINT8 subtraction is different
# # diff = np.mean(np.abs(img_gt.astype(np.float32) - img.astype(np.float32)), axis=-1)
# # mask_threshold = np.mean(diff)
# # masks_refine_gt = np.greater(diff, mask_threshold).astype(np.uint8)
# #
# # # Remove small white regions
# # open_img = ndimage.binary_opening(masks_refine_gt)
# # # Remove small black hole
# # masks_refine_gt = ndimage.binary_closing(open_img)
#
# # masks_refine_gt = np.multiply(masks_refine_gt, masks_gt)
# # img_gt = np.multiply(np.expand_dims((1-masks_gt), -1), img) + np.multiply(np.expand_dims(masks_gt, -1), img_gt)
# # img_gt = img_gt.astype(np.uint8)
#
# masks_refine_gt = masks_gt
#
# return self.to_tensor(img), self.to_tensor(img_gt), \
# self.to_tensor(masks_pad.astype(np.float64)), self.to_tensor(masks_gt.astype(np.float64)), \
# self.to_tensor(masks_refine_gt.astype(np.float64))
def load_item_raindrop(self, index):
self._count += 1
size = self.input_size
#print(self.data)
# load image
img = imread(self.data[index]['dir'])
if os.path.exists(self.data[index]['gt_dir']):
img_gt = imread(self.data[index]['gt_dir'])
else:
img_gt = imread(self.data[index]['gt_dir'].split(".")[0] + '.png', mode='RGB')
# gray to rgb
if len(img.shape) < 3:
img = gray2rgb(img)
if len(img_gt.shape) < 3:
img_gt = gray2rgb(img_gt)
# resize/crop if needed
if size != 0:
img = self.resize(img.astype(np.uint8), size, size)
img_gt = self.resize(img_gt.astype(np.uint8), size, size)
# load mask
masks_pad, masks_gt = self.load_mask(img, index)
masks_pad = masks_pad / 255
masks_gt = masks_gt / 255
if np.mean(masks_pad) == 0 or np.mean(masks_gt) ==0:
            raise ValueError('empty mask generated for item ' + str(index))
if img.shape != img_gt.shape:
img_gt = self.resize(img_gt.astype(np.uint8), img.shape[0], img.shape[1])
#print(masks_pad.shape, img_gt.shape, img.shape)
masks_refine_gt = masks_gt
return self.to_tensor(img), self.to_tensor(img_gt), \
self.to_tensor(masks_pad.astype(np.float64)), self.to_tensor(masks_gt.astype(np.float64)), \
self.to_tensor(masks_refine_gt.astype(np.float64))
def load_item_SCUT(self, index):
self._count += 1
size = self.input_size
# load image
img = imread(self.data[index]['dir'])
if os.path.exists(self.data[index]['gt_dir']):
img_gt = imread(self.data[index]['gt_dir'])
else:
img_gt = imread(self.data[index]['gt_dir'].split(".")[0] + '.png', mode='RGB')
# gray to rgb
if len(img.shape) < 3:
img = gray2rgb(img)
if len(img_gt.shape) < 3:
img_gt = gray2rgb(img_gt)
# load mask
imgh, imgw = img.shape[0:2]
masks_pad = np.ones([imgh, imgw])
# resize/crop if needed
if size != 0:
img = self.resize(img.astype(np.uint8), size, size)
img_gt = self.resize(img_gt.astype(np.uint8), size, size)
masks_pad = self.resize(masks_pad.astype(np.uint8), size, size)
if img.shape != img_gt.shape:
img_gt = self.resize(img_gt.astype(np.uint8), img.shape[0], img.shape[1])
# print(masks_pad.shape, img_gt.shape, img.shape)
# augment data: horizontal flip
if self.augment and np.random.binomial(1, 0.5) > 0:
img = img[:, ::-1, ...]
img_gt = img_gt[:, ::-1, ...]
masks_pad = masks_pad[:, ::-1, ...]
        # NOTE: cast to float32 before subtracting; uint8 arithmetic wraps around instead of going negative.
masks_refine_gt = np.greater(np.mean(np.abs(img.astype(np.float32) - img_gt.astype(np.float32)), axis=-1),
self.mask_threshold).astype(np.uint8)
masks_gt = masks_refine_gt
return self.to_tensor(img), self.to_tensor(img_gt), \
self.to_tensor(masks_pad.astype(np.float64)), self.to_tensor(masks_gt.astype(np.float64)), \
self.to_tensor(masks_refine_gt.astype(np.float64))
def load_mask(self, img, index):
imgh, imgw = img.shape[0:2]
mask_type = self.mask
#print(mask_type)
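        # Mask type codes handled below (summarised from the branches in this method):
        #   1: random block, 2: half image, 3: external mask file,
        #   4: randomly type 1 or 3, 5: randomly type 1, 2 or 3,
        #   6: external mask loaded non-randomly (test mode),
        #   7: mask derived from word bounding boxes, 8: full-image mask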
# external + random block
if mask_type == 4:
mask_type = 1 if np.random.binomial(1, 0.5) == 1 else 3
# external + random block + half
elif mask_type == 5:
mask_type = np.random.randint(1, 4)
# random block
if mask_type == 1:
mask = create_mask(imgw, imgh, imgw // 2, imgh // 2)
return mask,mask
if mask_type == 8:
# print(imgw, imgh)
# x = random.randint(imgw//4, imgw)
# y = random.randint(imgh//4, imgh)
# mask = create_mask(imgw, imgh, x, y)
# if np.random.binomial(1, 0.1) > 0:
# mask = np.ones_like(mask)
mask = np.ones([imgw, imgh])
mask = (mask * 255).astype(np.uint8)
return mask,mask
# half
if mask_type == 2:
# randomly choose right or left
return create_mask(imgw, imgh, imgw // 2, imgh, 0 if random.random() < 0.5 else imgw // 2, 0)
# external
if mask_type == 3:
mask_index = random.randint(0, len(self.mask_data) - 1)
mask = imread(self.mask_data[mask_index])
mask = self.resize(mask, imgh, imgw)
mask = (mask > 0).astype(np.uint8) * 255 # threshold due to interpolation
            return mask, mask  # callers always unpack a (masks_pad, masks_gt) pair
# test mode: load mask non random
if mask_type == 6:
mask = imread(self.mask_data[index])
mask = self.resize(mask, imgh, imgw, centerCrop=False)
mask = rgb2gray(mask)
mask = (mask > 0).astype(np.uint8) * 255
            return mask, mask  # callers always unpack a (masks_pad, masks_gt) pair
if mask_type == 7:
bbox = np.array(self.data[index]['word_bb'])
max_pad = np.max([imgh, imgw])
if self._mask_pad == -1:
# coefficient = 1
# pad = coefficient*self._count//self._mask_pad_update_step
# if pad > np.max(self.input_size+coefficient):
# pad = np.random.randint(0, np.max(self.input_size), 1)[0]
# elif pad == 0:
# pad = 0
# else:
# pad = np.random.randint(0, pad)
if np.random.binomial(1, 0.1) > 0:
pad = max_pad
else:
pad = np.random.randint(self._mask_safe_pad, np.ceil(max_pad/2))
elif self._mask_pad == -2:
# pad = np.random.randint(2, self._mask_pad, 1)[0]
if self.data[index]['word_percent'] < 5:
pad = 20
elif self.data[index]['word_percent'] < 10:
                pad
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_utils import uuidutils
import requests
import requests.auth
import six
from tempest.lib import exceptions
from aodhclient.tests.functional import base
class AodhClientTest(base.ClientTestBase):
def test_help(self):
self.aodh("help", params="alarm create")
self.aodh("help", params="alarm delete")
self.aodh("help", params="alarm list")
self.aodh("help", params="alarm show")
self.aodh("help", params="alarm update")
def test_alarm_id_or_name_scenario(self):
def _test(name):
params = "create --type event --name %s" % name
result = self.aodh('alarm', params=params)
alarm_id = self.details_multiple(result)[0]['alarm_id']
params = 'show %s' % name
result = self.aodh('alarm', params=params)
self.assertEqual(alarm_id,
self.details_multiple(result)[0]['alarm_id'])
params = 'show %s' % alarm_id
result = self.aodh('alarm', params=params)
self.assertEqual(alarm_id,
self.details_multiple(result)[0]['alarm_id'])
params = "update --state ok %s" % name
result = self.aodh('alarm', params=params)
self.assertEqual("ok", self.details_multiple(result)[0]['state'])
params = "update --state alarm %s" % alarm_id
result = self.aodh('alarm', params=params)
self.assertEqual("alarm",
self.details_multiple(result)[0]['state'])
params = "update --name another-name %s" % name
result = self.aodh('alarm', params=params)
self.assertEqual("another-name",
self.details_multiple(result)[0]['name'])
params = "update --name %s %s" % (name, alarm_id)
result = self.aodh('alarm', params=params)
self.assertEqual(name,
self.details_multiple(result)[0]['name'])
# Check update with no change is allowed
params = "update --name %s %s" % (name, name)
result = self.aodh('alarm', params=params)
self.assertEqual(name,
self.details_multiple(result)[0]['name'])
params = "update --state ok"
result = self.aodh('alarm', params=params,
fail_ok=True, merge_stderr=True)
self.assertFirstLineStartsWith(
result.splitlines(),
'You need to specify one of alarm ID and alarm name(--name) '
'to update an alarm.')
params = "delete %s" % name
result = self.aodh('alarm', params=params)
self.assertEqual("", result)
params = "create --type event --name %s" % name
result = self.aodh('alarm', params=params)
alarm_id = self.details_multiple(result)[0]['alarm_id']
params = "delete %s" % alarm_id
result = self.aodh('alarm', params=params)
self.assertEqual("", result)
_test(uuidutils.generate_uuid())
_test('normal-alarm-name')
def test_event_scenario(self):
PROJECT_ID = uuidutils.generate_uuid()
# CREATE
result = self.aodh(u'alarm',
params=(u"create --type event --name ev_alarm1 "
"--project-id %s" % PROJECT_ID))
alarm = self.details_multiple(result)[0]
ALARM_ID = alarm['alarm_id']
self.assertEqual('ev_alarm1', alarm['name'])
self.assertEqual('*', alarm['event_type'])
# UPDATE IGNORE INVALID
result = self.aodh(
'alarm', params=("update %s --severity critical --threshold 10"
% ALARM_ID))
alarm_updated = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_updated["alarm_id"])
self.assertEqual('critical', alarm_updated['severity'])
# UPDATE IGNORE INVALID
result = self.aodh(
'alarm', params=("update %s --event-type dummy" % ALARM_ID))
alarm_updated = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_updated["alarm_id"])
self.assertEqual('dummy', alarm_updated['event_type'])
# GET
result = self.aodh(
'alarm', params="show %s" % ALARM_ID)
alarm_show = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_show["alarm_id"])
self.assertEqual(PROJECT_ID, alarm_show["project_id"])
self.assertEqual('ev_alarm1', alarm_show['name'])
self.assertEqual('dummy', alarm_show['event_type'])
# GET BY NAME
result = self.aodh(
'alarm', params="show --name ev_alarm1")
alarm_show = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_show["alarm_id"])
self.assertEqual(PROJECT_ID, alarm_show["project_id"])
self.assertEqual('ev_alarm1', alarm_show['name'])
self.assertEqual('dummy', alarm_show['event_type'])
# GET BY NAME AND ID ERROR
self.assertRaises(exceptions.CommandFailed,
self.aodh, u'alarm',
params=(u"show %s --name ev_alarm1" %
ALARM_ID))
# LIST
result = self.aodh('alarm', params="list")
self.assertIn(ALARM_ID,
[r['alarm_id'] for r in self.parser.listing(result)])
output_colums = ['alarm_id', 'type', 'name', 'state', 'severity',
'enabled']
for alarm_list in self.parser.listing(result):
self.assertEqual(sorted(output_colums), sorted(alarm_list.keys()))
if alarm_list["alarm_id"] == ALARM_ID:
self.assertEqual('ev_alarm1', alarm_list['name'])
# LIST WITH QUERY
result = self.aodh('alarm',
params=("list --query project_id=%s" % PROJECT_ID))
alarm_list = self.parser.listing(result)[0]
self.assertEqual(ALARM_ID, alarm_list["alarm_id"])
self.assertEqual('ev_alarm1', alarm_list['name'])
# DELETE
result = self.aodh('alarm', params="delete %s" % ALARM_ID)
self.assertEqual("", result)
# GET FAIL
result = self.aodh('alarm', params="show %s" % ALARM_ID,
fail_ok=True, merge_stderr=True)
expected = "Alarm %s not found (HTTP 404)" % ALARM_ID
self.assertFirstLineStartsWith(result.splitlines(), expected)
# DELETE FAIL
result = self.aodh('alarm', params="delete %s" % ALARM_ID,
fail_ok=True, merge_stderr=True)
self.assertFirstLineStartsWith(result.splitlines(), expected)
# LIST DOES NOT HAVE ALARM
result = self.aodh('alarm', params="list")
self.assertNotIn(ALARM_ID,
[r['alarm_id'] for r in self.parser.listing(result)])
def test_threshold_scenario(self):
PROJECT_ID = uuidutils.generate_uuid()
# CREATE
result = self.aodh(u'alarm',
params=(u"create --type threshold --name alarm_th "
"-m meter_name --threshold 5 "
"--project-id %s" % PROJECT_ID))
alarm = self.details_multiple(result)[0]
ALARM_ID = alarm['alarm_id']
self.assertEqual('alarm_th', alarm['name'])
self.assertEqual('meter_name', alarm['meter_name'])
self.assertEqual('5.0', alarm['threshold'])
# CREATE WITH --TIME-CONSTRAINT
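        # Each --time-constraint value below is a semicolon-separated set of
        # fields, e.g. name=cons1;start='0 11 * * *';duration=300: a constraint
        # name, a cron-style start expression and a duration (in seconds).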
result = self.aodh(
u'alarm',
params=(u"create --type threshold --name alarm_tc "
"-m meter_name --threshold 5 "
"--time-constraint "
"name=cons1;start='0 11 * * *';duration=300 "
"--time-constraint "
"name=cons2;start='0 23 * * *';duration=600 "
"--project-id %s" % PROJECT_ID))
alarm = self.details_multiple(result)[0]
self.assertEqual('alarm_tc', alarm['name'])
self.assertEqual('meter_name', alarm['meter_name'])
self.assertEqual('5.0', alarm['threshold'])
self.assertIsNotNone(alarm['time_constraints'])
# CREATE FAIL MISSING PARAM
self.assertRaises(exceptions.CommandFailed,
self.aodh, u'alarm',
params=(u"create --type threshold --name alarm_th "
"--project-id %s" % PROJECT_ID))
# UPDATE
result = self.aodh(
'alarm', params=("update %s --severity critical --threshold 10"
% ALARM_ID))
alarm_updated = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_updated["alarm_id"])
self.assertEqual('critical', alarm_updated['severity'])
self.assertEqual('10.0', alarm_updated["threshold"])
# GET
result = self.aodh(
'alarm', params="show %s" % ALARM_ID)
alarm_show = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_show["alarm_id"])
self.assertEqual(PROJECT_ID, alarm_show["project_id"])
self.assertEqual('alarm_th', alarm_show['name'])
self.assertEqual('meter_name', alarm_show['meter_name'])
self.assertEqual('10.0', alarm_show['threshold'])
# GET BY NAME
result = self.aodh(
'alarm', params="show --name alarm_th")
alarm_show = self.details_multiple(result)[0]
self.assertEqual(ALARM_ID, alarm_show["alarm_id"])
self.assertEqual(PROJECT_ID, alarm_show["project_id"])
self.assertEqual('alarm_th', alarm_show['name'])
self.assertEqual('meter_name', alarm_show['meter_name'])
self.assertEqual('10.0', alarm_show['threshold'])
# GET BY NAME AND ID ERROR
self.assertRaises(exceptions.CommandFailed,
self.aodh, u'alarm',
params=(u"show %s --name alarm_th" %
ALARM_ID))
# LIST
result = self.aodh('alarm', params="list")
self.assertIn(ALARM_ID,
[r['alarm_id'] for r in self.parser.listing(result)])
output_colums = ['alarm_id', 'type', 'name', 'state', 'severity',
'enabled']
for alarm_list in self.parser.listing(result):
self.assertEqual(sorted(output_colums), sorted(alarm_list.keys()))
if alarm_list["alarm_id"] == ALARM_ID:
self.assertEqual('alarm_th', alarm_list['name'])
# LIST WITH PAGINATION
# list with limit
result = self.aodh('alarm',
params="list --limit 1")
alarm_list = self.parser.listing(result)
self.assertEqual(1, len(alarm_list))
# list with sort with key=name dir=asc
result = self.aodh('alarm',
params="list --sort name:asc")
names = [r['name'] for r in self.parser.listing(result)]
sorted_name = sorted(names)
self.assertEqual(sorted_name, names)
# list with sort with key=name dir=asc and key=alarm_id dir=asc
result = self.aodh(u'alarm',
params=(u"create --type threshold --name alarm_th "
"-m meter_name --threshold 5 "
"--project-id %s" % PROJECT_ID))
created_alarm_id = self.details_multiple(result)[0]['alarm_id']
result = self.aodh('alarm',
params="list --sort name:asc --sort alarm_id:asc")
alarm_list = self.parser.listing(result)
ids_with_same_name = []
names = []
for alarm in alarm_list:
            names.append(alarm['name'])
if alarm['name'] == 'alarm_th':
ids_with_same_name.append(alarm['alarm_id'])
sorted_ids = sorted(ids_with_same_name)
sorted_names = sorted(names)
self.assertEqual(sorted_names, names)
self.assertEqual(sorted_ids, ids_with_same_name)
# list with sort with key=name dir=desc and with the marker equal to
# the alarm_id of the alarm_th we created for this test.
result = self.aodh('alarm',
params="list --sort name:desc "
"--marker %s" % created_alarm_id)
self.assertIn('alarm_tc',
[r['name'] for r in self.parser.listing(result)])
self.aodh('alarm', params="delete %s" % created_alarm_id)
# LIST WITH QUERY
result = self.aodh('alarm',
params=("list --query project_id=%s" % PROJECT_ID))
alarm_list = self.parser.listing(result)[0]
self.assertEqual(ALARM_ID, alarm_list["alarm_id"])
self.assertEqual('alarm_th', alarm_list['name'])
# DELETE
result = self.aodh('alarm', params="delete %s" % ALARM_ID)
self.assertEqual("", result)
# GET FAIL
result = self.aodh('alarm', params="show %s" % ALARM_ID,
fail_ok=True, merge_stderr=True)
expected = "Alarm %s not found (HTTP 404)" % ALARM_ID
self.assertFirstLineStartsWith(result.splitlines(), expected)
# DELETE FAIL
result = self.aodh('alarm', params="delete %s" % ALARM_ID,
fail_ok=True, merge_stderr=True)
self.assertFirstLineStartsWith(result.splitlines(), expected)
# LIST DOES NOT HAVE ALARM
result = self.aodh('alarm', params="list")
self.assertNotIn(ALARM_ID,
[r['alarm_id'] for r in self.parser.listing(result)])
def test_composite_scenario(self):
project_id = uuidutils.generate_uuid()
# CREATE
result = self.aodh(u'alarm',
params=(u'create --type composite --name calarm1 '
' --composite-rule \'{"or":[{"threshold"'
': 0.8,"meter_name": "cpu_util",'
'"type": "threshold"},{"and": ['
'{"threshold": 200, "meter_name": '
'"disk.iops", "type": "threshold"},'
'{"threshold": 1000,"meter_name":'
'"network.incoming.packets.rate",'
'"type": "threshold"}]}]}\' '
'--project-id %s' % project_id))
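        # The composite rule passed above, unescaped for readability:
        #   {"or": [{"threshold": 0.8, "meter_name": "cpu_util", "type": "threshold"},
        #           {"and": [{"threshold": 200, "meter_name": "disk.iops", "type": "threshold"},
        #                    {"threshold": 1000, "meter_name": "network.incoming.packets.rate", "type": "threshold"}]}]}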
alarm = self.details_multiple(result)[0]
alarm_id = alarm['alarm_id']
self.assertEqual('calarm1', alarm['name'])
self.assertEqual('composite', alarm['type'])
self.assertIn('composite_rule', alarm)
# CREATE FAIL MISSING PARAM
self.assertRaises(exceptions.CommandFailed,
self.aodh, u'alarm',
params=(u"create --type composite --name calarm1 "
"--project-id %s" % project_id))
# UPDATE
result = self.aodh(
'alarm', params=("update %s --severity critical" % alarm_id))
alarm_updated = self.details_multiple(result)[0]
self.assertEqual(alarm_id, alarm_updated["alarm_id"])
self.assertEqual('critical', alarm_updated['severity'])
# GET
result = self.aodh(
'alarm', params="show %s" % alarm_id)
alarm_show = self.details_multiple(result)[0]
self.assertEqual(alarm_id, alarm_show["alarm_id"])
self.assertEqual(project_id, alarm_show["project_id"])
self.assertEqual('calarm1', alarm_show['name'])
# GET BY NAME
result = self.aodh(
'alarm', params="show --name calarm1")
alarm_show = self.details_multiple(result)[0]
self.assertEqual(alarm_id, alarm_show["alarm_id"])
self.assertEqual(project_id, alarm_show["project_id"])
self.assertEqual('calarm1', alarm_show['name'])
# GET BY NAME AND ID ERROR
self.assertRaises(exceptions.CommandFailed,
self.aodh, u'alarm',
params=(u"show %s --name calarm1" %
alarm_id))
# LIST
result = self.aodh('alarm', params="list")
self.assertIn(alarm_id,
[r['alarm_id'] for r in self.parser.listing(result)])
output_colums = ['alarm_id', 'type', 'name', 'state', 'severity',
'enabled']
for alarm_list in self.parser.listing(result):
self.assertEqual(sorted(output_colums), sorted(alarm_list.keys()))
if alarm_list["alarm_id"] == alarm_id:
self.assertEqual('calarm1', alarm_list['name'])
# LIST WITH QUERY
result = self.aodh('alarm',
params=("list --query project_id=%s" % project_id))
alarm_list = self.parser.listing(result)[0]
self.assertEqual(alarm_id, alarm_list["alarm_id"])
self.assertEqual('calarm1', alarm_list['name'])
# DELETE
result = self.aodh('alarm', params="delete %s" % alarm_id)
self.assertEqual("", result)
# GET FAIL
result = self.aodh('alarm', params="show %s" % alarm_id,
fail_ok=True, merge_stderr=True)
expected = "Alarm %s not found (HTTP 404)" % alarm_id
self.assertFirstLineStartsWith(result.splitlines(), expected)
# DELETE FAIL
result = self.aodh('alarm', params="delete %s" % alarm_id,
fail_ok=True, merge_stderr=True)
self.assertFirstLineStartsWith(result.splitlines(), expected)
# LIST DOES NOT HAVE ALARM
result = self.aodh('alarm', params="list")
self.assertNotIn(alarm_id,
[r['alarm_id'] for r in self.parser.listing(result)])
# qiskit/visualization/text.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A module for drawing circuits in ascii art or some other text representation
"""
from shutil import get_terminal_size
import sys
from numpy import ndarray
from qiskit.circuit import ControlledGate
from .tools.pi_check import pi_check
from .exceptions import VisualizationError
class DrawElement():
""" An element is an instruction or an operation that need to be drawn."""
def __init__(self, label=None):
self._width = None
self.label = self.mid_content = label
self.top_format = self.mid_format = self.bot_format = "%s"
self.top_connect = self.bot_connect = " "
self.top_pad = self._mid_padding = self.bot_pad = " "
self.mid_bck = self.top_bck = self.bot_bck = " "
self.bot_connector = {}
self.top_connector = {}
self.right_fill = self.left_fill = self.layer_width = 0
self.wire_label = ""
@property
def top(self):
""" Constructs the top line of the element"""
ret = self.top_format % self.top_connect.center(
self.width, self.top_pad)
if self.right_fill:
ret = ret.ljust(self.right_fill, self.top_pad)
if self.left_fill:
ret = ret.rjust(self.left_fill, self.top_pad)
ret = ret.center(self.layer_width, self.top_bck)
return ret
@property
def mid(self):
""" Constructs the middle line of the element"""
ret = self.mid_format % self.mid_content.center(
self.width, self._mid_padding)
if self.right_fill:
ret = ret.ljust(self.right_fill, self._mid_padding)
if self.left_fill:
ret = ret.rjust(self.left_fill, self._mid_padding)
ret = ret.center(self.layer_width, self.mid_bck)
return ret
@property
def bot(self):
""" Constructs the bottom line of the element"""
ret = self.bot_format % self.bot_connect.center(
self.width, self.bot_pad)
if self.right_fill:
ret = ret.ljust(self.right_fill, self.bot_pad)
if self.left_fill:
ret = ret.rjust(self.left_fill, self.bot_pad)
ret = ret.center(self.layer_width, self.bot_bck)
return ret
@property
def length(self):
""" Returns the length of the element, including the box around."""
return max(len(self.top), len(self.mid), len(self.bot))
@property
def width(self):
""" Returns the width of the label, including padding"""
if self._width:
return self._width
return len(self.mid_content)
@width.setter
def width(self, value):
self._width = value
def connect(self, wire_char, where, label=None):
"""Connects boxes and elements using wire_char and setting proper connectors.
Args:
wire_char (char): For example '║' or '│'.
where (list["top", "bot"]): Where the connector should be set.
label (string): Some connectors have a label (see cu1, for example).
"""
if 'top' in where and self.top_connector:
self.top_connect = self.top_connector[wire_char]
if 'bot' in where and self.bot_connector:
self.bot_connect = self.bot_connector[wire_char]
if label:
self.top_format = self.top_format[:-1] + (label if label else "")
class BoxOnClWire(DrawElement):
"""Draws a box on the classical wire.
::
top: ┌───┐ ┌───┐
mid: ╡ A ╞ ══╡ A ╞══
bot: └───┘ └───┘
"""
def __init__(self, label="", top_connect='─', bot_connect='─'):
super().__init__(label)
self.top_format = "┌─%s─┐"
self.mid_format = "╡ %s ╞"
self.bot_format = "└─%s─┘"
self.top_pad = self.bot_pad = '─'
self.mid_bck = '═'
self.top_connect = top_connect
self.bot_connect = bot_connect
self.mid_content = label
class BoxOnQuWire(DrawElement):
"""Draws a box on the quantum wire.
::
top: ┌───┐ ┌───┐
mid: ┤ A ├ ──┤ A ├──
bot: └───┘ └───┘
"""
def __init__(self, label="", top_connect='─', conditional=False):
super().__init__(label)
self.top_format = "┌─%s─┐"
self.mid_format = "┤ %s ├"
self.bot_format = "└─%s─┘"
self.top_pad = self.bot_pad = self.mid_bck = '─'
self.top_connect = top_connect
self.bot_connect = '┬' if conditional else '─'
self.mid_content = label
self.top_connector = {"│": '┴'}
self.bot_connector = {"│": '┬'}
class MeasureTo(DrawElement):
"""The element on the classic wire to which the measure is performed.
::
top: ║ ║
mid: ═╩═ ═══╩═══
bot:
"""
def __init__(self):
super().__init__()
self.top_connect = " ║ "
self.mid_content = "═╩═"
self.bot_connect = " "
self.mid_bck = "═"
class MeasureFrom(BoxOnQuWire):
"""The element on the quantum wire in which the measure is performed.
::
top: ┌─┐ ┌─┐
mid: ┤M├ ───┤M├───
bot: └╥┘ └╥┘
"""
def __init__(self):
super().__init__()
self.top_format = self.mid_format = self.bot_format = "%s"
self.top_connect = "┌─┐"
self.mid_content = "┤M├"
self.bot_connect = "└╥┘"
self.top_pad = self.bot_pad = " "
self._mid_padding = '─'
class MultiBox(DrawElement):
"""Elements that is draw on over multiple wires."""
def center_label(self, input_length, order):
"""In multi-bit elements, the label is centered vertically.
Args:
            input_length (int): The number of wires affected.
order (int): Which middle element is this one?
"""
if input_length == order == 0:
self.top_connect = self.label
return
location_in_the_box = '*'.center(input_length * 2 - 1).index('*') + 1
top_limit = order * 2 + 2
bot_limit = top_limit + 2
if top_limit <= location_in_the_box < bot_limit:
if location_in_the_box == top_limit:
self.top_connect = self.label
elif location_in_the_box == top_limit + 1:
self.mid_content = self.label
else:
self.bot_connect = self.label
@property
def width(self):
""" Returns the width of the label, including padding"""
if self._width:
return self._width
return len(self.label)
class BoxOnQuWireTop(MultiBox, BoxOnQuWire):
""" Draws the top part of a box that affects more than one quantum wire"""
def __init__(self, label="", top_connect=None, wire_label=''):
super().__init__(label)
self.wire_label = wire_label
self.bot_connect = self.bot_pad = " "
self.mid_content = "" # The label will be put by some other part of the box.
self.left_fill = len(self.wire_label)
self.top_format = "┌{}─%s─┐".format(self.top_pad * self.left_fill)
self.mid_format = "┤{} %s ├".format(self.wire_label)
self.bot_format = "│{} %s │".format(self.bot_pad * self.left_fill)
self.top_connect = top_connect if top_connect else '─'
class BoxOnWireMid(MultiBox):
""" A generic middle box"""
def __init__(self, label, input_length, order, wire_label=''):
super().__init__(label, input_length, order)
self.top_pad = self.bot_pad = self.top_connect = self.bot_connect = " "
self.wire_label = wire_label
self.left_fill = len(self.wire_label)
self.top_format = "│{} %s │".format(self.top_pad * self.left_fill)
self.bot_format = "│{} %s │".format(self.bot_pad * self.left_fill)
self.top_connect = self.bot_connect = self.mid_content = ''
self.center_label(input_length, order)
class BoxOnQuWireMid(BoxOnWireMid, BoxOnQuWire):
""" Draws the middle part of a box that affects more than one quantum wire"""
def __init__(self, label, input_length, order, wire_label=''):
super().__init__(label, input_length, order, wire_label=wire_label)
self.mid_format = "┤{} %s ├".format(self.wire_label)
class BoxOnQuWireBot(MultiBox, BoxOnQuWire):
""" Draws the bottom part of a box that affects more than one quantum wire"""
def __init__(self, label, input_length, bot_connect='─', wire_label='', conditional=False):
super().__init__(label)
self.wire_label = wire_label
self.top_pad = " "
self.left_fill = len(self.wire_label)
self.top_format = "│{} %s │".format(self.top_pad * self.left_fill)
self.mid_format = "┤{} %s ├".format(self.wire_label)
self.bot_format = "└{}%s──┘".format(self.bot_pad * self.left_fill)
self.bot_connect = '┬' if conditional else bot_connect
self.mid_content = self.top_connect = ""
if input_length <= 2:
self.top_connect = label
class BoxOnClWireTop(MultiBox, BoxOnClWire):
""" Draws the top part of a conditional box that affects more than one classical wire"""
def __init__(self, label="", top_connect=None, wire_label=''):
super().__init__(label)
self.wire_label = wire_label
self.mid_content = "" # The label will be put by some other part of the box.
self.bot_format = "│ %s │"
self.top_connect = top_connect if top_connect else '─'
self.bot_connect = self.bot_pad = " "
class BoxOnClWireMid(BoxOnWireMid, BoxOnClWire):
""" Draws the middle part of a conditional box that affects more than one classical wire"""
def __init__(self, label, input_length, order, wire_label=''):
super().__init__(label, input_length, order, wire_label=wire_label)
self.mid_format = "╡{} %s ╞".format(self.wire_label)
class BoxOnClWireBot(MultiBox, BoxOnClWire):
""" Draws the bottom part of a conditional box that affects more than one classical wire"""
def __init__(self, label, input_length, bot_connect='─', wire_label='', **_):
super().__init__(label)
self.wire_label = wire_label
self.left_fill = len(self.wire_label)
self.top_pad = ' '
self.bot_pad = '─'
self.top_format = "│{} %s │".format(self.top_pad * self.left_fill)
self.mid_format = "╡{} %s ╞".format(self.wire_label)
self.bot_format = "└{}%s──┘".format(self.bot_pad * self.left_fill)
self.bot_connect = bot_connect
self.mid_content = self.top_connect = ""
if input_length <= 2:
self.top_connect = label
class DirectOnQuWire(DrawElement):
"""
Element to the wire (without the box).
"""
def __init__(self, label=""):
super().__init__(label)
self.top_format = ' %s '
self.mid_format = '─%s─'
self.bot_format = ' %s '
self._mid_padding = self.mid_bck = '─'
self.top_connector = {"│": '│'}
self.bot_connector = {"│": '│'}
class Barrier(DirectOnQuWire):
"""Draws a barrier.
::
top: ░ ░
mid: ─░─ ───░───
bot: ░ ░
"""
def __init__(self, label=""):
super().__init__("░")
self.top_connect = "░"
self.bot_connect = "░"
self.top_connector = {}
self.bot_connector = {}
class Ex(DirectOnQuWire):
"""Draws an X (usually with a connector). E.g. the top part of a swap gate.
::
top:
mid: ─X─ ───X───
bot: │ │
"""
def __init__(self, bot_connect=" ", top_connect=" ", conditional=False):
super().__init__("X")
self.bot_connect = "│" if conditional else bot_connect
self.top_connect = top_connect
class Reset(DirectOnQuWire):
""" Draws a reset gate"""
def __init__(self, conditional=False):
super().__init__("|0>")
if conditional:
self.bot_connect = "│"
class Bullet(DirectOnQuWire):
""" Draws a bullet (usually with a connector). E.g. the top part of a CX gate.
::
top:
mid: ─■─ ───■───
= t
if hasattr(self, '_set'):
self._set()
def _unset_member_vnf_index(self):
self.__member_vnf_index = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="member-vnf-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_vimAccountId(self):
"""
Getter method for vimAccountId, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vnf/vimAccountId (string)
"""
return self.__vimAccountId
def _set_vimAccountId(self, v, load=False):
"""
Setter method for vimAccountId, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vnf/vimAccountId (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_vimAccountId is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vimAccountId() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="vimAccountId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vimAccountId must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vimAccountId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__vimAccountId = t
if hasattr(self, '_set'):
self._set()
def _unset_vimAccountId(self):
self.__vimAccountId = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vimAccountId", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_vdu(self):
"""
Getter method for vdu, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vnf/vdu (list)
"""
return self.__vdu
def _set_vdu(self, v, load=False):
"""
Setter method for vdu, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vnf/vdu (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vdu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vdu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("id",yc_vdu_nst__nst_netslice_subnet_instantiation_parameters_vnf_vdu, yang_name="vdu", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="vdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vdu must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("id",yc_vdu_nst__nst_netslice_subnet_instantiation_parameters_vnf_vdu, yang_name="vdu", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="vdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)""",
})
self.__vdu = t
if hasattr(self, '_set'):
self._set()
def _unset_vdu(self):
self.__vdu = YANGDynClass(base=YANGListType("id",yc_vdu_nst__nst_netslice_subnet_instantiation_parameters_vnf_vdu, yang_name="vdu", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="vdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
def _get_internal_vld(self):
"""
Getter method for internal_vld, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vnf/internal_vld (list)
"""
return self.__internal_vld
def _set_internal_vld(self, v, load=False):
"""
Setter method for internal_vld, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vnf/internal_vld (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_internal_vld is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_internal_vld() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",yc_internal_vld_nst__nst_netslice_subnet_instantiation_parameters_vnf_internal_vld, yang_name="internal-vld", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="internal-vld", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """internal_vld must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",yc_internal_vld_nst__nst_netslice_subnet_instantiation_parameters_vnf_internal_vld, yang_name="internal-vld", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="internal-vld", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)""",
})
self.__internal_vld = t
if hasattr(self, '_set'):
self._set()
def _unset_internal_vld(self):
self.__internal_vld = YANGDynClass(base=YANGListType("name",yc_internal_vld_nst__nst_netslice_subnet_instantiation_parameters_vnf_internal_vld, yang_name="internal-vld", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="internal-vld", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
member_vnf_index = __builtin__.property(_get_member_vnf_index, _set_member_vnf_index)
vimAccountId = __builtin__.property(_get_vimAccountId, _set_vimAccountId)
vdu = __builtin__.property(_get_vdu, _set_vdu)
internal_vld = __builtin__.property(_get_internal_vld, _set_internal_vld)
_pyangbind_elements = OrderedDict([('member_vnf_index', member_vnf_index), ('vimAccountId', vimAccountId), ('vdu', vdu), ('internal_vld', internal_vld), ])
class yc_dns_server_nst__nst_netslice_subnet_instantiation_parameters_vld_ip_profile_dns_server(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module nst - based on the path /nst/netslice-subnet/instantiation-parameters/vld/ip-profile/dns-server. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__address',)
_yang_name = 'dns-server'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'netslice-subnet', u'instantiation-parameters', u'vld', u'ip-profile', u'dns-server']
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile/dns_server/address (inet:ip-address)
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile/dns_server/address (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """address must be of a type compatible with inet:ip-address""",
'defined-type': "inet:ip-address",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)""",
})
self.__address = t
if hasattr(self, '_set'):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
address = __builtin__.property(_get_address, _set_address)
_pyangbind_elements = OrderedDict([('address', address), ])
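# Minimal usage sketch (assumption: a standalone instance with no parent list,
# so the key leaf can be set directly; the value is illustrative only):
#   dns = yc_dns_server_nst__nst_netslice_subnet_instantiation_parameters_vld_ip_profile_dns_server()
#   dns.address = "10.0.0.1"   # validated against the inet:ip-address patterns above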
class yc_dhcp_params_nst__nst_netslice_subnet_instantiation_parameters_vld_ip_profile_dhcp_params(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module nst - based on the path /nst/netslice-subnet/instantiation-parameters/vld/ip-profile/dhcp-params. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__enabled','__count','__start_address',)
_yang_name = 'dhcp-params'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint8', is_config=True)
self.__start_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="start-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
self.__enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'netslice-subnet', u'instantiation-parameters', u'vld', u'ip-profile', u'dhcp-params']
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile/dhcp_params/enabled (boolean)
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile/dhcp_params/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True)""",
})
self.__enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='boolean', is_config=True)
def _get_count(self):
"""
Getter method for count, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile/dhcp_params/count (uint8)
"""
return self.__count
def _set_count(self, v, load=False):
"""
Setter method for count, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile/dhcp_params/count (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_count() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
      t =
Linearize non-linear problem """
H = zeros((param_size, param_size))
g = zeros(param_size)
# Form Hessian and R.H.S of Gauss newton
for _, factor in self.factors.items():
factor_params = [params[pid].param for pid in factor.param_ids]
r, jacobians = factor.eval(factor_params)
# Form Hessian
nb_params = len(factor_params)
for i in range(nb_params):
param_i = params[factor.param_ids[i]]
if param_i.fix:
continue
idx_i = param_idxs[factor.param_ids[i]]
size_i = param_i.min_dims
J_i = jacobians[i]
for j in range(i, nb_params):
param_j = params[factor.param_ids[j]]
if param_j.fix:
continue
idx_j = param_idxs[factor.param_ids[j]]
size_j = param_j.min_dims
J_j = jacobians[j]
rs = idx_i
re = idx_i + size_i
cs = idx_j
ce = idx_j + size_j
if i == j: # Diagonal
H[rs:re, cs:ce] += J_i.T @ J_j
else: # Off-Diagonal
H[rs:re, cs:ce] += J_i.T @ J_j
            H[cs:ce, rs:re] += (J_i.T @ J_j).T  # mirror only this factor's contribution (avoids double-counting when several factors share a parameter pair)
# Form R.H.S. Gauss Newton g
rs = idx_i
re = idx_i + size_i
g[rs:re] += (-J_i.T @ r)
return (H, g)
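  # In matrix form the loops above assemble the Gauss-Newton normal equations:
  # with the stacked residual vector r and Jacobian J, H = J^T J and
  # g = -J^T r, built block-by-block per factor and parameter pair, so the
  # update dx solves (J^T J) dx = -J^T r.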
def _evaluate(self, params):
""" Evaluate """
(param_idxs, param_size) = self._form_param_indices()
(H, g) = self._linearize(params, param_idxs, param_size)
return ((H, g), param_idxs)
def _calculate_residuals(self, params):
""" Calculate Residuals """
residuals = []
for _, factor in self.factors.items():
factor_params = [params[pid].param for pid in factor.param_ids]
r = factor.eval(factor_params, only_residuals=True)
residuals.append(r)
return np.array(residuals).flatten()
def _calculate_cost(self, params):
""" Calculate Cost """
r = self._calculate_residuals(params)
return 0.5 * (r.T @ r)
@staticmethod
def _update(params_k, param_idxs, dx):
""" Update """
params_kp1 = copy.deepcopy(params_k)
for param_id, param in params_kp1.items():
# Check if param even exists
if param_id not in param_idxs:
continue
# Update parameter
start = param_idxs[param_id]
end = start + param.min_dims
param_dx = dx[start:end]
update_state_variable(param, param_dx)
return params_kp1
@staticmethod
def _solve_for_dx(lambda_k, H, g):
""" Solve for dx """
# Damp Hessian
H = H + lambda_k * eye(H.shape[0])
# H = H + lambda_k * np.diag(H.diagonal())
# # Pseudo inverse
# dx = pinv(H) @ g
# # Linear solver
# dx = np.linalg.solve(H, g)
# # Cholesky decomposition
c, low = scipy.linalg.cho_factor(H)
dx = scipy.linalg.cho_solve((c, low), g)
# SVD
# dx = solve_svd(H, g)
# # Sparse cholesky decomposition
# sH = scipy.sparse.csc_matrix(H)
# dx = scipy.sparse.linalg.spsolve(sH, g)
return dx
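  # With the damping above the linear system solved is (H + lambda * I) dx = g:
  # a small lambda gives a near Gauss-Newton step, a large lambda a short
  # gradient-descent-like step (the usual Levenberg-Marquardt trade-off).
  #
  # Sketch (an assumption, not the definition used elsewhere in this module) of
  # the `solve_svd` helper referenced in the comment above:
  #   def solve_svd(A, b):
  #     U, s, Vt = np.linalg.svd(A)
  #     return Vt.T @ np.diag(1.0 / s) @ (U.T @ b)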
def solve(self, verbose=False):
""" Solve """
lambda_k = self.solver_lambda
params_k = copy.deepcopy(self.params)
cost_k = self._calculate_cost(params_k)
# First evaluation
if verbose:
print(f"nb_factors: {len(self.factors)}")
print(f"nb_params: {len(self.params)}")
self._print_to_console(0, lambda_k, cost_k, cost_k)
# Iterate
for i in range(1, self.solver_max_iter):
# Update and calculate cost
((H, g), param_idxs) = self._evaluate(params_k)
dx = self._solve_for_dx(lambda_k, H, g)
params_kp1 = self._update(params_k, param_idxs, dx)
cost_kp1 = self._calculate_cost(params_kp1)
# Verbose
if verbose:
self._print_to_console(i, lambda_k, cost_kp1, cost_k)
# Accept or reject update
if cost_kp1 < cost_k:
# Accept update
cost_k = cost_kp1
params_k = params_kp1
lambda_k /= 10.0
else:
# Reject update
params_k = params_k
lambda_k *= 10.0
# Finish - set the original params the optimized values
# Note: The reason we don't just do `self.params = params_k` is because
# that would destroy the references to outside `FactorGraph()`.
for param_id, param in params_k.items():
self.params[param_id].param = param.param
# FEATURE TRACKING #############################################################
def draw_matches(img_i, img_j, kps_i, kps_j, **kwargs):
"""
Draw keypoint matches between images `img_i` and `img_j` with keypoints
`kps_i` and `kps_j`
"""
assert len(kps_i) == len(kps_j)
nb_kps = len(kps_i)
viz = cv2.hconcat([img_i, img_j])
  viz = cv2.cvtColor(viz, cv2.COLOR_GRAY2RGB)
color = (0, 255, 0)
radius = 3
thickness = kwargs.get('thickness', cv2.FILLED)
linetype = kwargs.get('linetype', cv2.LINE_AA)
for n in range(nb_kps):
pt_i = None
pt_j = None
if hasattr(kps_i[n], 'pt'):
pt_i = (int(kps_i[n].pt[0]), int(kps_i[n].pt[1]))
pt_j = (int(kps_j[n].pt[0] + img_i.shape[1]), int(kps_j[n].pt[1]))
else:
pt_i = (int(kps_i[n][0]), int(kps_i[n][1]))
pt_j = (int(kps_j[n][0] + img_i.shape[1]), int(kps_j[n][1]))
cv2.circle(viz, pt_i, radius, color, thickness, lineType=linetype)
cv2.circle(viz, pt_j, radius, color, thickness, lineType=linetype)
cv2.line(viz, pt_i, pt_j, color, 1, linetype)
return viz
def draw_keypoints(img, kps, inliers=None, **kwargs):
"""
Draw points `kps` on image `img`. The `inliers` boolean list is optional
and is expected to be the same size as `kps` denoting whether the point
should be drawn or not.
"""
inliers = [1 for i in range(len(kps))] if inliers is None else inliers
radius = kwargs.get('radius', 2)
color = kwargs.get('color', (0, 255, 0))
thickness = kwargs.get('thickness', cv2.FILLED)
linetype = kwargs.get('linetype', cv2.LINE_AA)
viz = img
if len(img.shape) == 2:
viz = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
for n, kp in enumerate(kps):
if inliers[n]:
p = None
if hasattr(kp, 'pt'):
p = (int(kp.pt[0]), int(kp.pt[1]))
else:
p = (int(kp[0]), int(kp[1]))
cv2.circle(viz, p, radius, color, thickness, lineType=linetype)
return viz
def sort_keypoints(kps):
""" Sort a list of cv2.KeyPoint based on their response """
responses = [kp.response for kp in kps]
indices = range(len(responses))
indices = sorted(indices, key=lambda i: responses[i], reverse=True)
return [kps[i] for i in indices]
def spread_keypoints(img, kps, min_dist, **kwargs):
"""
  Given a set of keypoints `kps`, make sure they are at least `min_dist` pixels
  away from each other; if they are not, remove them.
"""
# Pre-check
if not kps:
return kps
# Setup
debug = kwargs.get('debug', False)
prev_kps = kwargs.get('prev_kps', [])
min_dist = int(min_dist)
img_h, img_w = img.shape
A = np.zeros(img.shape) # Allowable areas are marked 0 else not allowed
# Loop through previous keypoints
for kp in prev_kps:
# Convert from keypoint to tuple
p = (int(kp.pt[0]), int(kp.pt[1]))
# Fill the area of the matrix where the next keypoint cannot be around
rs = int(max(p[1] - min_dist, 0.0))
re = int(min(p[1] + min_dist + 1, img_h))
cs = int(max(p[0] - min_dist, 0.0))
ce = int(min(p[0] + min_dist + 1, img_w))
A[rs:re, cs:ce] = np.ones((re - rs, ce - cs))
# Loop through keypoints
kps_results = []
for kp in sort_keypoints(kps):
# Convert from keypoint to tuple
p = (int(kp.pt[0]), int(kp.pt[1]))
# Check if point is ok to be added to results
if A[p[1], p[0]] > 0.0:
continue
# Fill the area of the matrix where the next keypoint cannot be around
rs = int(max(p[1] - min_dist, 0.0))
re = int(min(p[1] + min_dist + 1, img_h))
cs = int(max(p[0] - min_dist, 0.0))
ce = int(min(p[0] + min_dist + 1, img_w))
A[rs:re, cs:ce] = np.ones((re - rs, ce - cs))
A[p[1], p[0]] = 2
# Add to results
kps_results.append(kp)
# Debug
if debug:
img = draw_keypoints(img, kps_results, radius=3)
plt.figure()
ax = plt.subplot(121)
ax.imshow(A)
ax.set_xlabel('pixel')
ax.set_ylabel('pixel')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax = plt.subplot(122)
ax.imshow(img)
ax.set_xlabel('pixel')
ax.set_ylabel('pixel')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
plt.show()
return kps_results
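# Hedged usage sketch (not part of the original module): shows one way
# `spread_keypoints` can thin out clustered detections. The FAST detector and
# the 10-pixel minimum distance are assumptions for illustration only; `img`
# must be a single-channel (grayscale) image because `spread_keypoints`
# unpacks `img.shape` into (rows, cols).
def _example_spread_keypoints(img):
    """ Detect FAST corners and keep only keypoints at least 10 px apart. """
    detector = cv2.FastFeatureDetector_create()
    kps = detector.detect(img, None)
    return spread_keypoints(img, kps, min_dist=10)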
class FeatureGrid:
"""
FeatureGrid
    The idea is to take all the feature positions and put them into grid cells
    across the full image space. This makes it possible to keep track of how
    many features are being tracked in each individual grid cell and act
    accordingly.
o-----> x
| ---------------------
| | 0 | 1 | 2 | 3 |
V ---------------------
y | 4 | 5 | 6 | 7 |
---------------------
| 8 | 9 | 10 | 11 |
---------------------
| 12 | 13 | 14 | 15 |
---------------------
grid_x = ceil((max(1, pixel_x) / img_w) * grid_cols) - 1.0
grid_y = ceil((max(1, pixel_y) / img_h) * grid_rows) - 1.0
cell_id = int(grid_x + (grid_y * grid_cols))
"""
def __init__(self, grid_rows, grid_cols, image_shape, keypoints):
assert len(image_shape) == 2
self.grid_rows = grid_rows
self.grid_cols = grid_cols
self.image_shape = image_shape
self.keypoints = keypoints
self.cell = [0 for i in range(self.grid_rows * self.grid_cols)]
for kp in keypoints:
if hasattr(kp, 'pt'):
# cv2.KeyPoint
assert (kp.pt[0] >= 0 and kp.pt[0] <= image_shape[1])
assert (kp.pt[1] >= 0 and kp.pt[1] <= image_shape[0])
self.cell[self.cell_index(kp.pt)] += 1
else:
# Tuple
assert (kp[0] >= 0 and kp[0] <= image_shape[1])
assert (kp[1] >= 0 and kp[1] <= image_shape[0])
self.cell[self.cell_index(kp)] += 1
def cell_index(self, pt):
""" Return cell index based on point `pt` """
pixel_x, pixel_y = pt
img_h, img_w = self.image_shape
grid_x = math.ceil((max(1, pixel_x) / img_w) * self.grid_cols) - 1.0
grid_y = math.ceil((max(1, pixel_y) / img_h) * self.grid_rows) - 1.0
cell_id = int(grid_x + (grid_y * self.grid_cols))
return cell_id
def count(self, cell_idx):
""" Return cell count """
return self.cell[cell_idx]
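# Hedged usage sketch (not part of the original module): demonstrates the
# cell_id formula from the FeatureGrid docstring on concrete points. The
# 480x640 image size and the (x, y) coordinates are assumptions for
# illustration only.
def _example_feature_grid():
    """ Place two keypoints into a 4x4 grid over a 480x640 image. """
    image_shape = (480, 640)        # (rows, cols)
    kps = [(10, 10), (630, 470)]    # (x, y) tuples
    grid = FeatureGrid(4, 4, image_shape, kps)
    # (10, 10) lands in cell 0 (top-left), (630, 470) in cell 15 (bottom-right).
    return grid.cell_index((10, 10)), grid.cell_index((630, 470))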
def grid_detect(detector, image, **kwargs):
"""
Detect features uniformly using a grid system.
"""
optflow_mode = kwargs.get('optflow_mode', False)
max_keypoints = kwargs.get('max_keypoints', 240)
grid_rows = kwargs.get('grid_rows', 3)
grid_cols = kwargs.get('grid_cols', 4)
    prev_kps = kwargs.get('prev_kps', [])
dropcol_importances(rf, X_train, y_train)
"""
if X_valid is None: X_valid = X_train
if y_valid is None: y_valid = y_train
model_ = clone(model)
model_.random_state = 999
model_.fit(X_train, y_train)
if callable(metric):
baseline = metric(model_, X_valid, y_valid, sample_weights)
else:
baseline = model_.score(X_valid, y_valid, sample_weights)
imp = []
for col in X_train.columns:
model_ = clone(model)
model_.random_state = 999
model_.fit(X_train.drop(col,axis=1), y_train)
if callable(metric):
s = metric(model_, X_valid.drop(col,axis=1), y_valid, sample_weights)
else:
s = model_.score(X_valid.drop(col,axis=1), y_valid, sample_weights)
drop_in_score = baseline - s
imp.append(drop_in_score)
imp = np.array(imp)
I = pd.DataFrame(data={'Feature':X_train.columns, 'Importance':imp})
I = I.set_index('Feature')
I = I.sort_values('Importance', ascending=False)
return I
def oob_dropcol_importances(rf, X_train, y_train):
"""
Compute drop-column feature importances for scikit-learn.
Given a RandomForestClassifier or RandomForestRegressor in rf
and training X and y data, return a data frame with columns
Feature and Importance sorted in reverse order by importance.
A clone of rf is trained once to get the baseline score and then
again, once per feature to compute the drop in out of bag (OOB)
score.
return: A data frame with Feature, Importance columns
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = oob_dropcol_importances(rf, X_train, y_train)
"""
rf_ = clone(rf)
rf_.random_state = 999
rf_.oob_score = True
rf_.fit(X_train, y_train)
baseline = rf_.oob_score_
imp = []
for col in X_train.columns:
rf_ = clone(rf)
rf_.random_state = 999
rf_.oob_score = True
rf_.fit(X_train.drop(col, axis=1), y_train)
drop_in_score = baseline - rf_.oob_score_
imp.append(drop_in_score)
imp = np.array(imp)
I = pd.DataFrame(data={'Feature':X_train.columns, 'Importance':imp})
I = I.set_index('Feature')
I = I.sort_values('Importance', ascending=False)
return I
def importances_raw(rf, X_train, y_train, n_samples=5000):
if isinstance(rf, RandomForestClassifier):
return permutation_importances_raw(rf, X_train, y_train, oob_classifier_accuracy, n_samples)
elif isinstance(rf, RandomForestRegressor):
return permutation_importances_raw(rf, X_train, y_train, oob_regression_r2_score, n_samples)
return None
def permutation_importances_raw(rf, X_train, y_train, metric, n_samples=5000):
"""
Return array of importances from pre-fit rf; metric is function
that measures accuracy or R^2 or similar. This function
works for regressors and classifiers.
"""
X_sample, y_sample, _ = sample(X_train, y_train, n_samples)
if not hasattr(rf, 'estimators_'):
rf.fit(X_sample, y_sample)
baseline = metric(rf, X_sample, y_sample)
X_train = X_sample.copy(deep=False) # shallow copy
y_train = y_sample
imp = []
for col in X_train.columns:
save = X_train[col].copy()
X_train[col] = np.random.permutation(X_train[col])
m = metric(rf, X_train, y_train)
X_train[col] = save
drop_in_metric = baseline - m
imp.append(drop_in_metric)
return np.array(imp)
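# Hedged usage sketch (not part of the original source): one way to call
# permutation_importances_raw with the OOB R^2 metric defined further below.
# The RandomForestRegressor settings are assumptions for illustration only;
# X_train must be a pandas DataFrame (the function uses X_train.columns).
def _example_permutation_importances(X_train, y_train):
    """ Return raw permutation importances using the OOB R^2 metric. """
    from sklearn.ensemble import RandomForestRegressor
    rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
    rf.fit(X_train, y_train)
    return permutation_importances_raw(rf, X_train, y_train,
                                       oob_regression_r2_score, n_samples=5000)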
def _get_unsampled_indices(tree, n_samples):
"""
An interface to get unsampled indices regardless of sklearn version.
"""
if LooseVersion(sklearn.__version__) >= LooseVersion("0.22"):
# Version 0.22 or newer uses 3 arguments.
from sklearn.ensemble.forest import _get_n_samples_bootstrap
n_samples_bootstrap = _get_n_samples_bootstrap(n_samples, n_samples)
return _generate_unsampled_indices(tree.random_state, n_samples, n_samples_bootstrap)
else:
# Version 0.21 or older uses only two arguments.
return _generate_unsampled_indices(tree.random_state, n_samples)
def oob_classifier_accuracy(rf, X_train, y_train):
"""
Compute out-of-bag (OOB) accuracy for a scikit-learn random forest
classifier. We learned the guts of scikit's RF from the BSD licensed
code:
https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/ensemble/forest.py#L425
"""
X = X_train.values
y = y_train.values
n_samples = len(X)
n_classes = len(np.unique(y))
predictions = np.zeros((n_samples, n_classes))
for tree in rf.estimators_:
unsampled_indices = _get_unsampled_indices(tree, n_samples)
tree_preds = tree.predict_proba(X[unsampled_indices, :])
predictions[unsampled_indices] += tree_preds
predicted_class_indexes = np.argmax(predictions, axis=1)
predicted_classes = [rf.classes_[i] for i in predicted_class_indexes]
oob_score = np.mean(y == predicted_classes)
return oob_score
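# Hedged usage sketch (not part of the original source): computes the OOB
# accuracy above for a freshly fitted classifier. Bootstrapping must stay
# enabled (the sklearn default) so every tree has out-of-bag rows, and
# X_train / y_train are expected to be a pandas DataFrame / Series because the
# function above accesses their .values. The model settings are assumptions.
def _example_oob_classifier_accuracy(X_train, y_train):
    """ Fit a random forest classifier and report its out-of-bag accuracy. """
    from sklearn.ensemble import RandomForestClassifier
    rf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
    rf.fit(X_train, y_train)
    return oob_classifier_accuracy(rf, X_train, y_train)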
def oob_regression_r2_score(rf, X_train, y_train):
"""
Compute out-of-bag (OOB) R^2 for a scikit-learn random forest
regressor. We learned the guts of scikit's RF from the BSD licensed
code:
https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/ensemble/forest.py#L702
"""
X = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
y = y_train.values if isinstance(y_train, pd.Series) else y_train
n_samples = len(X)
predictions = np.zeros(n_samples)
n_predictions = np.zeros(n_samples)
for tree in rf.estimators_:
unsampled_indices = _get_unsampled_indices(tree, n_samples)
tree_preds = tree.predict(X[unsampled_indices, :])
predictions[unsampled_indices] += tree_preds
n_predictions[unsampled_indices] += 1
if (n_predictions == 0).any():
warnings.warn("Too few trees; some variables do not have OOB scores.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
oob_score = r2_score(y, predictions)
return oob_score
def stemplot_importances(df_importances,
yrot=0,
label_fontsize=10,
width=4,
minheight=1.5,
vscale=1.0,
imp_range=(-.002, .15),
color='#375FA5',
bgcolor=None, # seaborn uses '#F1F8FE'
xtick_precision=2,
title=None):
GREY = '#444443'
I = df_importances
unit = 1
imp = I.Importance.values
mindrop = np.min(imp)
maxdrop = np.max(imp)
imp_padding = 0.002
imp_range = (min(imp_range[0], mindrop - imp_padding), max(imp_range[1], maxdrop))
barcounts = np.array([f.count('\n')+1 for f in I.index])
N = np.sum(barcounts)
ymax = N * unit
# print(f"barcounts {barcounts}, N={N}, ymax={ymax}")
height = max(minheight, ymax * .27 * vscale)
plt.close()
fig = plt.figure(figsize=(width,height))
ax = plt.gca()
ax.set_xlim(*imp_range)
ax.set_ylim(0,ymax)
ax.spines['top'].set_linewidth(.3)
ax.spines['right'].set_linewidth(.3)
ax.spines['left'].set_linewidth(.3)
ax.spines['bottom'].set_linewidth(.3)
if bgcolor:
ax.set_facecolor(bgcolor)
yloc = []
y = barcounts[0]*unit / 2
yloc.append(y)
for i in range(1,len(barcounts)):
wprev = barcounts[i-1]
w = barcounts[i]
y += (wprev + w)/2 * unit
yloc.append(y)
yloc = np.array(yloc)
ax.xaxis.set_major_formatter(FormatStrFormatter(f'%.{xtick_precision}f'))
ax.set_xticks([maxdrop, imp_range[1]])
ax.tick_params(labelsize=label_fontsize, labelcolor=GREY)
ax.invert_yaxis() # labels read top-to-bottom
if title:
ax.set_title(title, fontsize=label_fontsize+1, fontname="Arial", color=GREY)
plt.hlines(y=yloc, xmin=imp_range[0], xmax=imp, lw=barcounts*1.2, color=color)
for i in range(len(I.index)):
plt.plot(imp[i], yloc[i], "o", color=color, markersize=barcounts[i]+2)
ax.set_yticks(yloc)
ax.set_yticklabels(I.index, fontdict={'verticalalignment': 'center'})
plt.tick_params(
pad=0,
axis='y',
which='both',
left=False)
# rotate y-ticks
if yrot is not None:
plt.yticks(rotation=yrot)
plt.tight_layout()
return PimpViz()
def plot_importances(df_importances,
yrot=0,
label_fontsize=10,
width=4,
minheight=1.5,
vscale=1,
imp_range=(-.002, .15),
color='#D9E6F5',
bgcolor=None, # seaborn uses '#F1F8FE'
xtick_precision=2,
title=None,
ax=None):
"""
Given an array or data frame of importances, plot a horizontal bar chart
showing the importance values.
:param df_importances: A data frame with Feature, Importance columns
:type df_importances: pd.DataFrame
    :param width: Figure width in inches (matplotlib's default unit). Height is
                   determined by the number of features.
    :type width: int
    :param minheight: Minimum plot height in inches
    :type minheight: float
    :param vscale: Scale factor applied to the vertical size of the plot (default 1)
    :type vscale: float
    :param label_fontsize: Font size for feature names, importance values, and x ticks
    :type label_fontsize: int
    :param yrot: Degrees to rotate feature (Y axis) labels
    :type yrot: int
    :param xtick_precision: How many digits after the decimal for importance values.
    :type xtick_precision: int
    :param title: Title of plot; set to None to omit.
    :type title: string
    :param ax: Matplotlib "axis" to plot into
    :return: A PimpViz object wrapping the figure (see SAMPLE CODE below)
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = importances(rf, X_test, y_test)
viz = plot_importances(imp)
viz.save('file.svg')
viz.save('file.pdf')
viz.view() # or just viz in notebook
"""
I = df_importances
unit = 1
ypadding = .1
imp = I.Importance.values
mindrop = np.min(imp)
maxdrop = np.max(imp)
imp_padding = 0.002
imp_range = (min(imp_range[0], mindrop - imp_padding), max(imp_range[1], maxdrop + imp_padding))
barcounts = np.array([f.count('\n')+1 for f in I.index])
N = np.sum(barcounts)
ymax = N * unit + len(I.index) * ypadding + ypadding
# print(f"barcounts {barcounts}, N={N}, ymax={ymax}")
height = max(minheight, ymax * .2 * vscale)
if ax is None:
plt.close()
fig, ax = plt.subplots(1,1,figsize=(width,height))
ax.set_xlim(*imp_range)
ax.set_ylim(0,ymax)
ax.spines['top'].set_linewidth(.3)
ax.spines['right'].set_linewidth(.3)
ax.spines['left'].set_linewidth(.3)
ax.spines['bottom'].set_linewidth(.3)
if bgcolor:
ax.set_facecolor(bgcolor)
yloc = []
y = barcounts[0]*unit / 2 + ypadding
yloc.append(y)
for i in range(1,len(barcounts)):
wprev = barcounts[i-1]
w = barcounts[i]
y += (wprev + w)/2 * unit + ypadding
yloc.append(y)
yloc = np.array(yloc)
ax.xaxis.set_major_formatter(FormatStrFormatter(f'%.{xtick_precision}f'))
# too close to show both max and right edge?
if maxdrop/imp_range[1] > 0.9 or maxdrop < 0.02:
ax.set_xticks([0, imp_range[1]])
else:
ax.set_xticks([0, maxdrop, imp_range[1]])
ax.tick_params(labelsize=label_fontsize, labelcolor=GREY)
ax.invert_yaxis() # labels read top-to-bottom
if title:
ax.set_title(title, fontsize=label_fontsize+1, fontname="Arial", color=GREY)
barcontainer = ax.barh(y=yloc, width=imp,
height=barcounts*unit,
tick_label=I.index,
color=color, align='center')
# Alter appearance of each bar
for rect in barcontainer.patches:
rect.set_linewidth(.5)
rect.set_edgecolor(GREY)
# rotate y-ticks
if yrot is not None:
ax.tick_params(labelrotation=yrot)
return PimpViz()
def oob_dependences(rf, X_train, n_samples=5000):
"""
Given a random forest model, rf, and training observation independent
variables in X_train (a dataframe), compute the OOB R^2 score using each var
as a dependent variable. We retrain rf for each var. Only numeric columns are considered.
By default, sample up to 5000 observations to compute feature dependencies.
:return: Return a DataFrame with Feature/Dependence values for each variable. Feature is the dataframe index.
"""
numcols = [col for col in X_train if is_numeric_dtype(X_train[col])]
X_train = sample_rows(X_train, n_samples)
df_dep = pd.DataFrame(columns=['Feature','Dependence'])
df_dep = df_dep.set_index('Feature')
for col in numcols:
X, y = X_train.drop(col, axis=1), X_train[col]
rf.fit(X, y)
df_dep.loc[col] = rf.oob_score_
df_dep = df_dep.sort_values('Dependence', ascending=False)
return df_dep
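# Hedged usage sketch (not part of the original source): the forest passed to
# oob_dependences must have oob_score enabled because the dependence value is
# read from rf.oob_score_ after each refit. The estimator settings are
# assumptions for illustration only; X_train is the caller's DataFrame.
def _example_oob_dependences(X_train):
    """ Score how well each numeric column is predicted by the others. """
    from sklearn.ensemble import RandomForestRegressor
    rf = RandomForestRegressor(n_estimators=50, oob_score=True, n_jobs=-1)
    return oob_dependences(rf, X_train)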
def feature_dependence_matrix(X_train,
rfmodel=RandomForestRegressor(n_estimators=50, oob_score=True),
zero=0.001,
sort_by_dependence=False,
n_samples=5000):
"""
Given training observation independent variables in X_train (a dataframe),
compute the feature importance using each var | |
'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaPrimalWeaponsLevel2': {
'build_time': 190,
'built_from': ['DehakaHatchery'],
'display_name': 'Primal Attacks Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaPrimalWeaponsLevel3': {
'build_time': 220,
'built_from': ['DehakaHatchery'],
'display_name': 'Primal Attacks Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaPrimalArmorLevel1': {
'build_time': 160,
'built_from': ['DehakaHatchery'],
'display_name': 'Primal Carapace Level 1',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaPrimalArmorLevel2': {
'build_time': 190,
'built_from': ['DehakaHatchery'],
'display_name': 'Primal Carapace Level 2',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaPrimalArmorLevel3': {
'build_time': 220,
'built_from': ['DehakaHatchery'],
'display_name': 'Primal Carapace Level 3',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaRavasaurVSArmor': {
'build_time': 60,
'built_from': ['DehakaGlevigStructure'],
'display_name': 'Dissolving Acid',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaRavasaurRange': {
'build_time': 60,
'built_from': ['DehakaGlevigStructure'],
'display_name': 'Enlarged Parotid Glands',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaRoachMoveSpeed': {
'build_time': 60,
'built_from': ['DehakaGlevigStructure'],
'display_name': 'Glial Reconstitution',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaGlevigRoachFireBreath': {
'build_time': 120,
'built_from': ['DehakaGlevigStructure'],
'display_name': 'Concentrated Fire',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaHydraliskSpeed': {
'build_time': 60,
'built_from': ['DehakaGlevigStructure'],
'display_name': 'Muscular Augments',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaImpalerTenderize': {
'build_time': 60,
'built_from': ['DehakaGlevigStructure'],
'display_name': 'Tenderize',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaUltraliskCrashingCharge': {
'build_time': 60,
'built_from': ['DehakaDakrunStructure'],
'display_name': 'Brutal Charge',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaUltraliskRegen': {
'build_time': 60,
'built_from': ['DehakaDakrunStructure'],
'display_name': 'Healing Adaptation',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaUltraliskBash': {
'build_time': 60,
'built_from': ['DehakaDakrunStructure'],
'display_name': 'Impaling Strike',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaTyrannozorFanOfKnives': {
'build_time': 60,
'built_from': ['DehakaDakrunStructure'],
'display_name': 'Barrage of Spikes',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaTyrannozorArmorAura': {
'build_time': 60,
'built_from': ['DehakaDakrunStructure'],
'display_name': 'Tyrant\'s Protection',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaMutaliskAirDoubleDamage': {
'build_time': 60,
'built_from': ['DehakaMurvarStructure'],
'display_name': 'Slicing Glave',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaMutaliskDamageReduction': {
'build_time': 60,
'built_from': ['DehakaMurvarStructure'],
'display_name': 'Shifting Carapace',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaMutaliskRespawnOnDeath': {
'build_time': 90,
'built_from': ['DehakaMurvarStructure'],
'display_name': 'Primal Reconstitution',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaGuardianExplosiveBomb': {
'build_time': 60,
'built_from': ['DehakaMurvarStructure'],
'display_name': 'Explosive Spores',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaGuardianFury': {
'build_time': 60,
'built_from': ['DehakaMurvarStructure'],
'display_name': 'Primordial Fury',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
'DehakaMurvarCreeperAir': {
'build_time': 90,
'built_from': ['DehakaMurvarStructure'],
'display_name': 'Aerial Burst Sacs',
'race': 'Zerg',
'type': 'Upgrade',
'is_morph': False,
},
},
'Horner': {
# Units
'HHSCV': {
'build_time': 17,
'built_from': ['CommandCenter'],
'display_name': 'SCV',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHReaper': {
'build_time': 14,
'built_from': ['HHMercStarportNoArmy'],
'display_name': 'Reaper',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHWidowMine': {
'build_time': 21,
'built_from': ['HHMercStarportNoArmy'],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHHellion': {
'build_time': 14,
'built_from': ['HHMercStarportNoArmy'],
'display_name': 'Hellion',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHHellionTank': {
'build_time': 14,
'built_from': ['HHMercStarportNoArmy'],
'display_name': 'Hellbat',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
# Calldowns
'HHWraith_SpawnerUnit': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHViking_SpawnerUnit': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHRaven_SpawnerUnit': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'HHBattlecruiser_SpawnerUnit': {
'build_time': 0,
'built_from': [],
'display_name': 'Sovereign Battlecruiser',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
# Buildings
'HHMercStarportCargo': {
'build_time': 60,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit', # not really sure what this counts as
'is_morph': False,
},
'HHCommandCenter': {
'build_time': 0,
'built_from': [],
'display_name': 'Command Center',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'HHMercStarportNoArmy': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'HHMercCompound': {
'build_time': 0,
'built_from': [],
'display_name': 'Engineering Bay',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'HHBomberPlatform': {
'build_time': 0,
'built_from': [],
'display_name': 'Strike Fighter Platform',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'HHStarport': {
'build_time': 0,
'built_from': [],
'display_name': 'Dominion Starport',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
# Upgrades
'HHReaperG4ClusterBombs': {
'build_time': 60,
'built_from': ['HHMercCompound'],
'display_name': 'LE9 Cluster Charges',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHReaperFlight': {
'build_time': 90,
'built_from': ['HHMercCompound'],
'display_name': 'Jetpack Overdrive',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHWidowMineDeathBlossom': {
'build_time': 60,
'built_from': ['HHMercCompound'],
'display_name': 'Executioner Missiles',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHHellionStimDeath': {
'build_time': 60,
'built_from': ['HHMercCompound'],
'display_name': 'Aerosol Stim Emitters',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHWidowMineBurrowedRange': {
'build_time': 60,
'built_from': ['EngineeringBay'],
'display_name': 'Black Market Launchers',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHHellionRocket': {
'build_time': 90,
'built_from': ['EngineeringBay'],
'display_name': 'Tar Bombs',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHHellionAttackDoT': {
'build_time': 90,
'built_from': ['EngineeringBay'],
'display_name': 'Immolation Fluid',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHHellionFearDeath': {
'build_time': 60,
'built_from': ['EngineeringBay'],
'display_name': 'Wildfire Explosives',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHWraithPermaCloak': {
'build_time': 90,
'built_from': ['StarportTechLab'],
'display_name': 'Unregistered Cloak System',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHWraithFury': {
'build_time': 90,
'built_from': ['StarportTechLab'],
'display_name': 'Trigger Override',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVikingRockets': {
'build_time': 90,
'built_from': ['StarportTechLab'],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVikingPiercingAttacks': {
'build_time': 90,
'built_from': ['StarportTechLab'],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'FleetwideJump': {
'build_time': 90,
'built_from': ['StarportTechLab'],
'display_name': 'Tactical Jump',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHMultiLock': {
'build_time': 90,
'built_from': ['StarportTechLab'],
'display_name': 'Multi-Threaded Sensors',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVehicleAndShipWeaponsLevel1': {
'build_time': 160,
'built_from': ['Armory'],
'display_name': 'Horner Weapons Level 1',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVehicleAndShipWeaponsLevel2': {
'build_time': 190,
'built_from': ['Armory'],
'display_name': 'Horner Weapons Level 2',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVehicleAndShipWeaponsLevel3': {
'build_time': 220,
'built_from': ['Armory'],
'display_name': 'Horner Weapons Level 3',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVehicleAndShipArmorsLevel1': {
'build_time': 160,
'built_from': ['Armory'],
'display_name': 'Horner Armor Level 1',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVehicleAndShipArmorsLevel2': {
'build_time': 190,
'built_from': ['Armory'],
'display_name': 'Horner Armor Level 2',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHVehicleAndShipArmorsLevel3': {
'build_time': 220,
'built_from': ['Armory'],
'display_name': 'Horner Armor Level 3',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHBattlecruiserYamatoAttacks': {
'build_time': 90,
'built_from': ['FusionCore'],
'display_name': 'Overcharged Reactor',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
'HHBomberNapalm': {
'build_time': 90,
'built_from': ['FusionCore'],
'display_name': 'Napalm Payload',
'race': 'Terran',
'type': 'Upgrade',
'is_morph': False,
},
},
'Tychus': {
# Tychus Units
'TychusSCV': {
'build_time': 17,
'built_from': ['CommandCenter'],
'display_name': 'SCV',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusCoop': {
'build_time': 0,
'built_from': [],
'display_name': 'Tychus',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusReaper': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusWarhound': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusFirebat': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusHERC': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusMarauder': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusGhost': {
'build_time': 0,
'built_from': [],
'display_name': 'Vega',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusSpectre': {
'build_time': 0,
'built_from': [],
'display_name': 'Nux',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
'TychusMedic': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Unit',
'is_morph': False,
},
# Tychus Buildings
# Joeyray's Bar missing
'TychusEngineeringBay': {
'build_time': 0,
'built_from': [],
'display_name': 'Engineering Bay',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'TychusMercCompound': {
'build_time': 0,
'built_from': [],
'display_name': '<NAME>',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'TychusSCVAutoTurret': {
'build_time': 0,
'built_from': [],
'display_name': 'Auto-Turret',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'TychusArmory': {
'build_time': 0,
'built_from': [],
'display_name': 'Muscle Armory',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'TychusCommandCenter': {
'build_time': 0,
'built_from': [],
'display_name': 'Command Center',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'TychusGhostAcademy': {
'build_time': 0,
'built_from': [],
'display_name': 'Fixers Safehouse',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
'TychusMedivacPlatform': {
'build_time': 0,
'built_from': [],
'display_name': 'Medivac Platform',
'race': 'Terran',
'type': 'Building',
'is_morph': False,
},
# Tychus Upgrades
'TychusOutlawWeaponsLevel1': | |
            pulumi.log.warn(f"Key '{key}' not found in TopicRuleErrorActionS3. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleErrorActionS3.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleErrorActionS3.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket_name: str,
key: str,
role_arn: str):
"""
:param str bucket_name: The Amazon S3 bucket name.
:param str key: The object key.
        :param str role_arn: The ARN of the IAM role that grants access to the Amazon S3 bucket.
"""
pulumi.set(__self__, "bucket_name", bucket_name)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> str:
"""
The Amazon S3 bucket name.
"""
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter
def key(self) -> str:
"""
The object key.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
        The ARN of the IAM role that grants access to the Amazon S3 bucket.
"""
return pulumi.get(self, "role_arn")
@pulumi.output_type
class TopicRuleErrorActionSns(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "roleArn":
suggest = "role_arn"
elif key == "targetArn":
suggest = "target_arn"
elif key == "messageFormat":
suggest = "message_format"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TopicRuleErrorActionSns. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleErrorActionSns.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleErrorActionSns.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
role_arn: str,
target_arn: str,
message_format: Optional[str] = None):
"""
:param str role_arn: The ARN of the IAM role that grants access.
:param str target_arn: The ARN of the SNS topic.
:param str message_format: The message format of the message to publish. Accepted values are "JSON" and "RAW".
"""
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "target_arn", target_arn)
if message_format is not None:
pulumi.set(__self__, "message_format", message_format)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The ARN of the IAM role that grants access.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="targetArn")
def target_arn(self) -> str:
"""
The ARN of the SNS topic.
"""
return pulumi.get(self, "target_arn")
@property
@pulumi.getter(name="messageFormat")
def message_format(self) -> Optional[str]:
"""
The message format of the message to publish. Accepted values are "JSON" and "RAW".
"""
return pulumi.get(self, "message_format")
@pulumi.output_type
class TopicRuleErrorActionSqs(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "queueUrl":
suggest = "queue_url"
elif key == "roleArn":
suggest = "role_arn"
elif key == "useBase64":
suggest = "use_base64"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TopicRuleErrorActionSqs. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleErrorActionSqs.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleErrorActionSqs.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
queue_url: str,
role_arn: str,
use_base64: bool):
"""
:param str queue_url: The URL of the Amazon SQS queue.
:param str role_arn: The ARN of the IAM role that grants access.
:param bool use_base64: Specifies whether to use Base64 encoding.
"""
pulumi.set(__self__, "queue_url", queue_url)
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "use_base64", use_base64)
@property
@pulumi.getter(name="queueUrl")
def queue_url(self) -> str:
"""
The URL of the Amazon SQS queue.
"""
return pulumi.get(self, "queue_url")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The ARN of the IAM role that grants access.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="useBase64")
def use_base64(self) -> bool:
"""
Specifies whether to use Base64 encoding.
"""
return pulumi.get(self, "use_base64")
@pulumi.output_type
class TopicRuleErrorActionStepFunctions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "roleArn":
suggest = "role_arn"
elif key == "stateMachineName":
suggest = "state_machine_name"
elif key == "executionNamePrefix":
suggest = "execution_name_prefix"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TopicRuleErrorActionStepFunctions. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleErrorActionStepFunctions.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleErrorActionStepFunctions.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
role_arn: str,
state_machine_name: str,
execution_name_prefix: Optional[str] = None):
"""
:param str role_arn: The ARN of the IAM role that grants access to start execution of the state machine.
:param str state_machine_name: The name of the Step Functions state machine whose execution will be started.
:param str execution_name_prefix: The prefix used to generate, along with a UUID, the unique state machine execution name.
"""
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "state_machine_name", state_machine_name)
if execution_name_prefix is not None:
pulumi.set(__self__, "execution_name_prefix", execution_name_prefix)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The ARN of the IAM role that grants access to start execution of the state machine.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="stateMachineName")
def state_machine_name(self) -> str:
"""
The name of the Step Functions state machine whose execution will be started.
"""
return pulumi.get(self, "state_machine_name")
@property
@pulumi.getter(name="executionNamePrefix")
def execution_name_prefix(self) -> Optional[str]:
"""
The prefix used to generate, along with a UUID, the unique state machine execution name.
"""
return pulumi.get(self, "execution_name_prefix")
@pulumi.output_type
class TopicRuleFirehose(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "deliveryStreamName":
suggest = "delivery_stream_name"
elif key == "roleArn":
suggest = "role_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TopicRuleFirehose. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleFirehose.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleFirehose.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
delivery_stream_name: str,
role_arn: str,
separator: Optional[str] = None):
"""
:param str delivery_stream_name: The delivery stream name.
:param str role_arn: The IAM role ARN that grants access to the Amazon Kinesis Firehose stream.
:param str separator: A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma).
"""
pulumi.set(__self__, "delivery_stream_name", delivery_stream_name)
pulumi.set(__self__, "role_arn", role_arn)
if separator is not None:
pulumi.set(__self__, "separator", separator)
@property
@pulumi.getter(name="deliveryStreamName")
def delivery_stream_name(self) -> str:
"""
The delivery stream name.
"""
return pulumi.get(self, "delivery_stream_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The IAM role ARN that grants access to the Amazon Kinesis Firehose stream.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def separator(self) -> Optional[str]:
"""
A character separator that is used to separate records written to the Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' (Windows newline), ',' (comma).
"""
return pulumi.get(self, "separator")
@pulumi.output_type
class TopicRuleIotAnalytic(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "channelName":
suggest = "channel_name"
elif key == "roleArn":
suggest = "role_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TopicRuleIotAnalytic. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleIotAnalytic.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleIotAnalytic.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
channel_name: str,
role_arn: str):
"""
:param str channel_name: Name of AWS IOT Analytics channel.
:param str role_arn: The ARN of the IAM role that grants access.
"""
pulumi.set(__self__, "channel_name", channel_name)
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> str:
"""
Name of AWS IOT Analytics channel.
"""
return pulumi.get(self, "channel_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The ARN of the IAM role that grants access.
"""
return pulumi.get(self, "role_arn")
@pulumi.output_type
class TopicRuleIotEvent(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "inputName":
suggest = "input_name"
elif key == "roleArn":
suggest = "role_arn"
elif key == "messageId":
suggest = "message_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TopicRuleIotEvent. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TopicRuleIotEvent.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TopicRuleIotEvent.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
input_name: str,
role_arn: str,
message_id: Optional[str] = None):
"""
:param str input_name: The name of the AWS IoT Events input.
:param str role_arn: The ARN of the IAM role that grants access.
:param str message_id: Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector.
"""
pulumi.set(__self__, "input_name", input_name)
pulumi.set(__self__, "role_arn", role_arn)
if message_id is not None:
pulumi.set(__self__, "message_id", message_id)
@property
@pulumi.getter(name="inputName")
def input_name(self) -> str:
"""
The name of the AWS IoT Events input.
"""
return pulumi.get(self, "input_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
The ARN of the IAM role that grants access.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="messageId")
def message_id(self) -> Optional[str]:
"""
Use this to ensure that only one input (message) with a given messageId is processed by an AWS IoT Events detector.
"""
| |
from sqlalchemy import create_engine, Column, Integer, BigInteger, String, Boolean, MetaData, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.types import DateTime, Date, Interval
from sqlalchemy.pool import NullPool
from .conf import settings
from logging import Logger
print("loaded dbobjects module")
class DB:
#print "loaded DB Class"
database_string = 'postgresql+psycopg2://' + settings.DB_USER + ':' + settings.DB_PASSWD + '@' + settings.DB_HOST + ':' + str(settings.DB_PORT) + '/' + settings.DB_DATABASE
pg_db_engine = create_engine(database_string, poolclass=NullPool, echo=settings.DEBUG_ALCHEMY)
mymetadata = MetaData(bind=pg_db_engine)
Base = declarative_base(metadata=mymetadata)
def __init__(self):
#postgresql[+driver]://<user>:<pass>@<host>/<dbname> #, server_side_cursors=True)
self.Session = sessionmaker() # Was
#self.Session = sessionmaker(bind=self.pg_db_engine) # JCS
loglevel = 'DEBUG'
self.log = Logger(settings.LOGGING_INI, loglevel)
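# Hedged usage sketch (not part of the original module): because the
# sessionmaker above is created unbound (see the commented-out JCS line), a
# session needs the engine supplied at call time. This helper is an assumption
# about intended use, not original code.
def _example_open_session():
    """ Open a session bound to the module-level PostgreSQL engine. """
    db = DB()
    session = db.Session(bind=DB.pg_db_engine)
    return session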
class MapBase():
def __init__(self, field_dict):
if settings.DEBUG:
print("Base Class created: %s" % self.__class__.__name__)
#def __init__(self, field_dict):
if settings.DEBUG:
print(field_dict)
        for x, y in field_dict.items():
self.__setattr__(x,y)
def __repr__(self):
field_dict = vars(self)
out = ''
if len(field_dict) > 0:
            for x, y in field_dict.items():
if x[0] != "_":
out = out + "%s = %s, " % (x,y)
return "<%s(%s)>" % (self.__class__.__name__, out)
else:
return ''
class SiteServiceParticipation(DB.Base, MapBase):
__tablename__ = 'site_service_participation'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
site_service_participation_idid_num = Column(String(32))
site_service_participation_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_participation_idid_str = Column(String(32))
site_service_participation_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32)) # JCS
#site_service_idid_num_date_collected = Column(DateTime(timezone=False)) # JCS
destination = Column(String(32))
destination_date_collected = Column(DateTime(timezone=False))
destination_other = Column(String(32))
destination_other_date_collected = Column(DateTime(timezone=False))
destination_tenure = Column(String(32))
destination_tenure_date_collected = Column(DateTime(timezone=False))
disabling_condition = Column(String(32))
disabling_condition_date_collected = Column(DateTime(timezone=False))
participation_dates_start_date = Column(DateTime(timezone=False))
participation_dates_start_date_date_collected = Column(DateTime(timezone=False))
participation_dates_end_date = Column(DateTime(timezone=False))
participation_dates_end_date_date_collected = Column(DateTime(timezone=False))
veteran_status = Column(String(32))
veteran_status_date_collected = Column(DateTime(timezone=False))
#adding a reported column. Hopefully this will append the column to the table def.
reported = Column(Boolean)
site_service_participation_id_delete = Column(String(32))
site_service_participation_id_delete_occurred_date = Column(DateTime(timezone=False))
site_service_participation_id_delete_effective_date = Column(DateTime(timezone=False))
fk_participation_to_need = relationship('Need', backref='fk_need_to_participation')
fk_participation_to_serviceevent = relationship('ServiceEvent')
fk_participation_to_personhistorical = relationship('PersonHistorical')
fk_participation_to_person = Column(Integer, ForeignKey('person.id'))
useexisting = True
class Need(DB.Base, MapBase):
__tablename__ = 'need'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id')) # JCS
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id')) # JCS
export_index_id = Column(Integer, ForeignKey('export.id'))
need_idid_num = Column(String(32))
need_idid_num_date_collected = Column(DateTime(timezone=False))
need_idid_str = Column(String(32))
need_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32))
site_service_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_idid_str = Column(String(32))
site_service_idid_str_date_collected = Column(DateTime(timezone=False))
service_event_idid_num = Column(String(32))
service_event_idid_num_date_collected = Column(DateTime(timezone=False))
service_event_idid_str = Column(String(32))
service_event_idid_str_date_collected = Column(DateTime(timezone=False))
need_status = Column(String(32))
need_status_date_collected = Column(DateTime(timezone=False))
taxonomy = Column(String(32))
reported = Column(Boolean)
## HUD 3.0
person_index_id = Column(Integer, ForeignKey('person.id'))
need_id_delete = Column(String(32))
need_id_delete_occurred_date = Column(DateTime(timezone=False))
need_id_delete_delete_effective_date = Column(DateTime(timezone=False))
need_effective_period_start_date = Column(DateTime(timezone=False))
need_effective_period_end_date = Column(DateTime(timezone=False))
need_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class Races(DB.Base, MapBase):
__tablename__ = 'races'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
race_unhashed = Column(Integer)
race_hashed = Column(String(32))
race_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
## HUD 3.0
race_data_collection_stage = Column(String(32))
race_date_effective = Column(DateTime(timezone=False))
useexisting = True
class OtherNames(DB.Base, MapBase):
__tablename__ = 'other_names'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
other_first_name_unhashed = Column(String(50))
other_first_name_hashed = Column(String(50))
other_first_name_date_collected = Column(DateTime(timezone=False))
other_first_name_date_effective = Column(DateTime(timezone=False))
other_first_name_data_collection_stage = Column(String(32))
other_middle_name_unhashed = Column(String(50))
other_middle_name_hashed = Column(String(50))
other_middle_name_date_collected = Column(DateTime(timezone=False))
other_middle_name_date_effective = Column(DateTime(timezone=False))
other_middle_name_data_collection_stage = Column(String(32))
other_last_name_unhashed = Column(String(50))
other_last_name_hashed = Column(String(50))
other_last_name_date_collected = Column(DateTime(timezone=False))
other_last_name_date_effective = Column(DateTime(timezone=False))
other_last_name_data_collection_stage = Column(String(32))
other_suffix_unhashed = Column(String(50))
other_suffix_hashed = Column(String(50))
other_suffix_date_collected = Column(DateTime(timezone=False))
other_suffix_date_effective = Column(DateTime(timezone=False))
other_suffix_data_collection_stage = Column(String(32))
useexisting = True
class HUDHomelessEpisodes(DB.Base, MapBase):
__tablename__ = 'hud_homeless_episodes'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
start_date = Column(String(32))
start_date_date_collected = Column(DateTime(timezone=False))
end_date = Column(String(32))
end_date_date_collected = Column(DateTime(timezone=False))
useexisting = True
class Veteran(DB.Base, MapBase):
__tablename__ = 'veteran'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
service_era = Column(Integer)
service_era_date_collected = Column(DateTime(timezone=False))
military_service_duration = Column(Integer)
military_service_duration_date_collected = Column(DateTime(timezone=False))
served_in_war_zone = Column(Integer)
served_in_war_zone_date_collected = Column(DateTime(timezone=False))
war_zone = Column(Integer)
war_zone_date_collected = Column(DateTime(timezone=False))
war_zone_other = Column(String(50))
war_zone_other_date_collected = Column(DateTime(timezone=False))
months_in_war_zone = Column(Integer)
months_in_war_zone_date_collected = Column(DateTime(timezone=False))
received_fire = Column(Integer)
received_fire_date_collected = Column(DateTime(timezone=False))
military_branch = Column(Integer)
military_branch_date_collected = Column(DateTime(timezone=False))
military_branch_other = Column(String(50))
military_branch_other_date_collected = Column(DateTime(timezone=False))
discharge_status = Column(Integer)
discharge_status_date_collected = Column(DateTime(timezone=False))
discharge_status_other = Column(String(50))
discharge_status_other_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class DrugHistory(DB.Base, MapBase):
__tablename__ = 'drug_history'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
drug_history_id = Column(String(32))
drug_history_id_date_collected = Column(DateTime(timezone=False))
drug_code = Column(Integer)
drug_code_date_collected = Column(DateTime(timezone=False))
drug_use_frequency = Column(Integer)
drug_use_frequency_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class EmergencyContact(DB.Base, MapBase):
__tablename__ = 'emergency_contact'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
emergency_contact_id = Column(String(32))
emergency_contact_id_date_collected = Column(DateTime(timezone=False))
emergency_contact_name = Column(String(32))
emergency_contact_name_date_collected = Column(DateTime(timezone=False))
emergency_contact_phone_number_0 = Column(String(32))
emergency_contact_phone_number_date_collected_0 = Column(DateTime(timezone=False))
emergency_contact_phone_number_type_0 = Column(String(32))
emergency_contact_phone_number_1 = Column(String(32))
emergency_contact_phone_number_date_collected_1 = Column(DateTime(timezone=False))
emergency_contact_phone_number_type_1 = Column(String(32))
emergency_contact_address_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_start_date = Column(DateTime(timezone=False))
emergency_contact_address_start_date_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_end_date = Column(DateTime(timezone=False))
emergency_contact_address_end_date_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_line1 = Column(String(32))
emergency_contact_address_line1_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_line2 = Column(String(32))
emergency_contact_address_line2_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_city = Column(String(32))
emergency_contact_address_city_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_state = Column(String(32))
emergency_contact_address_state_date_collected = Column(DateTime(timezone=False))
emergency_contact_relation_to_client = Column(String(32))
emergency_contact_relation_to_client_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class PersonAddress(DB.Base, MapBase):
__tablename__ = 'person_address'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
address_period_start_date = Column(DateTime(timezone=False))
address_period_start_date_date_collected = Column(DateTime(timezone=False))
address_period_end_date = Column(DateTime(timezone=False))
address_period_end_date_date_collected = Column(DateTime(timezone=False))
pre_address_line = Column(String(100))
pre_address_line_date_collected = Column(DateTime(timezone=False))
pre_address_line_date_effective = Column(DateTime(timezone=False))
pre_address_line_data_collection_stage = Column(String(32))
line1 = Column(String(100))
line1_date_collected = Column(DateTime(timezone=False))
line1_date_effective = Column(DateTime(timezone=False))
line1_data_collection_stage = Column(String(32))
line2 = Column(String(100))
line2_date_collected = Column(DateTime(timezone=False))
line2_date_effective = Column(DateTime(timezone=False))
line2_data_collection_stage = Column(String(32))
city = Column(String(100))
city_date_collected = Column(DateTime(timezone=False))
city_date_effective = Column(DateTime(timezone=False))
city_data_collection_stage = Column(String(32))
county = Column(String(32))
county_date_collected = Column(DateTime(timezone=False))
county_date_effective = Column(DateTime(timezone=False))
county_data_collection_stage = Column(String(32))
state = Column(String(32))
state_date_collected = Column(DateTime(timezone=False))
state_date_effective = Column(DateTime(timezone=False))
state_data_collection_stage = Column(String(32))
zipcode = Column(String(10))
zipcode_date_collected = Column(DateTime(timezone=False))
zipcode_date_effective = Column(DateTime(timezone=False))
zipcode_data_collection_stage = Column(String(32))
country = Column(String(32))
country_date_collected = Column(DateTime(timezone=False))
country_date_effective = Column(DateTime(timezone=False))
country_data_collection_stage = Column(String(32))
is_last_permanent_zip = Column(Integer)
is_last_permanent_zip_date_collected = Column(DateTime(timezone=False))
is_last_permanent_zip_date_effective = Column(DateTime(timezone=False))
is_last_permanent_zip_data_collection_stage = Column(String(32))
zip_quality_code = Column(Integer)
zip_quality_code_date_collected = Column(DateTime(timezone=False))
zip_quality_code_date_effective = Column(DateTime(timezone=False))
zip_quality_code_data_collection_stage = Column(String(32))
reported = Column(Boolean)
## HUD 3.0
person_address_delete = Column(String(32))
person_address_delete_occurred_date = Column(DateTime(timezone=False))
person_address_delete_effective_date = Column(DateTime(timezone=False))
useexisting = True
class PersonHistorical(DB.Base, MapBase):
__tablename__ = 'person_historical'
id = Column(Integer, primary_key=True)
call_index_id = Column(Integer, ForeignKey('call.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id')) # JCS
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id')) # JCS
person_historical_id_id_num = Column(String(32))
person_historical_id_id_str = Column(String(32))
person_historical_id_delete_effective_date = Column(DateTime(timezone=False))
person_historical_id_delete = Column(Integer)
person_historical_id_delete_occurred_date = Column(DateTime(timezone=False))
barrier_code = Column(String(32))
barrier_code_date_collected = Column(DateTime(timezone=False))
barrier_other = Column(String(32))
barrier_other_date_collected = Column(DateTime(timezone=False))
child_currently_enrolled_in_school = Column(String(32))
child_currently_enrolled_in_school_date_collected = Column(DateTime(timezone=False))
currently_employed = Column(String(32))
currently_employed_date_collected = Column(DateTime(timezone=False))
currently_in_school = Column(String(32))
currently_in_school_date_collected = Column(DateTime(timezone=False))
degree_code = Column(String(32))
degree_code_date_collected = Column(DateTime(timezone=False))
degree_other = Column(String(32))
degree_other_date_collected = Column(DateTime(timezone=False))
developmental_disability = Column(String(32))
developmental_disability_date_collected = Column(DateTime(timezone=False))
domestic_violence = Column(String(32))
domestic_violence_date_collected = Column(DateTime(timezone=False))
domestic_violence_how_long = Column(String(32))
domestic_violence_how_long_date_collected = Column(DateTime(timezone=False))
due_date = Column(String(32))
due_date_date_collected = Column(DateTime(timezone=False))
employment_tenure = Column(String(32))
employment_tenure_date_collected = Column(DateTime(timezone=False))
health_status = Column(String(32))
health_status_date_collected = Column(DateTime(timezone=False))
highest_school_level = Column(String(32))
highest_school_level_date_collected = Column(DateTime(timezone=False))
hivaids_status = Column(String(32))
hivaids_status_date_collected = Column(DateTime(timezone=False))
hours_worked_last_week = Column(String(32))
hours_worked_last_week_date_collected = Column(DateTime(timezone=False))
hud_chronic_homeless = Column(String(32))
hud_chronic_homeless_date_collected = Column(DateTime(timezone=False))
hud_homeless = Column(String(32))
hud_homeless_date_collected = Column(DateTime(timezone=False))
site_service_id = Column(Integer)
###HUDHomelessEpisodes (subtable)
###IncomeAndSources (subtable)
length_of_stay_at_prior_residence = Column(String(32))
length_of_stay_at_prior_residence_date_collected = Column(DateTime(timezone=False))
looking_for_work = Column(String(32))
looking_for_work_date_collected = Column(DateTime(timezone=False))
mental_health_indefinite = Column(String(32))
mental_health_indefinite_date_collected = Column(DateTime(timezone=False))
mental_health_problem = Column(String(32))
mental_health_problem_date_collected = Column(DateTime(timezone=False))
non_cash_source_code = Column(String(32))
non_cash_source_code_date_collected = Column(DateTime(timezone=False))
non_cash_source_other = Column(String(32))
non_cash_source_other_date_collected = Column(DateTime(timezone=False))
###PersonAddress (subtable)
person_email = Column(String(32))
person_email_date_collected = Column(DateTime(timezone=False))
person_phone_number = Column(String(32))
person_phone_number_date_collected = Column(DateTime(timezone=False))
physical_disability = Column(String(32))
physical_disability_date_collected = Column(DateTime(timezone=False))
pregnancy_status = Column(String(32))
pregnancy_status_date_collected = Column(DateTime(timezone=False))
prior_residence = Column(String(32))
prior_residence_date_collected = Column(DateTime(timezone=False))
prior_residence_other = Column(String(32))
prior_residence_other_date_collected = Column(DateTime(timezone=False))
reason_for_leaving = Column(String(32))
reason_for_leaving_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_other = Column(String(32))
reason_for_leaving_other_date_collected = Column(DateTime(timezone=False))
school_last_enrolled_date = Column(String(32))
school_last_enrolled_date_date_collected = Column(DateTime(timezone=False))
school_name = Column(String(32))
school_name_date_collected = Column(DateTime(timezone=False))
school_type = Column(String(32))
school_type_date_collected = Column(DateTime(timezone=False))
subsidy_other = Column(String(32))
subsidy_other_date_collected = Column(DateTime(timezone=False))
subsidy_type = Column(String(32))
subsidy_type_date_collected = Column(DateTime(timezone=False))
substance_abuse_indefinite = Column(String(32))
substance_abuse_indefinite_date_collected = Column(DateTime(timezone=False))
substance_abuse_problem = Column(String(32))
substance_abuse_problem_date_collected = Column(DateTime(timezone=False))
total_income = Column(String(32))
total_income_date_collected = Column(DateTime(timezone=False))
###Veteran (subtable)
vocational_training = Column(String(32))
vocational_training_date_collected = Column(DateTime(timezone=False))
annual_personal_income = Column(Integer)
annual_personal_income_date_collected = Column(DateTime(timezone=False))
employment_status = Column(Integer)
employment_status_date_collected = Column(DateTime(timezone=False))
family_size = Column(Integer)
family_size_date_collected = Column(DateTime(timezone=False))
hearing_impaired = Column(Integer)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide constants and composite types for binding with ctypes.
"""
from __future__ import absolute_import
import ctypes
import ctypes.util
import os
from .utils import coerce_char_p, python_2_unicode_compatible, read_ejdb_config
# get_package_root is called in init() below; assuming it is also provided by .utils.
from .utils import get_package_root
JDBIDKEYNAME = '_id'
# enum { /** Database open modes */
# JBOREADER = 1u << 0, /**< Open as a reader. */
# JBOWRITER = 1u << 1, /**< Open as a writer. */
# JBOCREAT = 1u << 2, /**< Create if db file not exists. */
# JBOTRUNC = 1u << 3, /**< Truncate db on open. */
# JBONOLCK = 1u << 4, /**< Open without locking. */
# JBOLCKNB = 1u << 5, /**< Lock without blocking. */
# JBOTSYNC = 1u << 6 /**< Synchronize every transaction. */
# };
JBOREADER = 1 << 0
JBOWRITER = 1 << 1
JBOCREAT = 1 << 2
JBOTRUNC = 1 << 3
JBONOLCK = 1 << 4
JBOLCKNB = 1 << 5
JBOTSYNC = 1 << 6  # 1u << 6 per the C enum comment above (1 << 5 is JBOLCKNB)
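# Illustrative sketch: these open-mode flags are meant to be OR-ed together and
# passed to ejdb.open() once init() below has bound the C functions. For example,
# a writable handle that creates the file if missing and syncs every transaction:
#
#   mode = JBOWRITER | JBOCREAT | JBOTSYNC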
# enum { /** Index modes, index types. */
# JBIDXDROP = 1 << 0, /**< Drop index. */
# JBIDXDROPALL = 1 << 1, /**< Drop index for all types. */
# JBIDXOP = 1 << 2, /**< Optimize indexes. */
# JBIDXREBLD = 1 << 3, /**< Rebuild index. */
# JBIDXNUM = 1 << 4, /**< Number index. */
# JBIDXSTR = 1 << 5, /**< String index.*/
# JBIDXARR = 1 << 6, /**< Array token index. */
# JBIDXISTR = 1 << 7 /**< Case insensitive string index */
# };
JBIDXDROP = 1 << 0
JBIDXDROPALL = 1 << 1
JBIDXOP = 1 << 2
JBIDXREBLD = 1 << 3
JBIDXNUM = 1 << 4
JBIDXSTR = 1 << 5
JBIDXARR = 1 << 6
JBIDXISTR = 1 << 7
# enum { /*< Query search mode flags in ejdbqryexecute() */
# JBQRYCOUNT = 1, /*< Query only count(*) */
# JBQRYFINDONE = 1 << 1 /*< Fetch first record only */
# };
JBQRYCOUNT = 1
JBQRYFINDONE = 1 << 1
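# Illustrative sketch (assumes init() below has been called so ejdb.qryexecute is
# bound; `coll` and `query` are hypothetical handles): passing JBQRYCOUNT as the
# flags argument asks for a count-only execution, e.g.
#
#   count = ctypes.c_uint32()
#   ejdb.qryexecute(coll, query, ctypes.byref(count), JBQRYCOUNT, None)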
# #define BSON_OK 0
# #define BSON_ERROR -1
BSON_OK = 0
BSON_ERROR = -1
# typedef enum {
# BSON_EOO = 0,
# BSON_DOUBLE = 1,
# BSON_STRING = 2,
# BSON_OBJECT = 3,
# BSON_ARRAY = 4,
# BSON_BINDATA = 5,
# BSON_UNDEFINED = 6,
# BSON_OID = 7,
# BSON_BOOL = 8,
# BSON_DATE = 9,
# BSON_NULL = 10,
# BSON_REGEX = 11,
# BSON_DBREF = 12, /**< Deprecated. */
# BSON_CODE = 13,
# BSON_SYMBOL = 14,
# BSON_CODEWSCOPE = 15,
# BSON_INT = 16,
# BSON_TIMESTAMP = 17,
# BSON_LONG = 18
# } bson_type;
BSON_EOO = 0 # End of object.
BSON_DOUBLE = 1
BSON_STRING = 2
BSON_OBJECT = 3
BSON_ARRAY = 4
BSON_BINDATA = 5
BSON_UNDEFINED = 6
BSON_OID = 7
BSON_BOOL = 8
BSON_DATE = 9
BSON_NULL = 10
BSON_REGEX = 11
BSON_DBREF = 12
BSON_CODE = 13
BSON_SYMBOL = 14
BSON_CODEWSCOPE = 15
BSON_INT = 16
BSON_TIMESTAMP = 17
BSON_LONG = 18
# enum bson_binary_subtype_t {
# BSON_BIN_BINARY = 0,
# BSON_BIN_FUNC = 1,
# BSON_BIN_BINARY_OLD = 2,
# BSON_BIN_UUID = 3,
# BSON_BIN_MD5 = 5,
# BSON_BIN_USER = 128
# };
BSON_BIN_BINARY = 0
BSON_BIN_FUNC = 1
BSON_BIN_BINARY_OLD = 2
BSON_BIN_UUID = 3
BSON_BIN_MD5 = 5
BSON_BIN_USER = 128
# We treat these constructs as opaque pointers.
BSONREF = ctypes.c_void_p
BSONITERREF = ctypes.c_void_p
EJDBREF = ctypes.c_void_p
EJQREF = ctypes.c_void_p
TCLISTREF = ctypes.c_void_p
TCXSTRREF = ctypes.c_void_p
EJQRESULT = TCLISTREF
# struct EJCOLL { /**> EJDB Collection. */
# char *cname; /**> Collection name. */
# int cnamesz; /**> Collection name length. */
# TCTDB *tdb; /**> Collection TCTDB. */
# EJDB *jb; /**> Database handle. */
# void *mmtx; /*> Mutex for method */
# };
# TODO: This is private API. Find a way to retrieve cname without this.
class EJCOLL(ctypes.Structure):
_fields_ = [
('cname', ctypes.c_char_p),
('cnamesz', ctypes.c_int),
('tdb', ctypes.c_void_p),
('jb', EJDBREF),
('mmtx', ctypes.c_void_p),
]
EJCOLLREF = ctypes.POINTER(EJCOLL)
# typedef struct { /**< EJDB collection tuning options. */
# bool large;
# /**< Large collection. It can be larger than 2GB. Default false */
# bool compressed;
# /**< Collection records will be compressed with DEFLATE compression.
# Default: false */
# int64_t records;
# /**< Expected records number in the collection. Default: 128K */
# int cachedrecords;
# /**< Maximum number of records cached in memory. Default: 0 */
# } EJCOLLOPTS;
class EJCOLLOPTS(ctypes.Structure):
_fields_ = [
('large', ctypes.c_bool),
('compressed', ctypes.c_bool),
('records', ctypes.c_int64),
('cachedrecords', ctypes.c_int),
]
EJCOLLOPTSREF = ctypes.POINTER(EJCOLLOPTS)
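# Illustrative sketch (the option values and collection name are made up): the
# tuning struct can be populated directly and passed by reference when creating
# a collection after init() has bound the C functions, e.g.
#
#   opts = EJCOLLOPTS(large=False, compressed=True,
#                     records=128 * 1024, cachedrecords=0)
#   coll = ejdb.createcoll(db, b'articles', ctypes.byref(opts))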
# #pragma pack(1)
# typedef union {
# char bytes[12];
# int ints[3];
# } bson_oid_t;
# #pragma pack()
@python_2_unicode_compatible
class BSONOID(ctypes.Union):
_pack_ = 1
_fields_ = [
('bytes', ctypes.c_char * 12),
('ints', ctypes.c_int * 3),
]
def __str__(self):
buf = ctypes.create_string_buffer(25)
bson.oid_to_string(ctypes.byref(self), buf)
s = buf.value.decode('ascii') # ASCII is enough since OID is hex.
return s
@classmethod
def from_string(cls, s):
s = coerce_char_p(s)
if not ejdb.isvalidoidstr(s):
raise ValueError('OID should be a 24-character-long hex string.')
oid = cls()
bson.oid_from_string(ctypes.byref(oid), s)
return oid
BSONOIDREF = ctypes.POINTER(BSONOID)
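# Illustrative sketch (requires init() below to have bound `ejdb` and `bson`;
# the hex string is a made-up but well-formed 24-character OID):
#
#   oid = BSONOID.from_string('0123456789abcdef01234567')
#   str(oid)   # -> '0123456789abcdef01234567'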
class Lib(object):
pass
# Will contain C functions after initialization.
ejdb = Lib()
bson = Lib()
tc = Lib()
initialized = False
def init(ejdb_path=None):
ejdb_path = (
ejdb_path
or os.environ.get('CTYPES_EJDB_PATH')
or read_ejdb_config()
or ctypes.util.find_library('ejdb')
)
if not ejdb_path: # pragma: no cover
raise RuntimeError('EJDB binary not found')
# Resolve lib path to package root.
ejdb_path = os.path.join(get_package_root(), ejdb_path)
# Access to the C library.
_ = ctypes.cdll.LoadLibrary(ejdb_path)
ejdb.version = _.ejdbversion
ejdb.version.argtypes = []
ejdb.version.restype = ctypes.c_char_p
# TODO: Expose `ejdbformatversion` as a tuple (int, int, int) when it's
# available.
ejdb.isvalidoidstr = _.ejdbisvalidoidstr
ejdb.isvalidoidstr.argtypes = [ctypes.c_char_p]
ejdb.isvalidoidstr.restype = ctypes.c_bool
ejdb.ecode = _.ejdbecode
ejdb.ecode.argtypes = [EJDBREF]
ejdb.ecode.restype = ctypes.c_int
ejdb.errmsg = _.ejdberrmsg
ejdb.errmsg.argtypes = [ctypes.c_int]
ejdb.errmsg.restype = ctypes.c_char_p
ejdb.del_ = _.ejdbdel
ejdb.del_.argtypes = [EJDBREF]
ejdb.del_.restype = None
ejdb.new = _.ejdbnew
ejdb.new.argtypes = []
ejdb.new.restype = EJDBREF
ejdb.close = _.ejdbclose
ejdb.close.argtypes = [EJDBREF]
ejdb.close.restype = ctypes.c_bool
ejdb.open = _.ejdbopen
ejdb.open.argtypes = [EJDBREF, ctypes.c_char_p, ctypes.c_int]
ejdb.open.restype = ctypes.c_bool
ejdb.isopen = _.ejdbisopen
ejdb.isopen.argtypes = [EJDBREF]
ejdb.isopen.restype = ctypes.c_bool
#
ejdb.getcoll = _.ejdbgetcoll
ejdb.getcoll.argtypes = [EJDBREF, ctypes.c_char_p]
ejdb.getcoll.restype = EJCOLLREF
ejdb.getcolls = _.ejdbgetcolls
ejdb.getcolls.argtypes = [EJDBREF]
ejdb.getcolls.restype = ctypes.c_void_p
ejdb.createcoll = _.ejdbcreatecoll
ejdb.createcoll.argtypes = [EJDBREF, ctypes.c_char_p, EJCOLLOPTSREF]
ejdb.createcoll.restype = EJCOLLREF
ejdb.rmcoll = _.ejdbrmcoll
ejdb.rmcoll.argtypes = [EJDBREF, ctypes.c_char_p, ctypes.c_bool]
ejdb.rmcoll.restype = ctypes.c_bool
ejdb.savebson2 = _.ejdbsavebson2
ejdb.savebson2.argtypes = [EJCOLLREF, BSONREF, BSONOIDREF, ctypes.c_bool]
ejdb.savebson2.restype = ctypes.c_bool
ejdb.rmbson = _.ejdbrmbson
ejdb.rmbson.argtypes = [EJCOLLREF, BSONOIDREF]
ejdb.rmbson.restype = ctypes.c_bool
ejdb.loadbson = _.ejdbloadbson
ejdb.loadbson.argtypes = [EJCOLLREF, BSONOIDREF]
ejdb.loadbson.restype = BSONREF
ejdb.setindex = _.ejdbsetindex
ejdb.setindex.argtypes = [EJCOLLREF, ctypes.c_char_p, ctypes.c_int]
ejdb.setindex.restype = ctypes.c_bool
ejdb.meta = _.ejdbmeta
ejdb.meta.argtypes = [EJDBREF]
ejdb.meta.restype = BSONREF
#
ejdb.createquery = _.ejdbcreatequery
ejdb.createquery.argtypes = [
EJDBREF, BSONREF, BSONREF, ctypes.c_int, BSONREF,
]
ejdb.createquery.restype = EJQREF
ejdb.querydel = _.ejdbquerydel
ejdb.querydel.argtypes = [EJQREF]
ejdb.querydel.restype = None
ejdb.qryexecute = _.ejdbqryexecute
ejdb.qryexecute.argtypes = [
EJCOLLREF,
EJQREF, # The query.
ctypes.POINTER(ctypes.c_uint32), # Will hold the output count.
ctypes.c_int, # If set to `JBQRYCOUNT`, only performs counting.
TCXSTRREF, # Optional debug logging output.
]
ejdb.qryexecute.restype = EJQRESULT
#
ejdb.tranbegin = _.ejdbtranbegin
ejdb.tranbegin.argtypes = [EJCOLLREF]
ejdb.tranbegin.restype = ctypes.c_bool
ejdb.trancommit = _.ejdbtrancommit
ejdb.trancommit.argtypes = [EJCOLLREF]
ejdb.trancommit.restype = ctypes.c_bool
ejdb.tranabort = _.ejdbtranabort
ejdb.tranabort.argtypes = [EJCOLLREF]
ejdb.tranabort.restype = ctypes.c_bool
ejdb.transtatus = _.ejdbtranstatus
ejdb.transtatus.argtypes = [EJCOLLREF, ctypes.POINTER(ctypes.c_bool)]
ejdb.transtatus.restype = ctypes.c_bool
ejdb.syncdb = _.ejdbsyncdb
ejdb.syncdb.argtypes = [EJDBREF]
ejdb.syncdb.restype = ctypes.c_bool
tc.listdel = _.tclistdel
tc.listdel.argtypes = [TCLISTREF]
tc.listdel.restype = None
tc.listnum = _.tclistnum
tc.listnum.argtypes = [TCLISTREF]
tc.listnum.restype = ctypes.c_int
# Return type in the original tcutil.h declaration is char *, but it really
# is a data array, so we use c_void_p here to prevent Python casting it to
# bytes. Consumer of this method should use ctypes.string_at or ctypes.cast
# to get the content.
tc.listval2 = _.tclistval2
tc.listval2.argtypes = [TCLISTREF, ctypes.c_int]
tc.listval2.restype = ctypes.c_void_p
bson.create = _.bson_create
bson.create.argtypes = []
bson.create.restype = BSONREF
bson.del_ = _.bson_del
bson.del_.argtypes = [BSONREF]
bson.del_.restype = None
bson.init = _.bson_init
bson.init.argtypes = [BSONREF]
bson.init.restype = None
bson.init_as_query = _.bson_init_as_query
bson.init_as_query.argtypes = [BSONREF]
bson.init_as_query.restype = None
# Second arg type in the original bson.h declaration is char *, but it
# really is a data pointer, so we use c_void_p here.
bson.init_on_stack = _.bson_init_on_stack
bson.init_on_stack.argtypes = [
BSONREF, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
]
bson.init_on_stack.restype = None
bson.finish = _.bson_finish
bson.finish.argtypes = [BSONREF]
bson.finish.restype = ctypes.c_int
bson.append_oid = _.bson_append_oid
bson.append_oid.argtypes = [BSONREF, ctypes.c_char_p, BSONOIDREF]
bson.append_oid.restype = ctypes.c_int
bson.append_int = _.bson_append_int
bson.append_int.argtypes = [BSONREF, ctypes.c_char_p, ctypes.c_int]
bson.append_int.restype = ctypes.c_int
bson.append_long = _.bson_append_long
bson.append_long.argtypes = [BSONREF, ctypes.c_char_p, ctypes.c_int64]
bson.append_long.restype = ctypes.c_int
bson.append_double = _.bson_append_double
bson.append_double.argtypes = [BSONREF, ctypes.c_char_p, ctypes.c_double]
bson.append_double.restype = ctypes.c_int
bson.append_string_n = _.bson_append_string_n
bson.append_string_n.argtypes = [
BSONREF, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int,
]
bson.append_string_n.restype = ctypes.c_int
# Type of the third argument in the original bson.h declaration is char,
"""
result = RectangleList()
islands = list(self.islands(useexitstatus=True))
onepixel = self.onepixel
gxdict = collections.defaultdict(dict)
gydict = collections.defaultdict(dict)
primaryregionsx = {}
primaryregionsy = {}
shape = {tuple(r.shape) for r in self.rectangles}
if len(shape) > 1:
raise ValueError("Some rectangles have different shapes")
shape = shape.pop()
for gc, island in enumerate(islands, start=1):
rectangles = [self.rectangles[self.rectangledict[n]] for n in island]
for i, (primaryregions, gdict) in enumerate(zip((primaryregionsx, primaryregionsy), (gxdict, gydict))):
#find the gx and gy that correspond to cx and cy
average = []
cs = sorted({r.cxvec[i] for r in rectangles})
for g, c in enumerate(cs, start=1):
gdict[gc][c] = g
theserectangles = [r for r in rectangles if r.cxvec[i] == c]
average.append(np.mean(units.nominal_values([self.x(r)[i] for r in theserectangles])))
#find mx1, my1, mx2, my2
#the middle ones come from the average positions of the HPFs on either side
primaryregions[gc] = [(x1+shape[i] + x2)/2 for x1, x2 in more_itertools.pairwise(average)]
if len(primaryregions[gc]) >= 2:
#the outer ones come from fitting a line to the middle ones
m, b = units.np.polyfit(
x=range(1, len(average)),
y=primaryregions[gc],
deg=1,
)
primaryregions[gc].insert(0, m*0+b)
primaryregions[gc].append(m*len(average)+b)
else:
#can't fit a line because there are only at most 2 rows/columns, so do an approximation
allcs = sorted({r.cxvec[i] for r in self.rectangles})
mindiff = min(np.diff(allcs))
divideby = 1
while mindiff / divideby > shape[i]:
divideby += 1
mindiff /= divideby
if len(primaryregions[gc]) == 1:
primaryregions[gc].insert(0, primaryregions[gc][0] - mindiff)
primaryregions[gc].append(primaryregions[gc][1] + mindiff)
else: #len(primaryregions[gc]) == 0
primaryregions[gc].append(average[0] + (shape[i] - mindiff) / 2)
primaryregions[gc].append(average[0] + (shape[i] + mindiff) / 2)
mx1 = {}
mx2 = {}
my1 = {}
my2 = {}
#set gx, gy, mx1, my1, mx2, my2 for the HPFs
for i, island in enumerate(islands, start=1):
for rid in island:
r = self.rectangles[self.rectangledict[rid]]
gx = gxdict[i][r.cx]
gy = gydict[i][r.cy]
mx1[rid] = primaryregionsx[i][gx-1]
mx2[rid] = primaryregionsx[i][gx]
my1[rid] = primaryregionsy[i][gy-1]
my2[rid] = primaryregionsy[i][gy]
#see if the primary regions of any HPFs in different islands overlap
for (i1, island1), (i2, island2) in itertools.combinations(enumerate(islands, start=1), r=2):
if len(island1) == 1 or len(island2) == 1: continue #orphans are excluded
#first see if the islands overlap
x11 = min(primaryregionsx[i1])
x21 = max(primaryregionsx[i1])
x12 = min(primaryregionsx[i2])
x22 = max(primaryregionsx[i2])
y11 = min(primaryregionsy[i1])
y21 = max(primaryregionsy[i1])
y12 = min(primaryregionsy[i2])
y22 = max(primaryregionsy[i2])
#if a box around the islands overlaps in both x and y
if (
max(x21, x22) - min(x11, x12) + 1e-5*x11 < (x21 - x11) + (x22 - x12)
and max(y21, y22) - min(y11, y12) + 1e-5*x11 < (y21 - y11) + (y22 - y12)
):
self.__logger.info(f"Primary regions for islands {i1} and {i2} overlap in both x and y, seeing if any field primary regions overlap")
xoverlapstoadjust = collections.defaultdict(list)
yoverlapstoadjust = collections.defaultdict(list)
cornerstoadjust = collections.defaultdict(list)
for rid1, rid2 in itertools.product(island1, island2):
xx11 = mx1[rid1]
xx21 = mx2[rid1]
xx12 = mx1[rid2]
xx22 = mx2[rid2]
yy11 = my1[rid1]
yy21 = my2[rid1]
yy12 = my1[rid2]
yy22 = my2[rid2]
if (
max(xx21, xx22) - min(xx11, xx12) + 1e-5*x11 < (xx21 - xx11) + (xx22 - xx12)
and max(yy21, yy22) - min(yy11, yy12) + 1e-5*x11 < (yy21 - yy11) + (yy22 - yy12)
):
self.__logger.warningglobal(f"Primary regions for fields {rid1} and {rid2} overlap, adjusting them")
threshold = 100*onepixel
xs = ys = None
ridax = ridbx = riday = ridby = None
if abs(xx21 - xx12) <= threshold:
xs = xx12, xx21
ridax, ridbx = rid2, rid1
elif abs(xx11 - xx22) <= threshold:
xs = xx11, xx22
ridax, ridbx = rid1, rid2
if abs(yy21 - yy12) <= threshold:
ys = yy12, yy21
riday, ridby = rid2, rid1
elif abs(yy11 - yy22) <= threshold:
ys = yy11, yy22
riday, ridby = rid1, rid2
if xs is ys is None:
raise ValueError(f"Primary regions for fields {rid1} and {rid2} have too big of an overlap")
if xs is not None and ys is not None:
cornerstoadjust[xs, ys].append((ridax, ridbx, riday, ridby))
elif xs is not None:
xoverlapstoadjust[xs].append((ridax, ridbx))
elif ys is not None:
yoverlapstoadjust[ys].append((riday, ridby))
cornerxscounter = collections.Counter(xs for xs, ys in cornerstoadjust)
corneryscounter = collections.Counter(ys for xs, ys in cornerstoadjust)
xswith2corners = [xs for xs, count in cornerxscounter.items() if count >= 2]
yswith2corners = [ys for ys, count in corneryscounter.items() if count >= 2]
for (xs, ys), rids in cornerstoadjust.items():
if xs in xswith2corners:
xoverlapstoadjust[xs] += [(ridax, ridbx) for ridax, ridbx, riday, ridby in rids]
if ys in yswith2corners:
yoverlapstoadjust[ys] += [(riday, ridby) for ridax, ridbx, riday, ridby in rids]
for (xs, ys), rids in cornerstoadjust.items():
if xs in xoverlapstoadjust:
xoverlapstoadjust[xs] += [(ridax, ridbx) for ridax, ridbx, riday, ridby in rids]
if ys in yoverlapstoadjust:
yoverlapstoadjust[ys] += [(riday, ridby) for ridax, ridbx, riday, ridby in rids]
for ((oldmx1, oldmx2), (oldmy1, oldmy2)), rids in cornerstoadjust.items():
if xs in xoverlapstoadjust or ys in yoverlapstoadjust:
pass
elif oldmx1 - oldmx2 < oldmy1 - oldmy2:
xoverlapstoadjust[oldmx1, oldmx2] += rids
else:
yoverlapstoadjust[oldmy1, oldmy2] += rids
for (oldmx1, oldmx2), rids in xoverlapstoadjust.items():
for rid1, rid2 in rids:
newmx = (oldmx1 + oldmx2)/2
assert (mx1[rid1] == oldmx1 or mx1[rid1] == newmx) and (mx2[rid2] == oldmx2 or mx2[rid2] == newmx), (mx1[rid1], oldmx1, mx2[rid2], oldmx2, newmx)
mx1[rid1] = mx2[rid2] = newmx
for (oldmy1, oldmy2), rids in yoverlapstoadjust.items():
for rid1, rid2 in rids:
newmy = (oldmy1 + oldmy2)/2
assert (my1[rid1] == oldmy1 or my1[rid1] == newmy) and (my2[rid2] == oldmy2 or my2[rid2] == newmy), (my1[rid1], oldmy1, my2[rid2], oldmy2, newmy)
my1[rid1] = my2[rid2] = newmy
for rid1, rid2 in itertools.product(island1, island2):
xx11 = mx1[rid1]
xx21 = mx2[rid1]
xx12 = mx1[rid2]
xx22 = mx2[rid2]
yy11 = my1[rid1]
yy21 = my2[rid1]
yy12 = my1[rid2]
yy22 = my2[rid2]
if (
max(xx21, xx22) - min(xx11, xx12) + 1e-5*x11 < (xx21 - xx11) + (xx22 - xx12)
and max(yy21, yy22) - min(yy11, yy12) + 1e-5*x11 < (yy21 - yy11) + (yy22 - yy12)
):
raise ValueError(f"Primary regions for fields {rid1} and {rid2} still overlap")
#if there are any HPFs that are in the wrong quadrant (negative px or py), adjust the whole slide
minpxvec = [np.inf * onepixel, np.inf * onepixel]
for rectangle in self.rectangles:
for gc, island in enumerate(islands, start=1):
if rectangle.n in island:
break
else:
assert False
gx = gxdict[gc][rectangle.cx]
gy = gydict[gc][rectangle.cy]
pxvec = self.x(rectangle) - self.origin
minpxvec = np.min([minpxvec, units.nominal_values(pxvec)], axis=0)
result.append(
Field(
rectangle=rectangle,
ixvec=floattoint(np.round((rectangle.xvec / onepixel).astype(float))) * onepixel,
gc=0 if len(island) == 1 else gc,
pxvec=pxvec,
gxvec=(gx, gy),
primaryregionx=np.array([mx1[rectangle.n], mx2[rectangle.n]]) - self.origin[0],
primaryregiony=np.array([my1[rectangle.n], my2[rectangle.n]]) - self.origin[1],
readingfromfile=False,
)
)
minx, miny = np.floor(minpxvec/(100*onepixel))*100*onepixel
if minx > 0: minx = 0
if miny > 0: miny = 0
if minx or miny:
self.__logger.warningglobal(f"Some HPFs have (x, y) < (xposition, yposition), shifting the whole slide by {-minx, -miny}")
for f in result:
f.pxvec -= (minx, miny)
f.primaryregionx -= minx
f.primaryregiony -= miny
return result
@property
def fields(self):
return self.__fields()
def writetable(self, *filenames, rtol=1e-3, atol=1e-5, check=False, **kwargs):
"""
Write the affine, fields, and fieldoverlaps csvs
check: cross check that reading the csvs back gives the same result
this is nontrivial because we lose part of the covariance matrix
(cross talk between non-adjacent HPFs and between HPFs and the affine
matrix) in this procedure
rtol, atol: tolerance for the cross check
"""
affinefilename, fieldsfilename, fieldoverlapfilename = filenames
fields = self.fields
writetable(fieldsfilename, fields, rowclass=Field, **kwargs)
affine = []
n = 0
for rowcoordinate, row in zip("xy", self.T):
for columncoordinate, entry in zip("xy", row):
n += 1
affine.append(
AffineNominalEntry(
n=n,
matrixentry=entry,
description="a"+rowcoordinate+columncoordinate
)
)
for entry1, entry2 in itertools.combinations_with_replacement(affine[:], 2):
n += 1
affine.append(AffineCovarianceEntry(n=n, entry1=entry1, entry2=entry2))
writetable(affinefilename, affine, rowclass=AffineEntry, **kwargs)
writetable(fieldoverlapfilename, self.fieldoverlaps, **kwargs)
if check:
self.__logger.debug("reading back from the file")
readback = ReadStitchResult(*filenames, rectangles=self.rectangles, overlaps=self.overlaps, origin=self.origin, logger=self.__logger)
self.__logger.debug("done reading")
x1 = self.x()
T1 = self.T
x2 = readback.x()
T2 = readback.T
self.__logger.debug("comparing nominals")
units.np.testing.assert_allclose(units.nominal_values(x1), units.nominal_values(x2), atol=atol, rtol=rtol)
units.np.testing.assert_allclose(units.nominal_values(T1), units.nominal_values(T2), atol=atol, rtol=rtol)
self.__logger.debug("comparing individual errors")
units.np.testing.assert_allclose(units.std_devs(x1), units.std_devs(x2), atol=atol, rtol=rtol)
units.np.testing.assert_allclose(units.std_devs(T1), units.std_devs(T2), atol=atol, rtol=rtol)
self.__logger.debug("comparing overlap errors")
for o in self.overlaps:
units.np.testing.assert_allclose(units.covariance_matrix(self.dx(o)), units.covariance_matrix(readback.dx(o)), atol=atol, rtol=rtol)
self.__logger.debug("done")
class StitchResultFullCovariance(StitchResultBase):
"""
Base class for a stitch result that has the full covariance matrix
"""
def __init__(self, *,
#! /usr/bin/env python3
import collections
import datetime
import pprint
import re
import urllib.parse
import uuid
import voluptuous as v
import pymongo
import bson
s_old_story_base = v.Schema({
"_id": bson.objectid.ObjectId,
"id": str,
"url": v.Url(),
"title": str,
"scraped": v.Any(datetime.datetime, None),
"metadata": dict,
"created_at": datetime.datetime,
"publication_date": v.Any(datetime.datetime, None),
}, required=True)
s_old_story = v.Any(
s_old_story_base.extend({
"metadata": {},
"closedAt": v.Any(datetime.datetime, None),
"closedMessage": None,
"settings": dict,
"tags": [],
"type": "assets",
"updated_at": datetime.datetime,
"author": str,
"description": str,
"image": v.Url(),
"modified_date": None,
"section": str,
}, required=True),
s_old_story_base.extend({
"metadata": v.Schema({"source": "wpimport"}, required=True),
}),
)
s_old_user_import = v.Schema({
"_id": bson.objectid.ObjectId,
"id": str,
"username": str,
"lowercaseUsername": str,
"profiles": [v.Schema({
"provider": "disqus",
"id": str,
}, required=True)],
"metadata": v.Schema({"source": "wpimport", v.Optional("trust"): dict}, required=True),
"created_at": datetime.datetime,
v.Optional("updated_at"): datetime.datetime,
v.Optional("action_counts"): dict, # can skip importing this I think
}, required=True)
s_old_user_organic = v.Schema({
"_id": bson.objectid.ObjectId,
"status": v.Schema({
"username": v.Schema({
"status": v.Any("SET", "UNSET", "APPROVED"),
"history": [v.Schema({
"assigned_by": v.Any(None, str),
"_id": bson.objectid.ObjectId,
"status": v.Any("SET", "UNSET", "REJECTED", "CHANGED", "APPROVED"),
"created_at": datetime.datetime,
}, required=True)],
}, required=True),
"banned": v.Schema({
"status": bool,
"history": [v.Schema({
"assigned_by": str,
"message": str,
"_id": bson.objectid.ObjectId,
"status": bool,
"created_at": datetime.datetime,
}, required=True)],
}, required=True),
"suspension": v.Schema({
"until": None,
"history": [],
}, required=True),
"alwaysPremod": v.Schema({
"status": bool,
"history": [v.Schema({
"assigned_by": str,
"_id": bson.objectid.ObjectId,
"status": bool,
"created_at": datetime.datetime,
}, required=True)],
}, required=True),
}, required=True),
"role": v.Any("ADMIN", "STAFF", "COMMENTER", "MODERATOR"),
"ignoresUsers": [str],
"username": str,
"lowercaseUsername": str,
v.Optional("password"): str,
"profiles": [v.Any(
v.Schema({
"id": str,
"provider": v.Any("google", "facebook"),
}, required=True),
v.Schema({
"id": str,
"provider": "local",
v.Optional("metadata"): v.Schema({
"confirmed_at": datetime.datetime,
"recaptcha_required": bool,
}, required=False),
}, required=True),
)],
"id": str,
"tokens": [],
"tags": [],
"created_at": datetime.datetime,
"updated_at": datetime.datetime,
"__v": int, # version
v.Optional("metadata"): v.Schema({
"avatar": v.Any(v.Url(), '', re.compile(r"data:").match),
"lastAccountDownload": datetime.datetime,
"notifications": {
"settings": {
"onReply": bool,
"onFeatured": bool,
"digestFrequency": v.Any("HOURLY", "DAILY", "NONE"),
},
"digests": [dict], # only a couple of users have this set - I think it's for buffering notifs, so can ignore
},
"trust": {
"comment": {"karma": int},
"flag": {"karma": int},
},
"scheduledDeletionDate": datetime.datetime, # best to handle this at import time I think
}, required=False),
v.Optional("action_counts"): dict, # can skip importing this I think
}, required=True)
s_old_user = v.Any(s_old_user_organic, s_old_user_import)
s_old_comment_base = v.Schema({
"_id": bson.objectid.ObjectId,
"status": v.Any("ACCEPTED", "REJECTED", "NONE"),
v.Optional("status_history"): [v.Schema({
"assigned_by": v.Any(None, str),
"type": v.Any("ACCEPTED", "NONE", "REJECTED", "SYSTEM_WITHHELD"),
"created_at": datetime.datetime,
}, required=True)],
"id": str,
"author_id": str,
"parent_id": v.Any(None, str),
"created_at": datetime.datetime,
"updated_at": datetime.datetime,
"asset_id": str,
"body": str,
"reply_count": int,
v.Optional("action_counts"): v.Schema({
"respect": int,
"flag": int,
"flag_comment_other": int,
"flag_comment_offensive": int,
"flag_spam_comment": int,
"dontagree": int,
"flag_trust": int,
"flag_comment_spam": int,
}, required=False),
}, required=True)
s_old_comment_import = s_old_comment_base.extend({
"metadata": v.Schema({
"richTextBody": str,
"source": "wpimport",
}, required=True),
}, required=True)
s_old_comment_organic = s_old_comment_base.extend({
"body_history": [v.Schema({
"_id": bson.objectid.ObjectId,
"body": str,
"created_at": datetime.datetime,
}, required=True)],
"tags": [v.Schema({
"assigned_by": str,
"tag": {
"permissions": {
"public": True, "roles": [v.Any("ADMIN", "MODERATOR")], "self": bool,
},
"models": ["COMMENTS"],
"name": v.Any("STAFF", "OFF_TOPIC", "FEATURED"),
"created_at": datetime.datetime,
},
"created_at": datetime.datetime,
}, required=True)],
"metadata": v.Schema({
"richTextBody": str,
v.Optional("akismet"): bool,
}, required=True),
"__v": int,
}, required=True)
s_old_comment_deleted = v.Schema({
"_id": bson.objectid.ObjectId,
"id": str,
"body": None,
"body_history": [],
"asset_id": str,
"author_id": None,
"status_history": [],
"status": "ACCEPTED",
"parent_id": v.Any(None, str),
"reply_count": int,
"action_counts": {},
"tags": [],
"metadata": {},
"deleted_at": datetime.datetime,
"created_at": datetime.datetime,
"updated_at": datetime.datetime,
}, required=True)
s_old_comment = v.Any(s_old_comment_organic, s_old_comment_import, s_old_comment_deleted)
s_old_action_ignore = v.Schema({
"action_type": v.Any("FLAG", "DONTAGREE"),
}, extra=v.ALLOW_EXTRA)
s_old_action_respect = v.Schema({
"_id": bson.objectid.ObjectId,
"action_type": "RESPECT",
"group_id": None,
"item_id": str,
"item_type": "COMMENTS",
"user_id": str,
"__v": int,
"created_at": datetime.datetime,
"id": str,
"metadata": {},
"updated_at": datetime.datetime,
}, required=True)
s_old_action = v.Any(s_old_action_ignore, s_old_action_respect)
c = pymongo.MongoClient()
olddb = c.talk
newdb = c.coral
tenantID = newdb.tenants.find_one()["id"]
site = newdb.sites.find_one()
siteID = site["id"]
site["commentCounts"]["action"]["REACTION"] = 0
for k in site["commentCounts"]["status"]:
site["commentCounts"]["status"][k] = 0
site["commentCounts"]["moderationQueue"]["total"] = 0
for k in site["commentCounts"]["moderationQueue"]["queues"]:
site["commentCounts"]["moderationQueue"]["queues"][k] = 0
print("translating stories...")
stories = []
stories_by_id = {}
stories_unicode = collections.defaultdict(list)
stories_unicode_replace = {}
stories_http = set()
def normalise(url):
replacedhttp = False
if url.startswith("http://"):
url = "https://" + url[7:]
replacedhttp = True
if not url.startswith("https://www.angrymetalguy.com"):
print(url)
assert False
host, path = url[:29], url[29:]
if "%" in path:
return host + urllib.parse.quote(urllib.parse.unquote(path)), True, replacedhttp
for i in path:
if ord(i) > 127:
return host + urllib.parse.quote(path), True, replacedhttp
if replacedhttp:
return url, True, True
return url, False, False
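# Illustrative example of what normalise() returns (the path is made up; the host
# is the one this script already assumes). An http URL containing a non-ASCII
# character is upgraded to https and percent-encoded:
#
#   normalise("http://www.angrymetalguy.com/réview")
#   # -> ("https://www.angrymetalguy.com/r%C3%A9view", True, True)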
for story in olddb.assets.find():
# things that need filling in later:
# commentCounts, lastCommentedAt
url, normed, httpnormed = normalise(story["url"])
if normed or url in stories_http:
stories_unicode[url].append(story["id"])
if httpnormed:
# ugh this is horrible
stories_http.add(url)
for story_exist in stories:
if url == story_exist["url"]:
stories_unicode[url].append(story_exist["id"])
continue
if story["scraped"] is None and story["metadata"].get("source", None) is None:
#print("skipping probable unicode wonkiness", story["url"])
continue
if story.get("title", "").startswith("Page Not Found"):
# spot of data cleaning while we're here
continue
try:
s_old_story(story)
except v.MultipleInvalid as e:
pprint.pp(story)
for i in e.errors:
print(i)
raise
except v.Invalid:
pprint.pp(story)
raise
if story.get("settings", {}) != {}:
print("non-empty settings on", story["url"])
pprint.pp(story["settings"])
s = {
"tenantID": tenantID,
"siteID": siteID,
"url": story["url"],
"commentCounts": {
"action": {
"REACTION": 0,
},
"status": {
"APPROVED": 0,
"NONE": 0,
"PREMOD": 0,
"REJECTED": 0,
"SYSTEM_WITHHELD": 0,
},
"moderationQueue": {
"total": 0,
"queues": {
"unmoderated": 0,
"reported": 0,
"pending": 0,
},
},
},
"settings": {},
"createdAt": story["created_at"],
"id": story["id"],
"metadata": {
"title": story["title"],
"publishedAt": story["publication_date"],
# author, description, image
},
"scrapedAt": story["scraped"],
#"updatedAt": ,
"lastCommentedAt": None,
}
if "source" in story["metadata"]:
s["metadata"]["source"] = story["metadata"]["source"]
for i in ["author", "description", "image"]:
if i in story:
s["metadata"][i] = story[i]
stories.append(s)
stories_by_id[s["id"]] = s
print("\nfixing unicode stories...")
rewritten = 0
redirected = 0
for url, ids in stories_unicode.items():
if len(ids) == 1:
s = stories_by_id[ids[0]]
if s["url"] != url:
print("rewrote", s["url"], "to", url)
s["url"] = url
rewritten += 1
else:
redirected += 1
found_correct = None
for id in ids:
if id not in stories_by_id:
# wonky record that was skipped
continue
s = stories_by_id[id]
if s["url"] == url:
found_correct = id
print("mapping wonky urls to", url)
break
else:
print("no correct url for", url)
found_correct = s["id"]
s["url"] = url
for id in ids:
if id == found_correct:
continue
stories_unicode_replace[id] = found_correct
if id in stories_by_id:
del stories_by_id[id]
stories = list(stories_by_id.values())
print("rewrote", rewritten, "to correct url normalisation")
print("redirected", redirected, "to correct url normalisation")
print("\ntranslating users...")
users = []
users_by_id = {}
deletedusers = set()
for user in olddb.users.find():
# things that need filling in later:
# commentCounts
# things that might need filling in now but I have skipped:
# the various history fields in status
try:
s_old_user(user)
except v.MultipleInvalid as e:
pprint.pp(user)
for i in e.errors:
print(i)
raise
except v.Invalid:
pprint.pp(user)
raise
assert len(user["profiles"]) == 1
if user.get("metadata", {}).get("avatar", "").startswith("data"):
print("data url for", user["username"])
u = {
"tenantID": tenantID,
"tokens": [],
"ignoredUsers": [],
"status": {
"username": {"history": []},
"suspension": {"history": []},
"ban": {"active": False, "history": []},
"premod": {"active": False, "history": []},
"warning": {"active": False, "history": []},
},
"notifications": {
"onReply": False,
"onFeatured": False,
"onModeration": False,
"onStaffReplies": False,
"digestFrequency": "NONE",
},
"moderatorNotes": [],
"digests": [],
"createdAt": user["created_at"],
"commentCounts": {
"status": {
"APPROVED": 0,
"NONE": 0,
"PREMOD": 0,
"REJECTED": 0,
"SYSTEM_WITHHELD": 0,
},
},
# email
"username": user["username"],
"role": user.get("role", "COMMENTER"),
"profiles": [],
"id": user["id"],
# emailVerified
"metadata": {},
}
if user.get("metadata", {}).get("source", "") == "wpimport":
u["profiles"].append({"type": user["profiles"][0]["provider"], "id":user["profiles"][0]["id"]})
u["metadata"]["source"] = "wpimport"
else:
if user["status"]["banned"]["status"]:
u["status"]["ban"]["active"] = True
if user["status"]["alwaysPremod"]["status"]:
u["status"]["premod"]["active"] = True
if user["ignoresUsers"]:
ignore = []
for iid in user["ignoresUsers"]:
ignore.append({"id": iid, "createdAt": datetime.datetime.today()})
u["ignoredUsers"] = ignore
prof = user["profiles"][0]
p = {}
if prof["provider"] == "local":
p["type"] = "local"
p["id"] = prof["id"]
p["password"] = user["password"]
p["passwordID"] = str(<KEY>
u["email"] = prof["id"]
u["emailVerified"] = "confirmed_at" in prof.get("metadata", {})
else:
p["type"] = prof["provider"]
p["id"] = prof["id"]
u["profiles"].append(p)
meta = user.get("metadata", {})
if "avatar" in meta:
av = meta["avatar"]
if av != "" and not av.startswith("data:"):
u["avatar"] = meta["avatar"]
if "notifications" in meta:
if meta["notifications"]["settings"].get("onReply", False):
u["notifications"]["onReply"] = True
if meta["notifications"]["settings"].get("onFeatured", False):
u["notifications"]["onFeatured"] = True
dig = meta["notifications"]["settings"].get("digestFrequency", "NONE")
u["notifications"]["digestFrequency"] = dig
if "scheduledDeletionDate" in meta:
deletedusers.add(user["id"])
continue
users.append(u)
users_by_id[u["id"]] = u
print("\ntranslating comments...")
comments = []
comments_by_id = {}
for comment in olddb.comments.find():
# things that need filling in later:
# childIDs, childCount, ancestorIDs
try:
s_old_comment(comment)
except v.MultipleInvalid as e:
pprint.pp(comment)
for i in e.errors:
print(i)
raise
except v.Invalid:
pprint.pp(comment)
raise
if comment["asset_id"] in stories_unicode_replace:
comment["asset_id"] = stories_unicode_replace[comment["asset_id"]]
if comment["asset_id"] not in stories_by_id:
# story skipped due to unicode issues, ignore comments
continue
c = {
"id": comment["id"],
"tenantID": tenantID,
"childIDs": [],
"childCount": 0,
# pysrc/generator.py
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
############################################################################
## this is the main generator and the decoder generator.
## the main data structures are:
##
## class all_generator_info_t(object):
## the catch-all for all state -- at least it tries to be.
##
## class generator_common_t(object):
## class generator_info_t(generator_common_t):
## each generator has a parser
##
## class parser_t(object):
##
## which contains:
##
## class partitionable_info_t(object):
## class instruction_info_t(partitionable_info_t):
##
## class nonterminal_info_t(object):
## class nonterminal_dict_t(object):
## class bits_list_t(object):
##
## contains a list of:
##
## class bit_info_t(object):
## class state_info_t(object):
## class prebinding_t(object):
## class opnds.operand_info_t(object):
## class graph_node(object):
## class code_gen_dec_args_t(object):
## class table_init_object_t(object):
## class bit_group_info_t(object):
## class reg_info_t(object):
## class width_info_t(object):
############################################################################
from __future__ import print_function
import os
import sys
import copy
import glob
import re
import optparse
import collections
def find_dir(d):
directory = os.getcwd()
last = ''
while directory != last:
target_directory = os.path.join(directory,d)
if os.path.exists(target_directory):
return target_directory
last = directory
directory = os.path.split(directory)[0]
return None
mbuild_install_path = os.path.join(os.path.dirname(sys.argv[0]), '..', 'mbuild')
if not os.path.exists(mbuild_install_path):
mbuild_install_path = find_dir('mbuild')
sys.path= [mbuild_install_path] + sys.path
try:
import mbuild
except:
sys.stderr.write("\nERROR(generator.py): Could not find mbuild. " +
"Should be a sibling of the xed directory.\n\n")
sys.exit(1)
xed2_src_path = os.path.join(os.path.dirname(sys.argv[0]))
if not os.path.exists(xed2_src_path):
xed2_src_path = find_dir('xed2')
sys.path= [ xed2_src_path ] + sys.path
sys.path= [ os.path.join(xed2_src_path,'pysrc') ] + sys.path
from genutil import *
import genutil
import operand_storage
import slash_expand
import flag_gen
from verbosity import *
import opnds
import opnd_types
import cpuid_rdr
import map_info_rdr
send_stdout_message_to_file = False
if send_stdout_message_to_file:
fn = "out"
set_msgs(open(fn,"w"))
sys.stderr.write("Writing messages to file: [" + fn + "]\n")
check_python_version(2,4)
from codegen import *
import metaenum
import enum_txt_writer
import chipmodel
import ctables
import ild
import refine_regs
import classifier
#####################################################################
## OPTIONS
#####################################################################
def setup_arg_parser():
arg_parser = optparse.OptionParser()
arg_parser.add_option('--debug',
action='store_true',
dest='debug',
default=False,
help='Start PDB debugger')
arg_parser.add_option('--limit-enum-strings',
action='store_true',
dest='limit_enum_strings',
default=False,
help='Save space by limiting the enum strings')
arg_parser.add_option('--gendir',
action='store',
dest='gendir',
default='gen',
help='Directory for generated files')
arg_parser.add_option('--xeddir',
action='store',
dest='xeddir',
default='',
help='Directory for generated files')
arg_parser.add_option('--input-regs',
action='store',
dest='input_regs',
default='',
help='Register input file')
arg_parser.add_option('--input-widths',
action='store',
dest='input_widths',
default='',
help='Widths input file')
arg_parser.add_option('--input-extra-widths',
action='store',
dest='input_extra_widths',
default='',
help='Extra widths input file')
arg_parser.add_option('--input-element-types',
action='store',
dest='input_element_types',
default='',
help='File with mappings from type names to' +
' widths and base element types')
arg_parser.add_option('--input-element-type-base',
action='store',
dest='input_element_type_base',
default='',
help='new chunk for element type enum')
arg_parser.add_option('--input-pointer-names',
action='store',
dest='input_pointer_names',
default='',
help='Pointer names input file for disassembly')
arg_parser.add_option('--input-fields',
action='store',
dest='input_fields',
default='',
help='Operand storage description input file')
arg_parser.add_option('--input',
action='store',
dest='input',
default='',
help='Input file')
arg_parser.add_option('--input-state',
action='store',
dest='input_state',
default='xed-state-bits.txt',
help='state input file')
arg_parser.add_option('--inst',
action='store',
dest='inst_init_file',
default='xed-init-inst-table.c',
help='Instruction table init file')
arg_parser.add_option('--sout',
action='store',
dest='structured_output_fn',
default='xed-sout.txt',
help='Emit structured output file')
arg_parser.add_option('--patterns',
action='store',
dest='structured_input_fn',
default='',
help='Read structured input file')
arg_parser.add_option('--chip-models',
action='store',
dest='chip_models_input_fn',
default='',
help='Chip models input file name')
arg_parser.add_option('--ctables',
action='store',
dest='ctables_input_fn',
default='',
help='Conversion tables input file name')
arg_parser.add_option('--isa',
action='store',
dest='isa_input_file',
default='',
help='Read structured input file containing' +
' the ISA INSTRUCTIONS() nonterminal')
arg_parser.add_option('--spine',
action='store',
dest='spine',
default='',
help='Read the spine file containing the' +
' top-most decoder nonterminal')
arg_parser.add_option('--print-graph',
action='store_true',
dest='print_graph',
default=False,
help='Print the graph for each nonterminal (big)')
arg_parser.add_option('--verbosity', '--verbose', '-v',
action='append',
dest='verbosity',
default=[],
help='Level of verbosity, repeatable. ' +
' Values=1..7, enc,merge')
arg_parser.add_option('--no-imm-suffix',
action='store_false',
dest='add_suffix_to_imm',
default=True,
help='Omit width suffixes from iforms')
arg_parser.add_option('--cpuid',
action='store',
dest='cpuid_input_fn',
default='',
help='isa-set to cpuid map input file')
arg_parser.add_option('--map-descriptions',
action='store',
dest='map_descriptions_input_fn',
default='',
help='map descriptions input file')
arg_parser.add_option("--compress-operands",
action="store_true",
dest="compress_operands",
default=False,
help="use bit-fields to compress the "+
"operand storage.")
arg_parser.add_option("--add-orphan-inst-to-future-chip",
action="store_true",
dest="add_orphan_inst_to_future_chip",
default=False,
help="Add orphan isa-sets to future chip definition.")
return arg_parser
#####################################################################
header_pattern = re.compile(r'[.][Hh]$')
def is_header(fn):
global header_pattern
if header_pattern.search(fn):
return True
return False
############################################################################
# Compiled patterns used in this program
############################################################################
delete_iclass_pattern = re.compile('^DELETE')
delete_iclass_full_pattern = \
re.compile(r'^DELETE[ ]*[:][ ]*(?P<iclass>[A-Za-z_0-9]+)')
udelete_pattern = re.compile('^UDELETE')
udelete_full_pattern = \
re.compile(r'^UDELETE[ ]*[:][ ]*(?P<uname>[A-Za-z_0-9]+)')
operand_token_pattern = re.compile('OPERAND')
underscore_pattern = re.compile(r'_')
invert_pattern = re.compile(r'[!]')
instructions_pattern = re.compile(r'INSTRUCTIONS')
equals_pattern = re.compile(r'(?P<lhs>[^!]+)=(?P<rhs>.+)')
not_equals_pattern = re.compile(r'(?P<lhs>[^!]+)!=(?P<rhs>.+)')
quick_equals_pattern= re.compile(r'=')
colon_pattern= re.compile(r'[:]')
bits_and_letters_underscore_pattern = re.compile(r'^[10a-z_]+$')
hex_pattern = re.compile(r'0[xX][0-9A-Fa-f]+')
slash_macro_pattern = re.compile(r'([a-z][/][0-9]{1,2})')
nonterminal_string = r'([A-Z][a-zA-Z0-9_]*)[(][)]'
parens_to_end_of_line = re.compile(r'[(][)].*::.*$') # with double colon
lookupfn_w_args_pattern = re.compile(r'[\[][a-z]+]')
#nonterminal_start_pattern=re.compile(r'^' + nonterminal_string + r'\s*::')
nonterminal_start_pattern=re.compile(r'::')
nonterminal_pattern=re.compile(nonterminal_string)
nonterminal_parens_pattern = re.compile(r'[(][^)]*[)]')
binary_pattern = re.compile(r'^[01_]+$') # only 1's and 0's
formal_binary_pattern = re.compile(r'^0b[01_]+$') # only 1's and 0's leading 0b
one_zero_pattern = re.compile(r'^[01]') # just a leading 0 or 1
completely_numeric = re.compile(r'^[0-9]+$') # only numbers
# things identified by the restriction_pattern are the operand deciders:
restriction_pattern = re.compile(r'([A-Z0-9_]+)(!=|=)([bx0-9A-Z_]+)')
all_caps_pattern = re.compile(r'^[A-Z_0-9]+$')
not11_pattern = re.compile(r'NOT11[(]([a-z]{2})[)]')
letter_basis_pattern = re.compile(r'[a-z]')
all_zeros_pattern = re.compile(r'^[0]+$')
type_ending_pattern = re.compile(r'_t$')
uniq_pattern = re.compile(r'_uniq(.*)$')
ntwidth_pattern = re.compile('NTWIDTH')
paren_underscore_pattern = re.compile(r'[(][)][_]+')
all_lower_case_pattern = re.compile(r'^[a-z]+$')
pattern_binding_pattern = re.compile(
r'(?P<name>[A-Za-z_0-9]+)[\[](?P<bits>[A-Za-z01_]+)]')
uppercase_pattern = re.compile(r'[A-Z]')
reg_operand_name_pattern = re.compile("^REG(?P<regno>[0-9]+)$")
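# Illustrative example of what restriction_pattern (the operand-decider pattern
# above) captures; the token "MOD=3" is just a sample value:
#
#   restriction_pattern.search("MOD=3").groups()   # -> ('MOD', '=', '3')
#   restriction_pattern.search("MOD!=3").groups()  # -> ('MOD', '!=', '3')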
############################################################################
def comment(s):
return '/* {} */'.format(s)
def all_the_same(lst):
"return True if all the elements of the list are the same"
first = lst[0]
for x in lst:
if x != first:
return False
return True
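# Illustrative checks for all_the_same:
#   all_the_same([3, 3, 3])  # -> True
#   all_the_same([3, 3, 1])  # -> False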
def pad_to_multiple_of_8bits(x):
ilen = len(x)
frac = ilen & 7
if frac == 0:
return x
t = []
while frac < 8:
t.append('0')
frac = frac + 1
t.extend(x)
return t
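# Illustrative example: pad_to_multiple_of_8bits left-pads a bit list with '0'
# up to the next multiple of 8, e.g.
#   pad_to_multiple_of_8bits(['1', '0', '1'])
#   # -> ['0', '0', '0', '0', '0', '1', '0', '1']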
############################################################################
# $$ nonterminal_info_t
class nonterminal_info_t(object):
def __init__(self,name, type=None):
self.name = name
self.type = type
self.start_node = None
def set_start_node(self,n):
self.start_node = n
def is_lookup_function(self):
if self.type != None:
return True
return False
# $$ nonterminal_dict_t
class nonterminal_dict_t(object):
"""dictionary holding nonterminal information for code generation"""
def __init__(self):
# dictionary of nonterminal_info_t's by short name.
# nonterminal_info_t has {name, type, start_node}
self.nonterminal_info = {}
def keys(self):
return list(self.nonterminal_info.keys())
def add_graph_node(self, nt_name, node_id):
"""set the node id in the graph node"""
if nt_name not in self.nonterminal_info:
self.add_to_dict(nt_name)
n = self.nonterminal_info[nt_name]
n.start_node = node_id
def get_node(self,nt_name):
if nt_name in self.nonterminal_info:
return self.nonterminal_info[nt_name]
die("Did not find " + nt_name + " in the nonterminal dictionary.")
def add_to_dict(self,short_nt_name, nt_type=None):
msge("Adding " + short_nt_name + " to nonterminal dict")
#nonterminal_info_t has {name, type, start_node, encode, decoder}
new_nt = nonterminal_info_t(short_nt_name, nt_type)
self.nonterminal_info[short_nt_name] = new_nt
def record_nonterminal(self,nt_name, nt_type):
if nt_name:
if nt_name not in self.nonterminal_info:
#msge("Adding NT: " + nt_name)
self.add_to_dict(nt_name, nt_type)
else:
die("Bad nonterminal name")
############################################################################
# $$ bit_info_t
class bit_info_t(object):
"""The patterns are built up of bits of various kinds. Normal 1/0
bits have type 'bit'. The other kinds of bits are dontcares (which are
letter names), state bits, operand tests, and nonterminals.
"""
bit_types = [ 'bit', 'dontcare', 'operand', 'nonterminal' ]
def __init__(self, value, btype='bit', pbit=-1):
self.btype = btype # See bit_info_t.bit_types
self.value = value
# Physical bits are bits that are real. They are offsets from
# the beginnning of this nonterminal or the last nonterminal.
self.pbit = pbit
self.token = None # operand decider
self.test = None # eq or ne
self.requirement = None # the value the od must have (or not have)
if btype == 'operand':
# for operands, we split them in to a token name and a required value.
#search for FOO=233 or FOO!=233
m = restriction_pattern.search(value)
if not m:
die("bad operand decider: "+ value)
(token,test,requirement) = m.groups([0,1,2])
if vod():
msge("OperandDecider Token= " + token +
" Test= " + test + " Requirement= " + requirement)
self.token = token
self.requirement = make_numeric(requirement, value)
if test == '=':
self.test='eq'
else:
self.test='ne'
def __eq__(self,other):
if other == None:
return False
if self.value == other.value:
if self.btype == other.btype:
return True
return False
def __ne__(self,other):
if other == None:
return True
if self.value != other.value:
return True
if self.btype != other.btype:
return True
return False
def __str__(self):
s = self.btype + '/' + str(self.value)
if self.pbit != -1:
s += '/PBIT' + str(self.pbit)
return s
def just_bits(self):
return self.value
def is_nonterminal(self):
if self.btype == 'nonterminal':
return True
return False
def is_operand_decider(self):
if self.btype == 'operand':
return True
return False
def is_dont_care(self):
if self.btype == 'dontcare':
return True
return False
def is_real_bit(self):
if self.btype == 'dontcare' or self.btype == 'bit':
return True
return False
def is_one_or_zero(self):
if self.btype == 'bit':
return True
return False
def nonterminal_name(self):
if self.is_nonterminal():
g = nonterminal_pattern.search(self.value)
if
self._client.report_instances_status(instance_id=self.instance_id, **_params)
def allocate_public_ip_address(self, **params):
_params = _transfer_params(params)
self._client.allocate_public_ip_address(instance_id=self.instance_id, **_params)
def attach_classic_link_vpc(self, **params):
_params = _transfer_params(params)
self._client.attach_classic_link_vpc(instance_id=self.instance_id, **_params)
def convert_nat_public_ip_to_eip(self, **params):
_params = _transfer_params(params)
self._client.convert_nat_public_ip_to_eip(instance_id=self.instance_id, **_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_instance(instance_id=self.instance_id, **_params)
def describe_eni_monitor_data(self, **params):
_params = _transfer_params(params)
self._client.describe_eni_monitor_data(instance_id=self.instance_id, **_params)
def describe_instance_attribute(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_attribute(instance_id=self.instance_id, **_params)
def describe_instance_monitor_data(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_monitor_data(instance_id=self.instance_id, **_params)
def describe_instance_physical_attribute(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_physical_attribute(instance_id=self.instance_id, **_params)
def describe_instance_vnc_passwd(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_vnc_passwd(instance_id=self.instance_id, **_params)
def describe_instance_vnc_url(self, **params):
_params = _transfer_params(params)
self._client.describe_instance_vnc_url(instance_id=self.instance_id, **_params)
def describe_user_data(self, **params):
_params = _transfer_params(params)
self._client.describe_user_data(instance_id=self.instance_id, **_params)
def detach_classic_link_vpc(self, **params):
_params = _transfer_params(params)
self._client.detach_classic_link_vpc(instance_id=self.instance_id, **_params)
def get_instance_console_output(self, **params):
_params = _transfer_params(params)
response = self._client.get_instance_console_output(instance_id=self.instance_id, **_params)
return response
def get_instance_screenshot(self, **params):
_params = _transfer_params(params)
response = self._client.get_instance_screenshot(instance_id=self.instance_id, **_params)
return response
def modify_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_attribute(instance_id=self.instance_id, **_params)
def modify_auto_release_time(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_auto_release_time(instance_id=self.instance_id, **_params)
def modify_auto_renew_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_auto_renew_attribute(instance_id=self.instance_id, **_params)
def modify_deployment(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_deployment(instance_id=self.instance_id, **_params)
def modify_disk_charge_type(self, **params):
_params = _transfer_params(params)
self._client.modify_disk_charge_type(instance_id=self.instance_id, **_params)
def modify_network_spec(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_network_spec(instance_id=self.instance_id, **_params)
def modify_prepay_instance_spec(self, **params):
_params = _transfer_params(params)
self._client.modify_prepay_instance_spec(instance_id=self.instance_id, **_params)
def modify_spec(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_spec(instance_id=self.instance_id, **_params)
def modify_vnc_passwd(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_vnc_passwd(instance_id=self.instance_id, **_params)
def reactivate(self, **params):
_params = _transfer_params(params)
self._client.reactivate_instances(instance_id=self.instance_id, **_params)
def reboot(self, **params):
_params = _transfer_params(params)
self._client.reboot_instance(instance_id=self.instance_id, **_params)
def redeploy(self, **params):
_params = _transfer_params(params)
self._client.redeploy_instance(instance_id=self.instance_id, **_params)
def renew(self, **params):
_params = _transfer_params(params)
self._client.renew_instance(instance_id=self.instance_id, **_params)
def replace_system_disk(self, **params):
_params = _transfer_params(params)
response = self._client.replace_system_disk(instance_id=self.instance_id, **_params)
return response['DiskId']
def start(self, **params):
_params = _transfer_params(params)
self._client.start_instance(instance_id=self.instance_id, **_params)
def stop(self, **params):
_params = _transfer_params(params)
self._client.stop_instance(instance_id=self.instance_id, **_params)
def modify_vpc_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_instance_vpc_attribute(instance_id=self.instance_id, **_params)
def refresh(self):
result = self._client.describe_instances(instance_ids=self.instance_id)
items = _new_get_key_in_response(result, 'Instances.Instance')
if not items:
raise ClientException(msg=
"Failed to find instance data from DescribeInstances response. "
"InstanceId = {0}".format(self.instance_id))
self._assign_attributes(items[0])
def wait_until(self, target_status, timeout=120):
start_time = time.time()
while True:
end_time = time.time()
if end_time - start_time >= timeout:
raise Exception("Timed out: no {0} status after {1} seconds.".format(
target_status, timeout))
self.refresh()
if self.status == target_status:
return
time.sleep(1)
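# Illustrative usage sketch for _ECSInstanceResource.wait_until (the status string
# and timeout are assumptions; valid status values come from the ECS API): block
# until the instance reports the desired state, e.g.
#   instance.wait_until("Running", timeout=300)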
class _ECSInstanceTypeResource(ServiceResource):
def __init__(self, instance_type_id, _client=None):
ServiceResource.__init__(self, "ecs.instance_type", _client=_client)
self.instance_type_id = instance_type_id
self.baseline_credit = None
self.cpu_core_count = None
self.eni_private_ip_address_quantity = None
self.eni_quantity = None
self.gpu_amount = None
self.gpu_spec = None
self.initial_credit = None
self.instance_bandwidth_rx = None
self.instance_bandwidth_tx = None
self.instance_family_level = None
self.instance_pps_rx = None
self.instance_pps_tx = None
self.instance_type_family = None
self.local_storage_amount = None
self.local_storage_capacity = None
self.local_storage_category = None
self.memory_size = None
class _ECSKeyPairResource(ServiceResource):
def __init__(self, key_pair_name, _client=None):
ServiceResource.__init__(self, "ecs.key_pair", _client=_client)
self.key_pair_name = key_pair_name
self.key_pair_finger_print = None
self.resource_group_id = None
self.tags = None
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_key_pairs(key_pair_name=self.key_pair_name, **_params)
def attach(self, **params):
_params = _transfer_params(params)
self._client.attach_key_pair(key_pair_name=self.key_pair_name, **_params)
def detach(self, **params):
_params = _transfer_params(params)
self._client.detach_key_pair(key_pair_name=self.key_pair_name, **_params)
def refresh(self):
result = self._client.describe_key_pairs(key_pair_name=self.key_pair_name)
items = _new_get_key_in_response(result, 'KeyPairs.KeyPair')
if not items:
raise ClientException(msg=
"Failed to find key_pair data from DescribeKeyPairs response. "
"KeyPairName = {0}".format(self.key_pair_name))
self._assign_attributes(items[0])
class _ECSLaunchTemplateResource(ServiceResource):
def __init__(self, launch_template_id, _client=None):
ServiceResource.__init__(self, "ecs.launch_template", _client=_client)
self.launch_template_id = launch_template_id
self.create_time = None
self.created_by = None
self.default_version_number = None
self.latest_version_number = None
self.launch_template_name = None
self.modified_time = None
self.resource_group_id = None
self.tags = None
def refresh(self):
result = self._client.describe_launch_templates(
list_of_launch_template_id=[self.launch_template_id, ])
items = _new_get_key_in_response(result, 'LaunchTemplateSets.LaunchTemplateSet')
if not items:
raise ClientException(msg=
"Failed to find launch_template data from DescribeLaunchTemplates response. "
"LaunchTemplateId = {0}".format(self.launch_template_id))
self._assign_attributes(items[0])
class _ECSNatGatewayResource(ServiceResource):
def __init__(self, nat_gateway_id, _client=None):
ServiceResource.__init__(self, "ecs.nat_gateway", _client=_client)
self.nat_gateway_id = nat_gateway_id
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_nat_gateway(nat_gateway_id=self.nat_gateway_id, **_params)
class _ECSNetworkInterfaceResource(ServiceResource):
def __init__(self, network_interface_id, _client=None):
ServiceResource.__init__(self, "ecs.network_interface", _client=_client)
self.network_interface_id = network_interface_id
self.associated_public_ip = None
self.creation_time = None
self.description = None
self.instance_id = None
self.ipv6_sets = None
self.mac_address = None
self.network_interface_name = None
self.private_ip_address = None
self.private_ip_sets = None
self.resource_group_id = None
self.security_group_ids = None
self.service_id = None
self.service_managed = None
self.status = None
self.tags = None
self.type_ = None
self.vswitch_id = None
self.vpc_id = None
self.zone_id = None
def assign_ipv6_addresses(self, **params):
_params = _transfer_params(params)
self._client.assign_ipv6_addresses(network_interface_id=self.network_interface_id,
**_params)
def assign_private_ip_addresses(self, **params):
_params = _transfer_params(params)
self._client.assign_private_ip_addresses(network_interface_id=self.network_interface_id,
**_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_network_interface(network_interface_id=self.network_interface_id,
**_params)
def modify_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_network_interface_attribute(
network_interface_id=self.network_interface_id, **_params)
def unassign_ipv6_addresses(self, **params):
_params = _transfer_params(params)
self._client.unassign_ipv6_addresses(network_interface_id=self.network_interface_id,
**_params)
def unassign_private_ip_addresses(self, **params):
_params = _transfer_params(params)
self._client.unassign_private_ip_addresses(network_interface_id=self.network_interface_id,
**_params)
def attach(self, **params):
_params = _transfer_params(params)
self._client.attach_network_interface(network_interface_id=self.network_interface_id,
**_params)
def create_network_interface_permission(self, **params):
_params = _transfer_params(params)
self._client.create_network_interface_permission(
network_interface_id=self.network_interface_id, **_params)
def detach(self, **params):
_params = _transfer_params(params)
self._client.detach_network_interface(network_interface_id=self.network_interface_id,
**_params)
def refresh(self):
result = self._client.describe_network_interfaces(
list_of_network_interface_id=[self.network_interface_id, ])
items = _new_get_key_in_response(result, 'NetworkInterfaceSets.NetworkInterfaceSet')
if not items:
raise ClientException(msg=
"Failed to find network_interface data from DescribeNetworkInterfaces response. "
"NetworkInterfaceId = {0}".format(self.network_interface_id))
self._assign_attributes(items[0])
class _ECSNetworkInterfacePermissionResource(ServiceResource):
def __init__(self, network_interface_permission_id, _client=None):
ServiceResource.__init__(self, "ecs.network_interface_permission", _client=_client)
self.network_interface_permission_id = network_interface_permission_id
self.account_id = None
self.network_interface_id = None
self.permission = None
self.permission_state = None
self.service_name = None
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_network_interface_permission(
network_interface_permission_id=self.network_interface_permission_id, **_params)
def refresh(self):
result = self._client.describe_network_interface_permissions(
list_of_network_interface_permission_id=[self.network_interface_permission_id, ])
items = _new_get_key_in_response(result,
'NetworkInterfacePermissions.NetworkInterfacePermission')
if not items:
raise ClientException(msg=
"Failed to find network_interface_permission data from DescribeNetworkInterfacePermissions response. "
"NetworkInterfacePermissionId = {0}".format(
self.network_interface_permission_id))
self._assign_attributes(items[0])
class _ECSPhysicalConnectionResource(ServiceResource):
def __init__(self, physical_connection_id, _client=None):
ServiceResource.__init__(self, "ecs.physical_connection", _client=_client)
self.physical_connection_id = physical_connection_id
self.access_point_id = None
self.ad_location = None
self.bandwidth = None
self.business_status = None
self.circuit_code = None
self.creation_time = None
self.description = None
self.enabled_time = None
self.line_operator = None
self.name = None
self.peer_location = None
self.port_number = None
self.port_type = None
self.redundant_physical_connection_id = None
self.spec = None
self.status = None
self.type_ = None
def cancel(self, **params):
_params = _transfer_params(params)
self._client.cancel_physical_connection(physical_connection_id=self.physical_connection_id,
**_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_physical_connection(physical_connection_id=self.physical_connection_id,
**_params)
def describe_virtual_border_routers_for(self, **params):
_params = _transfer_params(params)
self._client.describe_virtual_border_routers_for_physical_connection(
physical_connection_id=self.physical_connection_id, **_params)
def enable(self, **params):
_params = _transfer_params(params)
self._client.enable_physical_connection(physical_connection_id=self.physical_connection_id,
**_params)
def modify_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_physical_connection_attribute(
physical_connection_id=self.physical_connection_id, **_params)
def terminate(self, **params):
_params = _transfer_params(params)
self._client.terminate_physical_connection(
physical_connection_id=self.physical_connection_id, **_params)
class _ECSRegionResource(ServiceResource):
def __init__(self, region_id, _client=None):
ServiceResource.__init__(self, "ecs.region", _client=_client)
self.region_id = region_id
self.local_name = None
self.region_endpoint = None
self.status = None
def refresh(self):
result = self._client.describe_regions(region_id=self.region_id)
items = _new_get_key_in_response(result, 'Regions.Region')
if not items:
raise ClientException(msg=
"Failed to find region data from DescribeRegions response. "
"RegionId = {0}".format(self.region_id))
self._assign_attributes(items[0])
class _ECSReservedInstanceResource(ServiceResource):
def __init__(self, reserved_instance_id, _client=None):
ServiceResource.__init__(self, "ecs.reserved_instance", _client=_client)
self.reserved_instance_id = reserved_instance_id
self.creation_time = None
self.description = None
self.expired_time = None
self.instance_amount = None
self.instance_type = None
self.offering_type = None
self.operation_locks = None
self.platform = None
self.region_id = None
self.reserved_instance_name = None
self.resource_group_id = None
self.scope = None
self.start_time = None
self.status = None
self.zone_id = None
def modify_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_reserved_instance_attribute(
reserved_instance_id=self.reserved_instance_id, **_params)
def refresh(self):
result = self._client.describe_reserved_instances(
list_of_reserved_instance_id=[self.reserved_instance_id, ])
items = _new_get_key_in_response(result, 'ReservedInstances.ReservedInstance')
if not items:
raise ClientException(msg=
"Failed to find reserved_instance data from DescribeReservedInstances response. "
"ReservedInstanceId = {0}".format(self.reserved_instance_id))
self._assign_attributes(items[0])
def wait_until(self, target_status, timeout=120):
start_time = time.time()
while True:
end_time = time.time()
if end_time - start_time >= timeout:
raise Exception("Timed out: no {0} status after {1} seconds.".format(
target_status, timeout))
self.refresh()
if self.status == target_status:
return
time.sleep(1)
class _ECSRouteTableResource(ServiceResource):
def __init__(self, route_table_id, _client=None):
ServiceResource.__init__(self, "ecs.route_table", _client=_client)
self.route_table_id = route_table_id
self.creation_time = None
self.resource_group_id = None
self.route_entrys = None
self.route_table_type = None
self.vrouter_id = None
def create_route_entry(self, **params):
_params = _transfer_params(params)
self._client.create_route_entry(route_table_id=self.route_table_id, **_params)
def delete_route_entry(self, **params):
_params = _transfer_params(params)
self._client.delete_route_entry(route_table_id=self.route_table_id, **_params)
def refresh(self):
result = self._client.describe_route_tables(route_table_id=self.route_table_id)
items = _new_get_key_in_response(result, 'RouteTables.RouteTable')
if not items:
raise ClientException(msg=
"Failed to find route_table data from DescribeRouteTables response. "
"RouteTableId = {0}".format(self.route_table_id))
self._assign_attributes(items[0])
class _ECSRouterInterfaceResource(ServiceResource):
def __init__(self, router_interface_id, _client=None):
ServiceResource.__init__(self, "ecs.router_interface", _client=_client)
self.router_interface_id = router_interface_id
self.access_point_id = None
self.business_status = None
self.charge_type = None
self.connected_time = None
self.creation_time = None
self.description = None
self.end_time = None
self.health_check_source_ip = None
self.health_check_target_ip = None
self.name = None
self.opposite_access_point_id = None
self.opposite_interface_business_status = None
self.opposite_interface_id = None
self.opposite_interface_owner_id = None
self.opposite_interface_spec = None
self.opposite_interface_status = None
self.opposite_region_id = None
self.opposite_router_id = None
self.opposite_router_type = None
self.role = None
self.router_id = None
self.router_type = None
self.spec = None
self.status = None
def activate(self, **params):
_params = _transfer_params(params)
self._client.activate_router_interface(router_interface_id=self.router_interface_id,
**_params)
def connect(self, **params):
_params = _transfer_params(params)
self._client.connect_router_interface(router_interface_id=self.router_interface_id,
**_params)
def deactivate(self, **params):
_params = _transfer_params(params)
self._client.deactivate_router_interface(router_interface_id=self.router_interface_id,
**_params)
def delete(self, **params):
_params = _transfer_params(params)
self._client.delete_router_interface(router_interface_id=self.router_interface_id,
**_params)
def modify_attribute(self, **params):
_params = _transfer_params(params)
self._client.modify_router_interface_attribute(router_interface_id=self.router_interface_id,
**_params)
def modify_spec(self, **params):
_params = _transfer_params(params)
self._client.modify_router_interface_spec(router_interface_id=self.router_interface_id,
**_params)
class _ECSSecurityGroupResource(ServiceResource):
def __init__(self, security_group_id, _client=None):
ServiceResource.__init__(self, "ecs.security_group", _client=_client)
self.security_group_id = security_group_id
        self.available_instance_amount = None
# encoding: utf-8
import os
import re
import sys
import gzip
import time
import json
import socket
import random
import weakref
import datetime
import functools
import threading
import collections
import urllib.error
import urllib.parse
import urllib.request
import collections.abc
import json_dict
from . import utils
class ProxyURLRefreshError(Exception):
pass
class AliveProxiesNotFound(Exception):
pass
class NoFreeProxies(Exception):
pass
def _get_missing(target, source):
"""Возвращает присутствующие в `target`, но отсутствующие в `source` элементы
"""
old_target = set(target)
new_target = old_target.intersection(source)
return old_target.difference(new_target)
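# For instance (illustrative): _get_missing({'a', 'b', 'c'}, {'b'}) returns {'a', 'c'}.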
def _build_opener(proxy=None):
if proxy is not None:
parsed = urllib.parse.urlparse(proxy)
handler = urllib.request.ProxyHandler({parsed.scheme: proxy})
return urllib.request.build_opener(handler)
else:
return urllib.request.build_opener()
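# Example (illustrative): _build_opener('http://127.0.0.1:3128') returns an opener whose
# requests go through that HTTP proxy, while _build_opener() connects directly.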
class Proxies:
default_opener = _build_opener()
def __init__(
self,
proxies=None,
proxies_url=None,
proxies_url_gateway=None,
proxies_file=None,
options=None,
):
"""
        @param proxies: list of proxy server addresses
        @param proxies_url: URL pointing to a list of proxy servers
        @param proxies_file: path to a file containing the list of proxy servers
        @param options: additional options
"""
if options is None:
options = {}
shuffle = options.get('shuffle', False)
if proxies is not None:
proxies = list(proxies)
if shuffle:
random.shuffle(proxies)
auto_refresh_period = options.get('auto_refresh_period')
if auto_refresh_period:
auto_refresh_period = datetime.timedelta(**auto_refresh_period)
blacklist = utils.get_json_dict(json_dict.JsonLastUpdatedOrderedDict, filename=options.get('blacklist'))
cooling_down = utils.get_json_dict(json_dict.JsonOrderedDict, filename=options.get('cooldown'))
stats = utils.get_json_dict(json_dict.JsonDict, filename=options.get('stats'))
if proxies_url_gateway:
url_opener = _build_opener(proxies_url_gateway)
else:
url_opener = None
self._url_opener = url_opener
self._proxies = proxies
self.proxies_url = proxies_url
self.proxies_file = proxies_file
self._shuffle = shuffle
self.slice = options.get('slice')
self.force_type = options.get('type')
self.auto_refresh_period = auto_refresh_period
self._blacklist = blacklist
self._cooling_down = cooling_down
self._stats = stats
self._cleanup_lock = threading.RLock()
self._last_auto_refresh = None
self._auto_refresh_lock = threading.Lock()
self._load_lock = threading.Lock()
self._modified_at = time.perf_counter()
self.__pool = None
self._smart_holdout_start = options.get('smart_holdout_start')
self._options = options
if self._proxies is not None:
proxies = set(self._proxies)
self._cleanup_internals(proxies)
@property
def proxies(self):
if self._proxies is None:
with self._load_lock:
                # We are past the race condition; now make sure the load is actually still needed
if self._proxies is None:
self._proxies = self._load()
self._cleanup_internals(self._proxies)
self._modified_at = time.perf_counter()
return self._proxies
def _load(self):
if self.proxies_url:
proxies = self.read_url(self.proxies_url, opener=self._url_opener)
elif self.proxies_file:
proxies = self.read_file(self.proxies_file)
else:
raise NotImplementedError(
"Can't load proxies: "
"please specify one of the sources ('proxies_url' or 'proxies_file')"
)
if self.slice:
proxies = proxies[slice(*self.slice)]
if self.force_type:
new_type = self.force_type + '://' # `socks` format
proxies = [
re.sub(r'^(?:(.*?)://)?', new_type, proxy)
for proxy in proxies
]
if self._shuffle:
random.shuffle(proxies)
return proxies
def _cleanup_internals(self, proxies):
with self._cleanup_lock:
self._cleanup_blacklist(proxies)
self._cleanup_cooling_down(proxies)
self._cleanup_stats(proxies)
def _cleanup_cooling_down(self, proxies):
for proxy in _get_missing(self._cooling_down, proxies):
self._cooling_down.pop(proxy)
def _cleanup_blacklist(self, proxies):
for proxy in _get_missing(self._blacklist, proxies):
self._blacklist.pop(proxy)
def _cleanup_stats(self, proxies):
for proxy in _get_missing(self._stats, proxies):
self._stats.pop(proxy)
def _get_options(self, *options, missing_ok=True):
if missing_ok:
return {k: self._options.get(k) for k in options}
else:
return {k: self._options[k] for k in options}
@classmethod
def read_string(cls, string, sep=','):
return list(x for x in map(str.strip, string.split(sep)) if x)
@classmethod
def read_url(cls, url, sep='\n', retry=10, sleep_range=(2, 10), timeout=2, opener=None):
if opener is None:
opener = cls.default_opener
while True:
try:
resp = opener.open(url, timeout=timeout)
break
except (urllib.error.HTTPError, socket.timeout):
if not retry:
raise
retry -= 1
time.sleep(random.randint(*sleep_range))
content = resp.read()
if resp.headers.get('Content-Encoding', 'identity') == 'gzip':
content = gzip.decompress(content)
charset = resp.headers.get_content_charset('utf-8')
content = content.decode(charset)
return cls.read_string(content, sep=sep)
@classmethod
def read_file(cls, file_name, sep='\n'):
with open(file_name) as f:
return cls.read_string(f.read(), sep=sep)
def refresh(self):
if not self.proxies_url and not self.proxies_file:
return
try:
self._proxies = self._load()
self._cleanup_internals(self._proxies)
except urllib.error.HTTPError:
import problems
problems.handle(ProxyURLRefreshError, extra={'url': self.proxies_url})
else:
self._modified_at = time.perf_counter()
def _auto_refresh(self):
if self.proxies_file:
with self._auto_refresh_lock:
modification_time = datetime.datetime.fromtimestamp(os.stat(self.proxies_file).st_mtime)
if modification_time == self._last_auto_refresh:
return
self.refresh()
self._last_auto_refresh = modification_time
elif self.proxies_url:
if self.auto_refresh_period is None:
return
with self._auto_refresh_lock:
now = datetime.datetime.now()
if self._last_auto_refresh is not None:
if now - self._last_auto_refresh < self.auto_refresh_period:
return
self.refresh()
self._last_auto_refresh = now
def get_random_address(self):
self._auto_refresh()
return random.choice(self.proxies)
def get_pool(self):
if self.__pool is None:
            with self._cleanup_lock:  # optimization: reuse the already existing lock
                # We are past the race condition; now make sure creating the pool is actually still needed
if self.__pool is None:
options = self._get_options('default_holdout', 'default_bad_holdout', 'force_defaults')
if self._smart_holdout_start is not None:
options['smart_holdout'] = True
options['smart_holdout_start'] = self._smart_holdout_start
options.update(self._get_options('smart_holdout_min', 'smart_holdout_max'))
self.__pool = _Pool(
self, self._cooling_down, self._blacklist, self._stats, self._cleanup_lock,
**options
)
return self.__pool
@classmethod
def from_cfg_string(cls, cfg_string):
"""Возвращает список прокси с тем исключением что список опций берется автоматически.
Формат: json
Доступные опции:
type ('socks5', 'http'; для полного списка типов см. модуль socks):
все прокси будут автоматически промаркированы этип типом
slice (tuple c аргументами для builtins.slice):
будет взят только указанный фрагмент списка прокси-серверов
auto_refresh_period (dict): {'days': ..., 'hours': ..., 'minutes': ...}
как часто необходимо обновлять список прокси-серверов (только для `url` и `file`)
url_gateway:
адрес proxy, через которые будет загружаться список прокси по url
(url, file, list) - может быть именем файла, ссылкой или списком в формате json
Параметры slice и force_type являются необязательными
Примеры:
option = {"list": ["127.0.0.1:3128"]}
option = {"list": ["127.0.0.1:3128", "127.0.0.1:9999"]}
option = {"file": "./my_new_proxies.txt", "type": "socks5"}
option = {"url": "http://example.com/get/proxy_list/", "slice": [35, null], "type": "http"}
option = {"url": "http://example.com/get/proxy_list/", "auto_refresh_period": {"days": 1}}
option = {"url": "http://example.com/get/proxy_list/", "url_gateway": "http://proxy.example.com:9999"}
"""
cfg = json.loads(cfg_string)
proxies = cfg.pop('list', None)
proxies_url = cfg.pop('url', None)
proxies_url_gateway = cfg.pop('url_gateway', None)
proxies_file = cfg.pop('file', None)
return cls(
proxies=proxies,
proxies_url=proxies_url,
proxies_url_gateway=proxies_url_gateway,
proxies_file=proxies_file,
options=cfg
)
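# Minimal usage sketch for the config-string constructor above (the address below is an
# illustrative placeholder, not a real proxy):
#
#     proxies = Proxies.from_cfg_string('{"list": ["127.0.0.1:3128"], "type": "http"}')
#     address = proxies.get_random_address()
#     pool = proxies.get_pool()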
class _Pool:
def __init__(
self, proxies: "`Proxies` instance", cooling_down, blacklist, stats, _cleanup_lock=None,
smart_holdout=False, smart_holdout_start=None, smart_holdout_min=None, smart_holdout_max=None,
default_holdout=None, default_bad_holdout=None, force_defaults=False,
):
if smart_holdout:
if smart_holdout_start in (None, 0):
raise RuntimeError("Вы должны указать начальное время охлаждения")
if smart_holdout_max is None:
smart_holdout_max = float('inf')
self._used = set()
self._cond = threading.Condition(lock=_cleanup_lock)
self._free = collections.deque(
p for p in proxies.proxies
if (
p not in blacklist and
p not in cooling_down
)
)
self._proxies = proxies
self._cooling_down = cooling_down
self._blacklist = blacklist
self._stats = stats
self._smart_holdout = smart_holdout
self._smart_holdout_start = smart_holdout_start
self._smart_holdout_min = smart_holdout_min or 0
self._smart_holdout_max = smart_holdout_max
self._default_holdout = default_holdout
self._default_bad_holdout = default_bad_holdout
self._force_defaults = force_defaults
self._proxies_modified_at = proxies._modified_at
@property
def _size(self):
return len(self._free) + len(self._used) + len(self._cooling_down) + len(self._blacklist)
def _cool_released(self):
now = time.time()
cooled = []
for proxy, holdout in self._cooling_down.items():
if now >= holdout:
cooled.append(proxy)
for proxy in cooled:
self._cooling_down.pop(proxy, None)
if proxy not in self._blacklist:
self._free.append(proxy)
def _is_proxies_changed(self):
self._proxies._auto_refresh()
return self._proxies._modified_at != self._proxies_modified_at
def _remove_outdated(self):
        # the proxy list has changed; keep only the entries that are still present
full_list = set(self._proxies.proxies)
for proxy in _get_missing(self._blacklist, full_list):
self._blacklist.pop(proxy, None)
for proxy in _get_missing(self._cooling_down, full_list):
self._cooling_down.pop(proxy, None)
for proxy in _get_missing(self._used, full_list):
self._used.remove(proxy)
for proxy in _get_missing(self._stats, full_list):
self._stats.pop(proxy, None)
free = set(
p for p in full_list
if (
p not in self._used and
p not in self._blacklist and
p not in self._cooling_down
)
)
old_free = set(self._free)
new_free = old_free.intersection(free)
if old_free.difference(new_free):
self._free.clear()
self._free.extend(new_free)
self._proxies_modified_at = self._proxies._modified_at
def _update_stats(self, proxy, bad=False, holdout=None):
proxy_stat = self._stats.get(proxy) or {}
ok, fail = proxy_stat.get('uptime', (0, 0))
if not bad:
ok += 1
else:
fail += 1
proxy_stat['uptime'] = ok, fail
proxy_stat['last_holdout'] = holdout
if (
not bad or
(
holdout is not None and
holdout >= (proxy_stat.get('last_good_holdout') or 0)
)
):
proxy_stat['last_good_holdout'] = holdout
        # a universal way to signal that the stats were updated,
        # since without calling .save this also works with a plain dict (not only JsonDict)
self._stats[proxy] = proxy_stat
def _get_next_holdout(self, proxy, bad=False):
"""Рассчитывает время охлаждения.
@param proxy: прокси, для которого необходимо вычислить
@param bad: True - вычисляем охлаждение для неудачи, иначе False
@return: рекомендуемое время охлаждения в секундах или None, если недостаточно данных
"""
# Алгоритм основан на бинарном поиске,
# в отличии от которого нам не известна верхняя граница
proxy_stat = self._stats.get(proxy)
if proxy_stat is None:
return None
last_holdout = proxy_stat['last_holdout']
last_good_holdout = proxy_stat.get('last_good_holdout', 0)
        lo = last_holdout  # previous holdout time (the lower bound)
if bad:
            # We got "banned" ...
if lo < last_good_holdout:
                # ... fall back to the last known-good value ...
holdout = last_good_holdout
else:
                # ... or push the bound further out
holdout = lo * 2
else:
            # step back toward the previous bound (lo / 2),
            # but with a small margin - to the midpoint of the interval [(lo / 2), lo]
holdout = lo * 0.75
return holdout
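    # Worked example of the adaptive holdout above (numbers are illustrative):
    #   bad=True,  last_holdout=60, last_good_holdout=100 -> 100   (fall back to the last good value)
    #   bad=True,  last_holdout=60, last_good_holdout=40  -> 120   (double the bound)
    #   bad=False, last_holdout=60                        -> 45.0  (back off to 0.75 * lo)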
def acquire(self, timeout=None):
start = time.perf_counter()
with self._cond:
while True:
if self._is_proxies_changed():
self._remove_outdated()
self._cool_released()
if self._free:
proxy = self._free.popleft()
self._used.add(proxy)
return proxy
if self._blacklist:
                    # Return the most stable proxy from the blacklist; its ban may have been lifted.
def _uptime(p):
uptime = float('inf')
p_stat = self._stats.get(p)
if p_stat is not None:
ok, failed = p_stat.get('uptime', (0, 0))
if failed != 0:
uptime = ok // failed
else:
uptime = ok
return uptime
proxy = next((
                        p for p in sorted(self._blacklist, key=_uptime, reverse=True)
                    ), None)  # assumed completion of a truncated line: try the most stable blacklisted proxy first
import sys
import datetime
import time
import selenium
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from .config.aaConfig import aaConfig
from .parser.aaConfigParser import aaConfigParser
class AutoAA:
def __init__(self, show):
self.browser = None
self.pr = aaConfigParser()
self.url = "https://www.airasia.com/zh/tw"
self.pricecounter = 1
self.show = show
self.departureFullNameList = dict()
self.departureAbbrevNameList = dict()
self.arrivalFullNameList = dict()
self.arrivalAbbrevNameList = dict()
self.pr.__run__() # read user's config
try:
self.startTime = datetime.datetime.strptime(self.pr.startTime, "%Y/%m/%d %H:%M:%S")
self.frequency = int(self.pr.frequency)
except ValueError as e:
print("AutoAA: start time format incorrect. abort")
sys.exit(1)
def __start__(self):
try:
self.pricecounter = 1
# prevent to open another new chrome window
options = webdriver.ChromeOptions()
options.add_argument("--headless")
# command line argument
if self.show == "show":
self.browser = webdriver.Chrome(
executable_path='/usr/local/bin/chromedriver'
)
else:
self.browser = webdriver.Chrome(
chrome_options=options,
executable_path='/usr/local/bin/chromedriver'
)
        except selenium.common.exceptions.SessionNotCreatedException as e:
            print("AutoAA: Chromedriver version does not match. exit")
            sys.exit(1)
        except selenium.common.exceptions.WebDriverException as e:
            print("AutoAA: Chromedriver needs to be in /usr/local/bin. exit")
            sys.exit(1)
try:
# start
print("AutoAA: Start selenium on {}... ".format(self.url), end="")
self.browser.get(self.url)
print("done\n")
except:
print("error. exit")
sys.exit(1)
def selectTicketNum(self):
print("AutoAA: Selecting ticket number... ")
userTicket = {
0: int(self.pr.flightAdult),
1: int(self.pr.flightChildren),
2: int(self.pr.flightBaby)
}
print("AutoAA: {} adults, {} children, {} infants".format(
userTicket[0],
userTicket[1],
userTicket[2]
))
if userTicket[2] > userTicket[0]:
print("AutoAA: error! 1 infant need at least 1 adult")
sys.exit(1)
# click ticket number button to bring up manual
WebDriverWait(self.browser, 3).until(
EC.element_to_be_clickable(
(By.ID, aaConfig.flightIdField)
)
).click()
# wait pop up menu show
WebDriverWait(self.browser, 3).until(
EC.visibility_of_element_located(
(
By.XPATH, '//div[@id="{}"]//div[contains(@class, "{} {}")]'.format(
aaConfig.flightIdField,
aaConfig.flightField1,
aaConfig.flightField2
)
)
)
)
# get current ticket status number
dropdown = self.browser.find_elements_by_xpath(
'//div[contains(@class, "{} {}")] \
//ul[@class="{}"] \
//li[contains(@class, "{} {}")] \
//div[contains(@class, "{} {}")] \
//div[@class="{}"] \
//span[@class="{}"]'.format(
aaConfig.flightField1,
aaConfig.flightField2,
aaConfig.flightField3,
aaConfig.flightField4,
aaConfig.flightField2,
aaConfig.flightField5,
aaConfig.flightField6,
aaConfig.flightField7,
aaConfig.flightField8
)
)
# press button
tempTicketClass = {
0: "adult",
1: "child",
2: "infant"
}
# iterate 3 kinds of ticket number(website)
counter = 0
for element in dropdown:
# get offset of config.ini with website's
offset = userTicket.get(counter, 0) - int(element.text)
for i in range(offset):
# click add ticket button
self.browser.find_element_by_id(
"{}{}{}".format(
aaConfig.flightButtonFieldHead,
tempTicketClass.get(counter, 0),
aaConfig.flightButtonFieldTail
)
).click()
counter += 1
# get current ticket status number
dropdown = self.browser.find_elements_by_xpath(
'//div[contains(@class, "{} {}")] \
//ul[@class="{}"] \
//li[contains(@class, "{} {}")] \
//div[contains(@class, "{} {}")] \
//div[@class="{}"] \
//span[@class="{}"]'.format(
aaConfig.flightField1,
aaConfig.flightField2,
aaConfig.flightField3,
aaConfig.flightField4,
aaConfig.flightField2,
aaConfig.flightField5,
aaConfig.flightField6,
aaConfig.flightField7,
aaConfig.flightField8
)
)
# verify operation, recheck
counter = 0
for element in dropdown:
offset = userTicket.get(counter, 0) - int(element.text)
counter += 1
if offset != 0:
print("AutoAA: Selecting ticket number error. exit")
sys.exit(1)
print("AutoAA: Selecting ticket number done")
def selectFlight(self):
print("AutoAA: Checking flight departure and arrival... ")
# get flight departure and arrival from config file
fd = self.pr.flightDeparture
fa = self.pr.flightArrival
# check departure flight location
departureClcikerId = self.getDepartureList(fd)
if departureClcikerId == -1:
print("failed")
print("AutoAA: {} not found. exit".format(fd))
sys.exit(1)
else:
print("AutoAA: {} ({}) found".format(
self.departureFullNameList.get(departureClcikerId, 0),
self.departureAbbrevNameList.get(departureClcikerId, 0)
))
# click departure location on list
self.browser.find_element_by_id(
"{}{}".format(aaConfig.departureListField, departureClcikerId)
).click()
# check arrival flight location
arrivalClickerId = self.getArrivalList(fa)
if arrivalClickerId == -1:
print("failed")
print("AutoAA: {} not found. exit".format(fd))
sys.exit(1)
else:
print("AutoAA: {} ({}) found".format(
self.arrivalFullNameList.get(arrivalClickerId, 0),
self.arrivalAbbrevNameList.get(arrivalClickerId, 0)
))
# click arrival location on list
self.browser.find_element_by_id(
"{}{}".format(aaConfig.arrivalListField, arrivalClickerId)
).click()
def getArrivalList(self, configArrival):
print("AutoAA: Get arrival list... ", end="")
# bring up arrival list
WebDriverWait(self.browser, 3).until(
EC.element_to_be_clickable(
(By.ID, aaConfig.arrivalBoxField)
)
)
# get all arrival name
items = self.browser.find_elements_by_xpath(
'//*[starts-with(@id, "{}")]'.format(aaConfig.arrivalListField)
)
# write to internal array list
counter = 0
for element in items:
tempF = element.find_element_by_class_name(
aaConfig.stationnameField
).text
tempA = element.find_element_by_class_name(
aaConfig.stationcodeField
).text
self.arrivalFullNameList[counter] = tempF
self.arrivalAbbrevNameList[counter] = tempA
# found config arrival location
# abort constructing arrival list
if configArrival == tempF or configArrival == tempA:
print("done")
return counter
counter += 1
return -1
def getDepartureList(self, configDeparture):
print("AutoAA: Get departure list... ", end="")
# bring up departure list
WebDriverWait(self.browser, 3).until(
EC.element_to_be_clickable(
(By.ID, aaConfig.departureButtonField)
)
).click()
# get all departure name
items = self.browser.find_elements_by_xpath(
'//*[starts-with(@id, "{}")]'.format(aaConfig.departureListField)
)
# home-origin-autocomplete-heatmaplist-33
# write to internal array list
counter = 0
for element in items:
tempF = element.find_element_by_class_name(
aaConfig.stationnameField
).text
tempA = element.find_element_by_class_name(
aaConfig.stationcodeField
).text
self.departureFullNameList[counter] = tempF
self.departureAbbrevNameList[counter] = tempA
# found config departure location
# abort constructing departure list
if configDeparture == tempF or configDeparture == tempA:
print("done")
return counter
counter += 1
return -1
def __login__(self):
# bring up login page
WebDriverWait(self.browser, 30).until(
EC.element_to_be_clickable(
(
By.XPATH, '//button[contains(@class, "{} {}")]'.format(
aaConfig.loginModalFieldClass1,
aaConfig.loginModalFieldClass2
)
)
)
).click()
try:
# fill user's email to login
WebDriverWait(self.browser, 3).until(
EC.element_to_be_clickable((By.ID, aaConfig.loginEmailFieldId))
).send_keys(self.pr.loginEmail)
# fill user's password to login
WebDriverWait(self.browser, 3).until(
EC.element_to_be_clickable((By.ID, aaConfig.loginPasswordFieldId))
).send_keys(self.pr.loginPassword)
print("AutoAA: Logging to air asia... ", end="")
WebDriverWait(self.browser, 3).until(
EC.element_to_be_clickable(
(
By.XPATH, '//button[@class="{}" and @type="{}"]'.format(
aaConfig.loginButtonFieldClass,
aaConfig.loginButtonFieldType
)
)
)
).click()
except selenium.common.exceptions.TimeoutException as e:
print("failed. exit")
sys.exit(1)
# verify
try:
# get air asia user id on website
WebDriverWait(self.browser, 60).until(
EC.element_to_be_clickable(
(
By.XPATH, '//*[contains(text(), "BIG會員帳號")]'
)
)
)
print("done")
# get air asia true user name
tuserName = self.browser.find_element_by_xpath(
'//div[@class="{}"]//span[@class="{}"]'.format(
aaConfig.loginPrompt1, aaConfig.loginPrompt2
)
).text
print("AutoAA: Welcome, {}!".format(tuserName))
# close login panel
self.browser.find_element_by_xpath(
'(//button[@aria-label="{}"])[2]'.format(
"Close navigation"
)
).click()
except selenium.common.exceptions.TimeoutException as e:
print("failed. exit")
sys.exit(1)
def setTicketType(self):
oneWay = int(self.pr.flightOne)
returnWay = int(self.pr.flightReturn)
print("AutoAA: ", end="")
if oneWay == 1 and returnWay == 1:
print("set ticket type error. exit")
sys.exit(1)
else:
if oneWay:
self.browser.find_element_by_xpath(
'//label[@for="{}"]'.format(aaConfig.flightTripOne)
).click()
else:
self.browser.find_element_by_xpath(
'//label[@for="{}"]'.format(aaConfig.flightTripReturn)
).click()
print("{} done".format("one way ticket" if oneWay else "returnWay"))
def setTicketDate(self):
oneWay = int(self.pr.flightOne)
returnWay = int(self.pr.flightReturn)
oneDate = self.pr.flightDDate
returnDate = self.pr.flightRDate
print("AutoAA: Setting ticket date...\nAutoAA: ", end="")
if not self.validate(oneDate) or not self.validate(returnDate):
print("Date format incorrect. exit")
sys.exit(1)
if oneWay == 1 and returnWay == 1:
print("set ticket type error. exit")
sys.exit(1)
else:
            # the departure date must be selected in every case
self.browser.find_element_by_id(
aaConfig.flightDDateField
).click()
self.browser.find_element_by_id(
aaConfig.flightDDateField
).clear()
self.browser.find_element_by_id(
aaConfig.flightDDateField
).send_keys(oneDate)
print("departure date: {}".format(oneDate))
if returnWay:
tmp = self.browser.find_element_by_id(
aaConfig.flightRDateField
)
selenium.webdriver.ActionChains(self.browser).move_to_element(tmp).click(tmp).perform()
self.browser.find_element_by_id(
aaConfig.flightRDateField
).clear()
self.browser.find_element_by_id(
aaConfig.flightRDateField
).send_keys(returnDate)
print("AutoAA: return date: {}".format(returnDate))
self.browser.find_element_by_xpath(
'//button[@class="{}"]'.format(
"calendar-button"
)
).click()
self.browser.find_element_by_id(
aaConfig.flightSearchField
).click()
def queryFlight(self):
oneDate = self.pr.flightDDate
returnDate = self.pr.flightRDate
        # implicit wait until the page finishes loading
self.browser.implicitly_wait(60)
        # wait for the page to finish loading
WebDriverWait(self.browser, 60).until(
EC.visibility_of_element_located(
(
By.ID, aaConfig.flightDepartureRightBtn
)
)
)
WebDriverWait(self.browser, 60).until(
EC.presence_of_element_located(
(
By.XPATH, '//*[contains(@class, "{} {}")]'.format(
"fare-date-item-inner",
"active"
)
)
)
)
self.browser.implicitly_wait(0)
        # look for the flight on the requested departure date
checker = True
oneDate = self.shorten(oneDate)
        # emulate a do-while loop in Python
while checker:
            # collect the date information of the listed departures
lala = self.browser.find_elements_by_css_selector(
'div[id*="{}"]'.format(
"{}{}".format(
aaConfig.flightDepartureDate,
aaConfig.flightDepartureIndex
)
)
)
            # walk through the (up to) five visible results and compare their dates
for element in lala:
try:
tempDate = element.text.split(",")[0].replace("月", "/").replace("日", "")
except selenium.common.exceptions.StaleElementReferenceException as e:
tempDate = element.text.split(",")[0].replace("月", "/").replace("日", "")
if oneDate == self.padding(tempDate):
checker = False
                    # extract the id of the matched element
id = element.get_attribute("id").replace("{}{}".format(
aaConfig.flightDepartureDate,
aaConfig.flightDepartureIndex
), "")
                    # click the tile to reveal the fare and the remaining seat count
self.browser.find_element_by_id(
"{}{}{}{}".format(
aaConfig.flightDepartureClickH,
aaConfig.flightDepartureIndex,
id,
aaConfig.flightDepartureClickT
)
).click()
break
if checker:
                # click through to the next page of dates
self.browser.find_element_by_xpath(
'//div[@id="{}"]//div[@class="{}"]'.format(
aaConfig.flightDepartureRightBtn,
aaConfig.flightScrollBarDefault
)
).click()
                # wait for the page to finish loading
WebDriverWait(self.browser, 60).until(
EC.visibility_of_element_located(
(
By.ID, aaConfig.flightDepartureRightBtn
)
)
)
WebDriverWait(self.browser, 60).until(
EC.presence_of_element_located(
(
By.XPATH, '//*[contains(@class, "{} {}")]'.format(
"fare-date-item-inner",
"active"
)
)
)
)
print("AutoAA: departure date selected")
def selectDepaturePrice(self):
        # wait for the page to finish loading
WebDriverWait(self.browser, 60).until(
EC.visibility_of_element_located(
(
By.ID, aaConfig.flightDepartureRightBtn
)
)
)
WebDriverWait(self.browser, 60).until(
EC.presence_of_element_located(
(
By.XPATH, '//div[contains(@class, "{} {}")]'.format(
"fare-date-item-inner",
"active"
)
)
)
)
time.sleep(2)
tmp = self.browser.find_elements_by_xpath(
'//div[contains(@class, "{}")]'.format(
aaConfig.noFlightField
)
)
if tmp:
print("AutoAA: No flights in the desired time. exit")
sys.exit(1)
WebDriverWait(self.browser, 10).until(
EC.presence_of_element_located(
(
By.XPATH, '//*[contains(@id, "{}{}{}0-")]'.format(
aaConfig.flightNormalSeatFieldH,
aaConfig.flightAmountField,
aaConfig.flightSeatFieldT
)
)
)
)
try:
tps = self.pricecounter - 1
print("AutoAA: Querying departure flight price...")
# find all price button
tflightBtn = self.browser.find_elements_by_xpath(
'//*[contains(@id, "{}")]'.format(
aaConfig.flightChoosePriceField
)
)
        except (selenium.common.exceptions.NoSuchElementException, selenium.common.exceptions.TimeoutException):
print("AutoAA: No flights in the desired time. exit")
sys.exit(1)
        # get the currency symbol
self.ct = self.browser.find_element_by_xpath(
'//*[contains(@id, "{}{}{}0-")]'.format(
aaConfig.flightNormalSeatFieldH,
aaConfig.flightCurrencyField,
aaConfig.flightSeatFieldT
)
).text
numUp = 0
# find parity seat price
parityJourney = self.browser.find_elements_by_xpath(
'//*[contains(@id, "{}")]'.format(
aaConfig.flightJourneyField1
)
)
parityAmount = self.browser.find_elements_by_xpath(
'//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
aaConfig.flightSeatFieldH,
aaConfig.flightAmountField,
aaConfig.flightSeatFieldT,
numUp
)
)
parityBadge = self.browser.find_elements_by_xpath(
'//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
aaConfig.flightSeatFieldH,
aaConfig.flightBageField,
aaConfig.flightSeatFieldT,
numUp
)
)
parityFlat = self.browser.find_elements_by_xpath(
'//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
aaConfig.flightPrioritySeatFieldH,
aaConfig.flightAmountField,
aaConfig.flightSeatFieldT,
numUp
)
)
parityBadge2 = self.browser.find_elements_by_xpath(
'//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
aaConfig.flightPrioritySeatFieldH,
aaConfig.flightBageField,
aaConfig.flightSeatFieldT,
numUp
)
)
m.e967 = Constraint(expr= m.x471 - 20 * m.b1051 <= 0)
m.e968 = Constraint(expr= m.x472 - 20 * m.b1052 <= 0)
m.e969 = Constraint(expr= m.x473 - 24 * m.b1053 <= 0)
m.e970 = Constraint(expr= m.x474 - 21 * m.b1054 <= 0)
m.e971 = Constraint(expr= m.x475 - 20 * m.b1055 <= 0)
m.e972 = Constraint(expr= m.x476 - 20 * m.b1056 <= 0)
m.e973 = Constraint(expr= m.x477 - 24 * m.b1057 <= 0)
m.e974 = Constraint(expr= m.x478 - 21 * m.b1058 <= 0)
m.e975 = Constraint(expr= m.x479 - 20 * m.b1059 <= 0)
m.e976 = Constraint(expr= m.x480 - 20 * m.b1060 <= 0)
m.e977 = Constraint(expr= m.x481 - 24 * m.b1061 <= 0)
m.e978 = Constraint(expr= m.x482 - 21 * m.b1062 <= 0)
m.e979 = Constraint(expr= m.x483 - 20 * m.b1063 <= 0)
m.e980 = Constraint(expr= m.x484 - 20 * m.b1064 <= 0)
m.e981 = Constraint(expr= m.x485 - 24 * m.b1065 <= 0)
m.e982 = Constraint(expr= m.x486 - 21 * m.b1066 <= 0)
m.e983 = Constraint(expr= m.x487 - 20 * m.b1067 <= 0)
m.e984 = Constraint(expr= m.x488 - 20 * m.b1068 <= 0)
m.e985 = Constraint(expr= m.x505 - 30 * m.b1053 <= 0)
m.e986 = Constraint(expr= m.x506 - 25 * m.b1054 <= 0)
m.e987 = Constraint(expr= m.x507 - 21 * m.b1055 <= 0)
m.e988 = Constraint(expr= m.x508 - 19 * m.b1056 <= 0)
m.e989 = Constraint(expr= m.x509 - 30 * m.b1057 <= 0)
m.e990 = Constraint(expr= m.x510 - 25 * m.b1058 <= 0)
m.e991 = Constraint(expr= m.x511 - 21 * m.b1059 <= 0)
m.e992 = Constraint(expr= m.x512 - 19 * m.b1060 <= 0)
m.e993 = Constraint(expr= m.x513 - 30 * m.b1061 <= 0)
m.e994 = Constraint(expr= m.x514 - 25 * m.b1062 <= 0)
m.e995 = Constraint(expr= m.x515 - 21 * m.b1063 <= 0)
m.e996 = Constraint(expr= m.x516 - 19 * m.b1064 <= 0)
m.e997 = Constraint(expr= m.x517 - 30 * m.b1065 <= 0)
m.e998 = Constraint(expr= m.x518 - 25 * m.b1066 <= 0)
m.e999 = Constraint(expr= m.x519 - 21 * m.b1067 <= 0)
m.e1000 = Constraint(expr= m.x520 - 19 * m.b1068 <= 0)
m.e1001 = Constraint(expr= m.x489 - 30 * m.b1069 <= 0)
m.e1002 = Constraint(expr= m.x490 - 25 * m.b1070 <= 0)
m.e1003 = Constraint(expr= m.x491 - 21 * m.b1071 <= 0)
m.e1004 = Constraint(expr= m.x492 - 19 * m.b1072 <= 0)
m.e1005 = Constraint(expr= m.x493 - 30 * m.b1073 <= 0)
m.e1006 = Constraint(expr= m.x494 - 25 * m.b1074 <= 0)
m.e1007 = Constraint(expr= m.x495 - 21 * m.b1075 <= 0)
m.e1008 = Constraint(expr= m.x496 - 19 * m.b1076 <= 0)
m.e1009 = Constraint(expr= m.x497 - 30 * m.b1077 <= 0)
m.e1010 = Constraint(expr= m.x498 - 25 * m.b1078 <= 0)
m.e1011 = Constraint(expr= m.x499 - 21 * m.b1079 <= 0)
m.e1012 = Constraint(expr= m.x500 - 19 * m.b1080 <= 0)
m.e1013 = Constraint(expr= m.x501 - 30 * m.b1081 <= 0)
m.e1014 = Constraint(expr= m.x502 - 25 * m.b1082 <= 0)
m.e1015 = Constraint(expr= m.x503 - 21 * m.b1083 <= 0)
m.e1016 = Constraint(expr= m.x504 - 19 * m.b1084 <= 0)
m.e1017 = Constraint(expr= m.x345 - 10 * m.b957 <= 0)
m.e1018 = Constraint(expr= m.x346 - 10 * m.b958 <= 0)
m.e1019 = Constraint(expr= m.x347 - 10 * m.b959 <= 0)
m.e1020 = Constraint(expr= m.x348 - 10 * m.b960 <= 0)
m.e1021 = Constraint(expr= m.x349 - 10 * m.b961 <= 0)
m.e1022 = Constraint(expr= m.x350 - 10 * m.b962 <= 0)
m.e1023 = Constraint(expr= m.x351 - 10 * m.b963 <= 0)
m.e1024 = Constraint(expr= m.x352 - 10 * m.b964 <= 0)
m.e1025 = Constraint(expr= m.x353 - 50 * m.b965 <= 0)
m.e1026 = Constraint(expr= m.x354 - 50 * m.b966 <= 0)
m.e1027 = Constraint(expr= m.x355 - 50 * m.b967 <= 0)
m.e1028 = Constraint(expr= m.x356 - 50 * m.b968 <= 0)
m.e1029 = Constraint(expr= m.x357 - 50 * m.b969 <= 0)
m.e1030 = Constraint(expr= m.x358 - 50 * m.b970 <= 0)
m.e1031 = Constraint(expr= m.x359 - 50 * m.b971 <= 0)
m.e1032 = Constraint(expr= m.x360 - 50 * m.b972 <= 0)
m.e1033 = Constraint(expr= m.x361 + m.x377 - 40 * m.b973 <= 0)
m.e1034 = Constraint(expr= m.x362 + m.x378 - 40 * m.b974 <= 0)
m.e1035 = Constraint(expr= m.x363 + m.x379 - 40 * m.b975 <= 0)
m.e1036 = Constraint(expr= m.x364 + m.x380 - 40 * m.b976 <= 0)
m.e1037 = Constraint(expr= m.x365 + m.x381 - 40 * m.b977 <= 0)
m.e1038 = Constraint(expr= m.x366 + m.x382 - 40 * m.b978 <= 0)
m.e1039 = Constraint(expr= m.x367 + m.x383 - 40 * m.b979 <= 0)
m.e1040 = Constraint(expr= m.x368 + m.x384 - 40 * m.b980 <= 0)
m.e1041 = Constraint(expr= m.x369 + m.x385 - 60 * m.b981 <= 0)
m.e1042 = Constraint(expr= m.x370 + m.x386 - 60 * m.b982 <= 0)
m.e1043 = Constraint(expr= m.x371 + m.x387 - 60 * m.b983 <= 0)
m.e1044 = Constraint(expr= m.x372 + m.x388 - 60 * m.b984 <= 0)
m.e1045 = Constraint(expr= m.x373 + m.x389 - 60 * m.b985 <= 0)
m.e1046 = Constraint(expr= m.x374 + m.x390 - 60 * m.b986 <= 0)
m.e1047 = Constraint(expr= m.x375 + m.x391 - 60 * m.b987 <= 0)
m.e1048 = Constraint(expr= m.x376 + m.x392 - 60 * m.b988 <= 0)
m.e1049 = Constraint(expr= m.x393 - 15 * m.b989 <= 0)
m.e1050 = Constraint(expr= m.x394 - 15 * m.b990 <= 0)
m.e1051 = Constraint(expr= m.x395 - 15 * m.b991 <= 0)
m.e1052 = Constraint(expr= m.x396 - 15 * m.b992 <= 0)
m.e1053 = Constraint(expr= m.x397 - 15 * m.b993 <= 0)
m.e1054 = Constraint(expr= m.x398 - 15 * m.b994 <= 0)
m.e1055 = Constraint(expr= m.x399 - 15 * m.b995 <= 0)
m.e1056 = Constraint(expr= m.x400 - 15 * m.b996 <= 0)
m.e1057 = Constraint(expr= m.x401 - 25 * m.b997 <= 0)
m.e1058 = Constraint(expr= m.x402 - 25 * m.b998 <= 0)
m.e1059 = Constraint(expr= m.x403 - 25 * m.b999 <= 0)
m.e1060 = Constraint(expr= m.x404 - 25 * m.b1000 <= 0)
m.e1061 = Constraint(expr= m.x405 - 25 * m.b1001 <= 0)
m.e1062 = Constraint(expr= m.x406 - 25 * m.b1002 <= 0)
m.e1063 = Constraint(expr= m.x407 - 25 * m.b1003 <= 0)
m.e1064 = Constraint(expr= m.x408 - 25 * m.b1004 <= 0)
m.e1065 = Constraint(expr= m.x409 - 15 * m.b1005 <= 0)
m.e1066 = Constraint(expr= m.x410 - 15 * m.b1006 <= 0)
m.e1067 = Constraint(expr= m.x411 - 15 * m.b1007 <= 0)
m.e1068 = Constraint(expr= m.x412 - 15 * m.b1008 <= 0)
m.e1069 = Constraint(expr= m.x413 - 15 * m.b1009 <= 0)
m.e1070 = Constraint(expr= m.x414 - 15 * m.b1010 <= 0)
m.e1071 = Constraint(expr= m.x415 - 15 * m.b1011 <= 0)
m.e1072 = Constraint(expr= m.x416 - 15 * m.b1012 <= 0)
m.e1073 = Constraint(expr= m.x417 - 20 * m.b1013 <= 0)
m.e1074 = Constraint(expr= m.x418 - 20 * m.b1014 <= 0)
m.e1075 = Constraint(expr= m.x419 - 20 * m.b1015 <= 0)
m.e1076 = Constraint(expr= m.x420 - 20 * m.b1016 <= 0)
m.e1077 = Constraint(expr= m.x421 - 20 * m.b1017 <= 0)
m.e1078 = Constraint(expr= m.x422 - 20 * m.b1018 <= 0)
m.e1079 = Constraint(expr= m.x423 - 20 * m.b1019 <= 0)
m.e1080 = Constraint(expr= m.x424 - 20 * m.b1020 <= 0)
m.e1081 = Constraint(expr= m.x441 - 10 * m.b1021 <= 0)
m.e1082 = Constraint(expr= m.x442 - 10 * m.b1022 <= 0)
m.e1083 = Constraint(expr= m.x443 - 10 * m.b1023 <= 0)
m.e1084 = Constraint(expr= m.x444 - 10 * m.b1024 <= 0)
m.e1085 = Constraint(expr= m.x445 - 10 * m.b1025 <= 0)
m.e1086 = Constraint(expr= m.x446 - 10 * m.b1026 <= 0)
m.e1087 = Constraint(expr= m.x447 - 10 * m.b1027 <= 0)
m.e1088 = Constraint(expr= m.x448 - 10 * m.b1028 <= 0)
m.e1089 = Constraint(expr= m.x449 - 20 * m.b1029 <= 0)
m.e1090 = Constraint(expr= m.x450 - 20 * m.b1030 <= 0)
m.e1091 = Constraint(expr= m.x451 - 20 * m.b1031 <= 0)
m.e1092 = Constraint(expr= m.x452 - 20 * m.b1032 <= 0)
m.e1093 = Constraint(expr= m.x453 - 20 * m.b1033 <= 0)
m.e1094 = Constraint(expr= m.x454 - 20 * m.b1034 <= 0)
m.e1095 = Constraint(expr= m.x455 - 20 * m.b1035 <= 0)
m.e1096 = Constraint(expr= m.x456 - 20 * m.b1036 <= 0)
m.e1097 = Constraint(expr= m.x425 + m.x457 - 20 * m.b1037 <= 0)
m.e1098 = Constraint(expr= m.x426 + m.x458 - 20 * m.b1038 <= 0)
m.e1099 = Constraint(expr= m.x427 + m.x459 - 20 * m.b1039 <= 0)
m.e1100 = Constraint(expr= m.x428 + m.x460 - 20 * m.b1040 <= 0)
m.e1101 = Constraint(expr= m.x429 + m.x461 - 20 * m.b1041 <= 0)
m.e1102 = Constraint(expr= m.x430 + m.x462 - 20 * m.b1042 <= 0)
m.e1103 = Constraint(expr= m.x431 + m.x463 - 20 * m.b1043 <= 0)
m.e1104 = Constraint(expr= m.x432 + m.x464 - 20 * m.b1044 <= 0)
# exocartographer/gp_illumination_map.py
from __future__ import division
import numpy as np
import healpy as hp
from . import gp_map as gm
from .gp_map import draw_map_cl, map_logprior_cl
from .util import logit, inv_logit, flat_logit_log_prior
from .analytic_kernel import viewgeom, kernel
def quaternion_multiply(qa, qb):
result = np.zeros(np.broadcast(qa, qb).shape)
result[..., 0] = qa[..., 0]*qb[..., 0] - np.sum(qa[..., 1:]*qb[..., 1:], axis=-1)
result[..., 1] = qa[..., 0]*qb[..., 1] + qa[..., 1]*qb[..., 0] + qa[..., 2]*qb[..., 3] - qa[..., 3]*qb[..., 2]
result[..., 2] = qa[..., 0]*qb[..., 2] - qa[..., 1]*qb[..., 3] + qa[..., 2]*qb[..., 0] + qa[..., 3]*qb[..., 1]
result[..., 3] = qa[..., 0]*qb[..., 3] + qa[..., 1]*qb[..., 2] - qa[..., 2]*qb[..., 1] + qa[..., 3]*qb[...,0]
return result
def rotation_quaternions(axis, angles):
angles = np.atleast_1d(angles)
result = np.zeros((angles.shape[0], 4))
result[:, 0] = np.cos(angles/2.0)
result[:, 1:] = np.sin(angles/2.0)[:, np.newaxis]*axis
return result
def rotate_vector(rqs, v):
nrs = rqs.shape[0]
rqs = rqs
vq = np.zeros((nrs, 4))
vq[:,1:] = v
result = quaternion_multiply(rqs, vq)
rqs[:,1:] *= -1
result = quaternion_multiply(result, rqs)
return result[:,1:]
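# Quick illustrative check of the quaternion helpers above (not used by the class below):
# rotating the x unit vector by 90 degrees about the z axis should give the y unit vector.
#
#     q = rotation_quaternions(np.array([0.0, 0.0, 1.0]), np.pi / 2)   # shape (1, 4)
#     rotate_vector(q.copy(), np.array([1.0, 0.0, 0.0]))               # ~ array([[0., 1., 0.]])
#
# Note that rotate_vector negates the vector part of the quaternions it is given (to form
# the conjugate), so pass a copy if the original rotation quaternions are needed afterwards.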
class IlluminationMapPosterior(object):
"""A posterior class for mapping surfaces from a reflectance time series"
:param times:
Array of times of photometric measurements.
:param reflectance:
Array of photometric measurements at corresponding `times`.
:param sigma_reflectance:
Single value or array of 1-sigma uncertainties on reflectance measurements.
:param nside: (optional)
Resolution of HEALPix surface map. Has to be a power of 2. The number
of pixels will be :math:`12 N_\mathrm{side}^2`.
(default: ``4``)
:param nside_illum: (optional):
Resolution of HEALPix illumination map, i.e., the "kernel" of
illumination that is integrated against the pixel map.
(default: ``16``)
:param map_parameterization: (optional):
Parameterization of surface map to use. `pix` will parameterize the map with
values of each pixel; `alm` will parameterize the map in spherical harmonic coefficients.
(default: ``pix``)
"""
def __init__(self, times, reflectance, sigma_reflectance, nside=4, nside_illum=16, map_parameterization='pix'):
assert nside_illum >= nside, 'IlluminationMapPosterior: must have nside_illum >= nside'
self._map_parameterization = map_parameterization
self._times = np.array(times)
self._reflectance = np.array(reflectance)
self._sigma_reflectance = sigma_reflectance * np.ones_like(times)
self._nside = nside
self._nside_illum = nside_illum
self._fixed_params = {}
_param_names = ['log_error_scale', 'mu', 'log_sigma', 'logit_wn_rel_amp', 'logit_spatial_scale',
'log_rotation_period', 'log_orbital_period',
'logit_phi_orb', 'logit_cos_obl', 'logit_obl_orientation',
'logit_cos_inc']
@property
def times(self):
return self._times
@property
def ntimes(self):
return self.times.shape[0]
@property
def reflectance(self):
return self._reflectance
@property
def sigma_reflectance(self):
return self._sigma_reflectance
@property
def map_parameterization(self):
return self._map_parameterization
@property
def nside(self):
return self._nside
@property
def lmax(self):
return self.nside*4 - 1
@property
def mmax(self):
return self.lmax
@property
def nalms(self):
ncomplex = self.mmax * (2 * self.lmax + 1 - self.mmax) / 2 + self.lmax + 1
return int(2*ncomplex)
@property
def npix(self):
return hp.nside2npix(self.nside)
@property
def nmap_params(self):
if self.map_parameterization == 'pix':
return self.npix
elif self.map_parameterization == 'alm':
return self.nalms
else:
raise RuntimeError("Unrecognized map parameterization {}".format(self.map_parameterization))
@property
def nside_illum(self):
return self._nside_illum
@property
def npix_illum(self):
return hp.nside2npix(self.nside_illum)
@property
def fixed_params(self):
return self._fixed_params
@property
def full_dtype(self):
return np.dtype([(n, np.float) for n in self._param_names])
@property
def dtype(self):
dt = self.full_dtype
free_dt = [(param, dt[param]) for param in dt.names
if param not in self.fixed_params]
return np.dtype(free_dt)
@property
def dtype_map(self):
typel = [(param, self.dtype[param]) for param in self.dtype.names]
typel.append(('map', np.float, self.nmap_params))
return np.dtype(typel)
@property
def full_dtype_map(self):
typel = [(n, self.full_dtype[n]) for n in self.full_dtype.names]
typel.append(('map', np.float, self.nmap_params))
return np.dtype(typel)
@property
def nparams_full(self):
return len(self.full_dtype)
@property
def nparams(self):
return len(self.dtype)
@property
def nparams_full_map(self):
return self.nparams_full + self.nmap_params
@property
def nparams_map(self):
return self.nparams + self.nmap_params
@property
def wn_low(self):
return 0.01
@property
def wn_high(self):
return 0.99
@property
def spatial_scale_low(self):
return hp.nside2resol(self.nside)/3.0
@property
def spatial_scale_high(self):
return 3.0*np.pi
def error_scale(self, p):
return np.exp(self.to_params(p)['log_error_scale'])
def sigma(self, p):
p = self.to_params(p)
return np.exp(p['log_sigma'])
def wn_rel_amp(self, p):
p = self.to_params(p)
return inv_logit(p['logit_wn_rel_amp'], self.wn_low, self.wn_high)
def spatial_scale(self, p):
p = self.to_params(p)
return inv_logit(p['logit_spatial_scale'], self.spatial_scale_low, self.spatial_scale_high)
def rotation_period(self, p):
return np.exp(self.to_params(p)['log_rotation_period'])
def orbital_period(self, p):
return np.exp(self.to_params(p)['log_orbital_period'])
def phi_orb(self, p):
return inv_logit(self.to_params(p)['logit_phi_orb'], low=0, high=2*np.pi)
def cos_obl(self, p):
return inv_logit(self.to_params(p)['logit_cos_obl'], low=0, high=1)
def obl(self, p):
return np.arccos(self.cos_obl(p))
def obl_orientation(self, p):
return inv_logit(self.to_params(p)['logit_obl_orientation'], low=0, high=2*np.pi)
def cos_inc(self, p):
return inv_logit(self.to_params(p)['logit_cos_inc'], low=0, high=1)
def inc(self, p):
return np.arccos(self.cos_inc(p))
def set_params(self, p, dict):
p = np.atleast_1d(p).view(self.dtype)
logit_names = {'wn_rel_amp': (self.wn_low, self.wn_high),
'spatial_scale': (self.spatial_scale_low, self.spatial_scale_high),
'phi_orb': (0, 2*np.pi),
'cos_obl': (0, 1),
'obl_orientation': (0, 2*np.pi),
'cos_inc': (0, 1)}
log_names = set(['err_scale', 'sigma', 'rotation_period', 'orbital_period'])
for n, x in dict.items():
if n in p.dtype.names:
p[n] = x
elif n in logit_names:
l,h = logit_names[n]
p['logit_' + n] = logit(x, l, h)
elif n in log_names:
p['log_' + n] = np.log(x)
return p
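    # Example of the name mapping above (illustrative): passing {'rotation_period': 24.0}
    # stores np.log(24.0) into p['log_rotation_period'], and {'cos_inc': 0.5} stores
    # logit(0.5, 0, 1) into p['logit_cos_inc'].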
def fix_params(self, params):
"""Fix parameters to the specified values and remove them from the
Posterior's `dtype`.
:param params:
A dictionary of parameters to fix, and the values to fix them to.
"""
self._fixed_params.update(params)
def unfix_params(self, params=None):
"""Let fixed parameters vary.
:param params:
A list of parameters to unfix. If ``None``, all parameters will be allowed to vary.
(default: ``None``)
"""
if params is None:
self._fixed_params = {}
else:
for p in params:
try:
self._fixed_params.pop(p)
except KeyError:
continue
def to_params(self, p):
"""
Return a typed version of ndarray `p`.
"""
if isinstance(p, np.ndarray):
if p.dtype == self.full_dtype or p.dtype == self.full_dtype_map:
return p.squeeze()
else:
if p.dtype == self.dtype:
# Extend the array with the fixed parameters
pp = np.empty(p.shape, dtype=self.full_dtype)
for n in p.dtype.names:
pp[n] = p[n]
for n in self.fixed_params.keys():
pp[n] = self.fixed_params[n]
return pp.squeeze()
elif p.dtype == self.dtype_map:
pp = np.empty(p.shape, dtype=self.full_dtype_map)
for n in p.dtype.names:
pp[n] = p[n]
for n in self.fixed_params.keys():
pp[n] = self.fixed_params[n]
return pp.squeeze()
else:
if p.shape[-1] == self.nparams:
return self.to_params(p.view(self.dtype).squeeze())
elif p.shape[-1] == self.nparams_map:
return self.to_params(p.view(self.dtype_map).squeeze())
else:
print(p.shape[-1], self.nparams_map)
raise ValueError("to_params: bad parameter dimension")
else:
p = np.atleast_1d(p)
return self.to_params(p)
def add_map_to_full_params(self, p, map):
return self._add_map(p, map)
def params_map_to_params(self, pm, include_fixed_params=False):
pm = self.to_params(pm)
pp = self.to_params(np.zeros(self.nparams))
for n in pp.dtype.names:
pp[n] = pm[n]
if not include_fixed_params:
unfixed_params = [p for p in pp.dtype.names if p not in self.fixed_params]
pp = pp[unfixed_params]
return pp
def spatial_scale(self, p):
p = self.to_params(p)
return inv_logit(p['logit_spatial_scale'],
low=self.spatial_scale_low,
high=self.spatial_scale_high)
def wn_rel_amp(self, p):
p = self.to_params(p)
return inv_logit(p['logit_wn_rel_amp'],
low=self.wn_low,
high=self.wn_high)
def visibility_illumination_matrix(self, p):
r"""Produce the "kernel" of illumination that is integrated against the pixel map.
:param p:
Array of values for unfixed parameters.
The kernel is composed of a product of cosines: the illumination is
proportional to :math:`\vec{n} \cdot \vec{n_s}`, where :math:`\vec{n}`
is the pixel normal and :math:`\vec{n_s}` is the vector to the star.
The visibility is proportional to :math:`\vec{n} \cdot \vec{n_o}`,
where :math:`\vec{n_o}` is the vector to the observer. The
contribution of any pixel of value `p` to the lightcurve is therefore
:math:`p (\vec{n} \cdot \vec{n_s}) (\vec{n} \cdot \vec{n_o})` if both
:math:`\vec{n} \cdot \vec{n_s}` and :math:`\vec{n} \cdot \vec{n_o}` are
> 0, and zero otherwise. So, we need to evaluate these dot products.
Fix a coordinate system in which to evaluate these dot products
as follows:
The orbit is in the x-y plane (so :math:`\hat{L} \parallel \hat{z}`),
with the x-axis pointing along superior conjunction. The observer is
therefore in the x-z plane, and has an inclination angle :math:`\iota`
in :math:`[0, \pi/2]` to the orbital plane. So :math:`\vec{n_o} =
(-\sin(\iota), 0, \cos(\iota))`.
The planet star vector, :math:`\vec{n_s}`, is given by :math:`\vec{n_s} =
(-\sin(x_i), -\cos(x_i), 0)`, where :math:`x_i` is the orbital phase. If
the orbit has phase :math:`x_{i0}` at `t = 0`, then
:math:`n_s = R_z(2\pi/P_\mathrm{orb}t + x_{i0}) \cdot (-1,0,0)`.
For the normal vector to the planet, we must describe the series of
rotations that maps the orbital coordinate system into the planet-centred
coordinate system. Imagine that the planet spin axis is at first aligned
with the z-axis. We apply :math:`R_y(\mathrm{obl})`, where `obl` is the
obliquity angle in :math:`[0, \pi/2]`, and then
:math:`R_z(\phi_\mathrm{rot})`, where :math:`\phi_\mathrm{rot}` is the
azimuthal angle of the planet's spin axis in :math:`[0, 2\pi]`. Now the
planet's spin axis points to :math:`S =
(\cos(\phi_\mathrm{rot})*\sin(\mathrm{obl}),
\sin(\phi_\mathrm{rot})*\sin(\mathrm{obl}), \cos(\mathrm{obl}))`. At time
:math:`t`, the planet's normals are given by :math:`n(t) =
R_S(2\pi/P_\mathrm{rot} t) n(0)`, where :math:`n(0) =
R_z(\phi_\mathrm{rot}) R_y(\mathrm{obl}) n`, with `n` the body-centred
normals to the pixels. We can now evaluate dot products in the fixed
orbital frame.
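        A minimal illustrative sketch of these two vectors (the variable names and the
        helper below are assumptions added for clarity, not part of this class)::

            import numpy as np

            def Rz(theta):
                c, s = np.cos(theta), np.sin(theta)
                return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

            iota = 0.3                                   # inclination in radians
            n_o = np.array([-np.sin(iota), 0.0, np.cos(iota)])   # towards the observer
            xi = 2.0 * np.pi * 0.1                       # orbital phase at some time t
            n_s = Rz(xi) @ np.array([-1.0, 0.0, 0.0])    # towards the star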
In principle, we are done, but there is an efficiency consideration. For
each time at which we have an observation, | |
a list of string or a list of pair
of string (see details in ``encode_plus``).
batch_entity_spans_or_entity_spans_pairs (:obj:`List[List[Tuple[int, int]]]`,
            :obj:`List[Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]]`, `optional`):
Batch of entity span sequences or pairs of entity span sequences to be encoded (see details in
``encode_plus``).
batch_entities_or_entities_pairs (:obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`,
`optional`):
Batch of entity sequences or pairs of entity sequences to be encoded (see details in ``encode_plus``).
max_entity_length (:obj:`int`, `optional`):
The maximum length of the entity sequence.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]],
batch_entity_spans_or_entity_spans_pairs: Optional[
Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]]
] = None,
batch_entities_or_entities_pairs: Optional[
Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]]
] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
if is_split_into_words:
raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
# input_ids is a list of tuples (one for each example in the batch)
input_ids = []
entity_ids = []
entity_token_spans = []
for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
if not isinstance(text_or_text_pair, (list, tuple)):
text, text_pair = text_or_text_pair, None
else:
text, text_pair = text_or_text_pair
entities, entities_pair = None, None
if batch_entities_or_entities_pairs is not None:
entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
if entities_or_entities_pairs:
if isinstance(entities_or_entities_pairs[0], str):
entities, entities_pair = entities_or_entities_pairs, None
else:
entities, entities_pair = entities_or_entities_pairs
entity_spans, entity_spans_pair = None, None
if batch_entity_spans_or_entity_spans_pairs is not None:
entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
if entity_spans_or_entity_spans_pairs:
if isinstance(entity_spans_or_entity_spans_pairs[0][0], int):
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None
else:
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
(
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
) = self._create_input_sequence(
text=text,
text_pair=text_pair,
entities=entities,
entities_pair=entities_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
**kwargs,
)
input_ids.append((first_ids, second_ids))
entity_ids.append((first_entity_ids, second_entity_ids))
entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
batch_outputs = self._batch_prepare_for_model(
input_ids,
batch_entity_ids_pairs=entity_ids,
batch_entity_token_spans_pairs=entity_token_spans,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
def _create_input_sequence(
self,
text: Union[TextInput],
text_pair: Optional[Union[TextInput]] = None,
entities: Optional[EntityInput] = None,
entities_pair: Optional[EntityInput] = None,
entity_spans: Optional[EntitySpanInput] = None,
entity_spans_pair: Optional[EntitySpanInput] = None,
**kwargs
) -> Tuple[list, list, list, list, list, list]:
def get_input_ids(text):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
def get_input_ids_and_entity_token_spans(text, entity_spans):
if entity_spans is None:
return get_input_ids(text), None
cur = 0
input_ids = []
entity_token_spans = [None] * len(entity_spans)
split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
char_pos2token_pos = {}
for split_char_position in split_char_positions:
orig_split_char_position = split_char_position
if (
split_char_position > 0 and text[split_char_position - 1] == " "
): # whitespace should be prepended to the following token
split_char_position -= 1
if cur != split_char_position:
input_ids += get_input_ids(text[cur:split_char_position])
cur = split_char_position
char_pos2token_pos[orig_split_char_position] = len(input_ids)
input_ids += get_input_ids(text[cur:])
entity_token_spans = [
(char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans
]
return input_ids, entity_token_spans
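        # Illustrative walk-through (an assumption about typical behaviour, not taken from
        # the original source): for text = "Beyonce lives in Los Angeles" with
        # entity_spans = [(0, 7), (17, 28)], the split positions are {0, 7, 17, 28}, the
        # text is tokenized piecewise ("Beyonce", " lives in", " Los Angeles"), and each
        # split character offset is mapped to the token index at which its piece starts,
        # so every character span is converted into a (start_token, end_token) pair in
        # entity_token_spans.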
first_ids, second_ids = None, None
first_entity_ids, second_entity_ids = None, None
first_entity_token_spans, second_entity_token_spans = None, None
if self.task is None:
unk_entity_id = self.entity_vocab["[UNK]"]
mask_entity_id = self.entity_vocab["[MASK]"]
if entity_spans is None:
first_ids = get_input_ids(text)
else:
assert isinstance(entity_spans, list) and (
len(entity_spans) == 0 or isinstance(entity_spans[0], tuple)
), "entity_spans should be given as a list of tuples containing the start and end character indices"
assert entities is None or (
isinstance(entities, list) and (len(entities) == 0 or isinstance(entities[0], str))
), "If you specify entities, they should be given as a list of entity names"
assert entities is None or len(entities) == len(
entity_spans
), "If you specify entities, entities and entity_spans must be the same length"
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
if entities is None:
first_entity_ids = [mask_entity_id] * len(entity_spans)
else:
first_entity_ids = [self.entity_vocab.get(entity, unk_entity_id) for entity in entities]
if text_pair is not None:
if entity_spans_pair is None:
second_ids = get_input_ids(text_pair)
else:
assert isinstance(entity_spans_pair, list) and (
len(entity_spans_pair) == 0 or isinstance(entity_spans_pair[0], tuple)
), "entity_spans_pair should be given as a list of tuples containing the start and end character indices"
assert entities_pair is None or (
isinstance(entities_pair, list)
and (len(entities_pair) == 0 or isinstance(entities_pair[0], str))
), "If you specify entities_pair, they should be given as a list of entity names"
assert entities_pair is None or len(entities_pair) == len(
entity_spans_pair
), "If you specify entities_pair, entities_pair and entity_spans_pair must be the same length"
second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
text_pair, entity_spans_pair
)
if entities_pair is None:
second_entity_ids = [mask_entity_id] * len(entity_spans_pair)
else:
second_entity_ids = [self.entity_vocab.get(entity, unk_entity_id) for entity in entities_pair]
elif self.task == "entity_classification":
assert (
isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)
), "Entity spans should be a list containing a single tuple containing the start and end character indices of an entity"
first_entity_ids = [self.entity_vocab["[MASK]"]]
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
# add special tokens to input ids
entity_token_start, entity_token_end = first_entity_token_spans[0]
first_ids = (
first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:]
)
first_ids = (
first_ids[:entity_token_start]
+ [self.additional_special_tokens_ids[0]]
+ first_ids[entity_token_start:]
)
first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]
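            # Illustrative note (an assumption about the intent of the "+ 2" above): if the
            # entity originally covered token positions (3, 5), one copy of the first
            # additional special token is inserted just before position 3 and another just
            # after position 5, so the recorded span becomes (3, 7) and still encloses the
            # entity together with its two marker tokens.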
elif self.task == "entity_pair_classification":
assert (
isinstance(entity_spans, list)
and len(entity_spans) == 2
and isinstance(entity_spans[0], tuple)
and isinstance(entity_spans[1], tuple)
), "Entity spans should be provided as a list of tuples, each tuple containing the start and end character indices of an entity"
head_span, tail_span = entity_spans
first_entity_ids = [self.entity_vocab["[MASK]"], self.entity_vocab["[MASK2]"]]
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
head_token_span, tail_token_span = first_entity_token_spans
token_span_with_special_token_ids = [
(head_token_span, self.additional_special_tokens_ids[0]),
(tail_token_span, self.additional_special_tokens_ids[1]),
]
if head_token_span[0] < tail_token_span[0]:
first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
else:
first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)
for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]
elif self.task == "entity_span_classification":
mask_entity_id = self.entity_vocab["[MASK]"]
assert isinstance(entity_spans, list) and isinstance(
entity_spans[0], tuple
), "Entity spans should be provided as a list of tuples, each tuple containing the start and end character indices of an entity"
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
first_entity_ids = [mask_entity_id] * len(entity_spans)
else:
raise ValueError(f"Task {self.task} not supported")
return (
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
self,
batch_ids_pairs: List[Tuple[List[int], None]],
batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]],
batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
batch_entity_ids_pairs: list of entity ids or entity ids pairs
batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
max_entity_length: The maximum length of the entity sequence.
"""
batch_outputs = {}
for input_ids, entity_ids, entity_token_span_pairs in zip(
batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
):
first_ids, second_ids = input_ids
first_entity_ids, second_entity_ids = entity_ids
first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
outputs = self.prepare_for_model(
first_ids,
second_ids,
entity_ids=first_entity_ids,
pair_entity_ids=second_entity_ids,
entity_token_spans=first_entity_token_spans,
pair_entity_token_spans=second_entity_token_spans,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
| |
\
'ORDER BY forecastdate DESC LIMIT 1'.format(algoCode, ID)
latest_date = pd.read_sql_query(date_query, self.engine) # most recent forecast date calculation
# if table has forecast prices already find the latest one and delete it
# need to use most recent data for today if before market close at 4pm
if not latest_date.empty:
latest_date_str = "'" + str(latest_date['forecastdate'][0]) + "'"
delete_query = 'DELETE FROM dbo_algorithmforecast WHERE algorithmcode={} AND instrumentid={} AND ' \
'forecastdate={}'.format(algoCode, ID, latest_date_str)
self.engine.execute(delete_query)
# get raw price data from database
data_query = 'SELECT date, close FROM dbo_instrumentstatistics WHERE instrumentid=%s ORDER BY Date ASC' % ID
data = pd.read_sql_query(data_query, self.engine)
# regression model from previous days
input_length = 20
# predict ahead
forecast_length = 5
for n in range(input_length, len(data)):
recent_data = data[n - input_length:n]
# get most recent trading day
forecastDate = "'" + str(data['date'][n]) + "'"
# x and y axis
x_axis = np.array(recent_data['date'])
y_axis = np.array(recent_data['close'])
            # convert date to an ordinal value to allow for regression
df = pd.DataFrame({'date': x_axis, 'close': y_axis})
df['date'] = pd.to_datetime(df['date'])
df['date'] = df['date'].map(dt.datetime.toordinal)
            X = np.array(df['date']).reshape(-1, 1)
y = np.array(df['close'])
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
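            # Hedged note (names are the ones defined above): once fitted, a single future
            # ordinal date can be forecast with
            #   pol_reg.predict(poly_reg.fit_transform([[future_ordinal_date]]))
            # which is exactly how forecastClose is computed in the loop below;
            # `future_ordinal_date` is an illustrative placeholder.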
# plt.scatter(X, y, color='red')
# plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
# plt.title('Prediction')
# plt.xlabel('Date')
# plt.ylabel('Percentage Change')
# plt.show()
forecast_dates_query = 'SELECT date from dbo_datedim WHERE date > {} AND weekend=0 AND isholiday=0 ' \
'ORDER BY date ASC LIMIT {}'.format(forecastDate, forecast_length)
future_dates = pd.read_sql_query(forecast_dates_query, self.engine)
# delete outdated forecasts for the next period
delete_query = 'DELETE FROM dbo_algorithmforecast WHERE algorithmcode={} AND instrumentid={} AND ' \
'forecastdate>{}'.format(algoCode, ID, forecastDate)
self.engine.execute(delete_query)
for n in range(len(future_dates)):
insert_query = 'INSERT INTO dbo_algorithmforecast VALUES ({}, {}, {}, {}, {})'
forecastDate = future_dates['date'][n]
ordinalDate = forecastDate.toordinal()
forecastDate = "'" + str(future_dates['date'][n]) + "'"
forecastClose = pol_reg.predict(poly_reg.fit_transform([[ordinalDate]]))
forecastClose = (round(forecastClose[0], 3))
# populate entire table if empty
# or add new dates based on information in Statistics table
insert_query = insert_query.format(forecastDate, ID, forecastClose, algoCode, 0)
self.engine.execute(insert_query)
def MSF1(self):
        #Queries the database to grab all of the macroeconomic variable codes
query = "SELECT macroeconcode FROM dbo_macroeconmaster WHERE activecode = 'A'"
id = pd.read_sql_query(query, self.engine)
id = id.reset_index(drop=True)
#Queries the database to grab all of the instrument IDs
query = 'SELECT instrumentid FROM dbo_instrumentmaster'
id2 = pd.read_sql_query(query, self.engine)
id2 = id2.reset_index(drop = True)
# Sets value for number of datapoints you would like to work with
n = 9
# Getting Dates for Future Forecast#
#Initialize the currentDate variable for use when grabbing the forecasted dates
currentDate = datetime.today()
# Creates a list to store future forecast dates
date = []
# This will set the value of count according to which month we are in, this is to avoid having past forecast dates in the list
if (currentDate.month < 4):
count = 0
elif (currentDate.month < 7 and currentDate.month >= 4):
count = 1
elif (currentDate.month < 10 and currentDate.month >= 7):
count = 2
else:
count = 3
# Initialize a variable to the current year
year = currentDate.year
        #Prints out the accuracy figures; not necessary and can be commented out
FinsterTab.W2020.AccuracyTest.MSF1_accuracy(self.engine)
# Setup a for loop to loop through and append the date list with the date of the start of the next quarter
# For loop will run n times, corresponding to amount of data points we are working with
for i in range(n):
# If the count is 0 then we are still in the first quarter
if (count == 0):
# Append the date list with corresponding quarter and year
date.append(str(year) + "-03-" + "31")
# Increase count so this date is not repeated for this year
count += 1
#Do it again for the next quarter
elif (count == 1):
date.append(str(year) + "-06-" + "30")
count += 1
#And again for the next quarter
elif (count == 2):
date.append(str(year) + "-09-" + "30")
count += 1
# Until we account for the last quarter of the year
else:
date.append(str(year) + "-12-" + "31")
count = 0
                # Where we then increment the year for the next iteration
year = year + 1
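        # Worked example of the loop above (illustrative): if today were 2020-05-15, count
        # starts at 1, so the nine generated dates are 2020-06-30, 2020-09-30, 2020-12-31,
        # 2021-03-31, 2021-06-30, 2021-09-30, 2021-12-31, 2022-03-31 and 2022-06-30.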
#Initializes a list for which we will eventually be storing all data to add to the macroeconalgorithm database table
data = []
#Create a for loop to iterate through all of the instrument ids
for v in id2['instrumentid']:
#Median_forecast will be a dictionary where the key is the date and the value is a list of forecasted prices
median_forecast = {}
#This will be used to easily combine all of the forecasts for different dates to determine the median forecast value
for i in date:
temp = {i: []}
median_forecast.update(temp)
            # Initializes a variable to represent today's date, used to fetch forecast dates
currentDate = str(datetime.today())
# Applies quotes to current date so it can be read as a string
currentDate = ("'" + currentDate + "'")
#This query will grab quarterly instrument prices from between 2014 and the current date to be used in the forecasting
query = "SELECT close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN '2014-03-21' AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, currentDate)
# Executes the query and stores the result in a dataframe variable
df2 = pd.read_sql_query(query, self.engine)
#This for loop iterates through the different macro economic codes to calculate the percent change for each macroeconomic variable
for x in id['macroeconcode']:
#Retrieves Relevant Data from Database
query = 'SELECT * FROM dbo_macroeconstatistics WHERE macroeconcode = {}'.format('"' + str(x) + '"')
df = pd.read_sql_query(query, self.engine)
macro = df.tail(n)
SP = df2.tail(n)
temp = df.tail(n+1)
temp = temp.reset_index()
                #Converts macro variables to percent change
macroPercentChange = macro
macro = macro.reset_index(drop=True)
SP = SP.reset_index(drop=True)
macroPercentChange = macroPercentChange.reset_index(drop=True)
for i in range(0, n):
if (i == 0):
macrov = (macro['statistics'][i]-temp['statistics'][i])/temp['statistics'][i]
macroPercentChange['statistics'].iloc[i] = macrov * 100
else:
macrov = (macro['statistics'][i]-macro['statistics'][i - 1])/macro['statistics'][i - 1]
macroPercentChange['statistics'].iloc[i] = macrov * 100
#Algorithm for forecast price
S = DataForecast.calc(self, macroPercentChange, SP, n) #Calculates the average GDP and S&P values for the given data points over n days and performs operations on GDP average
# temp_price will be used to hold the previous forecast price for the next prediction
temp_price = 0
# isFirst will determine whether or not this is the first calculation being done
                # If it is true then we use the most recent instrument statistic to forecast the first price point
                # If it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# Setup a for loop to calculate the final forecast price and add data to the list variable data
for i in range(n):
                    if isFirst:
                        # the same compounding formula applies to every macroeconomic code,
                        # seeded with the most recent closing price
                        temp_price = ((S*SP['close'].iloc[n-1]) + SP['close'].iloc[n-1])
                        isFirst = False
                    else:
                        # subsequent forecasts compound off the previous forecast price
                        temp_price = ((S*temp_price) + temp_price)
#Once the forecast price is calculated append it to median_forecast list
median_forecast[date[i]].append(temp_price)
#Calculates the median value for each date using a list of prices forecasted by each individual macro economic variable
forecast_prices = []
for i in date:
#Sort the forecasted prices based on date
sorted_prices = sorted(median_forecast[i])
                #calculate the median forecasted price for each date
                center = int(len(sorted_prices)/2)
                if len(sorted_prices) % 2 == 0:
                    # even count: the median is the mean of the two middle values
                    forecast_prices.append((sorted_prices[center] + sorted_prices[center - 1])/2)
                else:
                    # odd count: the median is the middle value
                    forecast_prices.append(sorted_prices[center])
#Set up a for loop to construct a list using variables associated with macroeconalgorithm database table
for i in range(len(forecast_prices)):
data.append([date[i], v, 'ALL', forecast_prices[i], 'MSF1', 0])
# Convert | |
# File: code_python/1.2/edge-extract.py
# ------------------------------------------------------------------------------
# IMPORTS
# ------------------------------------------------------------------------------
from sys import version_info
from sys import path as syspath
from os import path
import json
_CURRENT_DIRECTORY = syspath[0]
try:
import util
# if you have problems visit:
# https://gist.github.com/pinxau1000/8817d4ef0ed766c78bac8e6feafc8b47
# https://github.com/pinxau1000/
except ModuleNotFoundError:
from urllib import request
print("'util.py' not found on the same folder as this script!")
_url_utilpy = "https://gist.githubusercontent.com/pinxau1000/8817d4ef0ed766c78bac8e6feafc8b47/raw/util.py"
print("Downloading util.py from:\n" + _url_utilpy)
# https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
request.urlretrieve(_url_utilpy, "util.py")
print("Downloading finished!")
import util
try:
import cv2 as cv
except ModuleNotFoundError:
util.install("opencv-python")
import cv2 as cv
try:
from matplotlib import pyplot as plt
except ModuleNotFoundError:
util.install("matplotlib")
from matplotlib import pyplot as plt
try:
import numpy as np
except ModuleNotFoundError:
util.install("numpy>=1.19,<1.19.4")
import numpy as np
try:
from packaging import version
except ModuleNotFoundError:
util.install("packaging")
from packaging import version
try:
import click
except ModuleNotFoundError:
util.install("click")
import click
# ------------------------------------------------------------------------------
# REQUIREMENTS CHECK
# ------------------------------------------------------------------------------
assert version_info >= (3, 5), \
    "This script requires Python 3.5.0 or above!"
assert version.parse(cv.__version__) >= version.parse("4.4.0"), \
    "This script requires OpenCV 4.4.0 or above!"
assert version.parse(plt.matplotlib.__version__) >= version.parse("3.3.0"), \
    "This script requires MatPlotLib 3.3.0 or above!"
assert version.parse("1.19.0") <= version.parse(np.__version__) < version.parse("1.19.4"), \
    "This script requires Numpy version >= 1.19.0 and < 1.19.4 !"
assert version.parse(click.__version__) >= version.parse("7.1.0"), \
    "This script requires Click 7.1.0 or above!"
# ------------------------------------------------------------------------------
# Load Default Pictures
# ------------------------------------------------------------------------------
_PATH_2_DATA = path.join(_CURRENT_DIRECTORY, "../../data/")
_IMG_ORIG_NAME = "img05.jpg"
_IMG_NOISE_NAME = "img05_noise.jpg"
_IMG_HARRIS_NAME = "Harris.jpg"
_FULL_PATH_ORIG = path.join(_PATH_2_DATA, _IMG_ORIG_NAME)
_FULL_PATH_NOISE = path.join(_PATH_2_DATA, _IMG_NOISE_NAME)
_FULL_PATH_HARRIS = path.join(_PATH_2_DATA, _IMG_HARRIS_NAME)
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Sobel Filter
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--deriv_x",
default=str(list(range(1, 3))),
help="List with X derivatives order")
@click.option("--deriv_y",
default=str(list(range(1, 3))),
help="List with Y derivatives order")
@click.option("--ksize",
default=None,
type=int,
help="Size of the extended Sobel kernel; it must be 1, 3, 5, "
"or 7.")
@click.option("--threshold",
default=0.125,
type=float,
help="Values below threshold*max(SobelMagnitude) are set to 0.")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]). If a 3 elements tuple a bilateral "
"filtering will be applied with d=filter_params[0], "
"sigmaColor=filter_params[1] and sigmaSpace=filter_params["
"2]")
@click.option("--save",
default="output_SobelFilter",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def sobel_filter(image, deriv_x, deriv_y, ksize, threshold,
filter_params, save, dpi, num):
image = util.load_image_RGB(image)
deriv_x = json.loads(deriv_x)
deriv_y = json.loads(deriv_y)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
# Best bilateral_params = (6, 200, 20)
elif len(filter_params) == 3:
print("Applying Bilateral Filter")
image = cv.bilateralFilter(src=image,
d=filter_params[0],
sigmaColor=filter_params[1],
sigmaSpace=filter_params[2])
image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
# Initialize the mean_images as list. Values are assigned on the for-loop
sobel_images_dx = []
titles_images_dx = []
for dx in deriv_x:
# If not specified or set to None KSize is 3!
sobel_images_dx.append(cv.Sobel(src=image,
ddepth=cv.CV_64F,
dx=dx,
dy=0,
ksize=ksize))
titles_images_dx.append(f"dx = {dx}")
sobel_images_dy = []
titles_images_dy = []
for dy in deriv_y:
sobel_images_dy.append(cv.Sobel(src=image,
ddepth=cv.CV_64F,
dx=0,
dy=dy,
ksize=ksize))
titles_images_dy.append(f"dy = {dy}")
mag = cv.magnitude(sobel_images_dx[-1], sobel_images_dy[-1])
ang = cv.phase(sobel_images_dx[-1], sobel_images_dy[-1],
angleInDegrees=True)
# Values below threshold are set to zero.
# Needed to visualize the orientation/angle
_, mask = cv.threshold(mag, np.max(mag) * threshold, 1, cv.THRESH_BINARY)
mag = np.multiply(mask, mag)
ang = np.multiply(mask, ang)
sobel_images_dxdy = [mag, ang]
titles_images_dxdy = [f"Magnitude\ndx = {max(deriv_x)}, "
f"dy = {max(deriv_y)}",
f"Orientation\ndx = {max(deriv_x)}, "
f"dy = {max(deriv_y)}"]
# Before plotting we need to type convert
sobel_images_dx = list(np.uint8(np.abs(sobel_images_dx)))
sobel_images_dy = list(np.uint8(np.abs(sobel_images_dy)))
sobel_images_dxdy = list(np.uint8(np.abs(sobel_images_dxdy)))
# Copy the arrays with the images generated in the for-loop and adds the
# original noisy image for comparison. Also copies and adds the titles.
plot_images = sobel_images_dx + sobel_images_dy + [sobel_images_dxdy[0]]
plot_images.insert(0, image)
plot_titles = titles_images_dx + titles_images_dy + [titles_images_dxdy[0]]
plot_titles.insert(0, "Original Image")
# Plots the images.
fig = util.plotImages(plot_images,
plot_titles,
show=True,
main_title="Sobel Filter - cv.Sobel",
cmap="gray",
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save)
if num is not None:
num += 1
# Plots the images.
fig = util.plotImages(sobel_images_dxdy,
titles_images_dxdy,
show=True,
main_title="Sobel Filter - cv.Sobel",
cols=2,
cmap="turbo",
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save + "_MagAng")
# Wait for a key press to close figures
input("Press Enter to continue...")
# ------------------------------------------------------------------------------
# Sobel Filter Ddepth Problems
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--ksize",
default=None,
type=int,
help="Size of the extended Sobel kernel; it must be 1, 3, 5, "
"or 7.")
@click.option("--threshold",
default=0.125,
type=float,
help="Values below threshold*max(SobelMagnitude) are set to 0.")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]). If a 3 elements tuple a bilateral "
"filtering will be applied with d=filter_params[0], "
"sigmaColor=filter_params[1] and sigmaSpace=filter_params["
"2]")
@click.option("--save",
default="output_SobelFilter_dataTypesProblems",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def sobel_filter_ddepth(image, ksize, threshold, filter_params, save, dpi, num):
image = util.load_image_RGB(image)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
# Best bilateral_params = (6, 200, 20)
elif len(filter_params) == 3:
print("Applying Bilateral Filter")
image = cv.bilateralFilter(src=image,
d=filter_params[0],
sigmaColor=filter_params[1],
sigmaSpace=filter_params[2])
image_gray = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
sobel_1st_float64 = cv.Sobel(src=image_gray,
ddepth=cv.CV_64F,
dx=1,
dy=0,
ksize=ksize)
sobel_1st_float64 = np.uint8(np.abs(sobel_1st_float64))
sobel_1st_uint8 = cv.Sobel(src=image_gray,
ddepth=cv.CV_8U,
dx=1,
dy=0,
ksize=ksize)
sobel_2nd_float64 = cv.Sobel(src=image_gray,
ddepth=cv.CV_64F,
dx=2,
dy=0,
ksize=ksize)
sobel_2nd_float64 = np.uint8(np.abs(sobel_2nd_float64))
sobel_2nd_uint8 = cv.Sobel(src=image_gray,
ddepth=cv.CV_8U,
dx=2,
dy=0,
ksize=ksize)
plot_images = [sobel_1st_float64, sobel_1st_uint8, sobel_2nd_float64,
sobel_2nd_uint8]
plot_titles = ["dx=1 64F", "dx=1 8U", "dx=2 64F", "dx=2 8U"]
# Plots the Black and White images.
fig = util.plotImages(plot_images,
plot_titles,
show=True,
main_title="Sobel Derivatives Problems 1 - cv.Sobel",
cmap="gray",
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save + "_1")
image_1st_uint8 = np.copy(image)
image_1st_float64 = np.copy(image)
image_2nd_uint8 = np.copy(image)
image_2nd_float64 = np.copy(image)
image_1st_uint8[sobel_1st_uint8 > threshold * np.max(sobel_1st_uint8)] \
= [255, 0, 0]
image_1st_float64[sobel_1st_float64 > threshold * np.max(sobel_1st_float64)] \
= [255, 0, 0]
image_2nd_uint8[sobel_2nd_uint8 > threshold * np.max(sobel_2nd_uint8)] \
= [255, 0, 0]
image_2nd_float64[sobel_2nd_float64 > threshold * np.max(sobel_2nd_float64)] \
= [255, 0, 0]
plot_images = [image_1st_float64, image_1st_uint8,
image_2nd_float64, image_2nd_uint8]
plot_titles = ["dx=1 64F", "dx=1 8U", "dx=2 64F", "dx=2 8U"]
if num is not None:
num += 1
# Plots the images.
fig = util.plotImages(plot_images,
plot_titles,
show=True,
main_title="Sobel Derivatives Problems 2 - cv.Sobel",
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save + "_2")
# Wait for a key press to close figures
input("Press Enter to continue...")
# ------------------------------------------------------------------------------
# Scharr Filter
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]). If a 3 elements tuple a bilateral "
"filtering will be applied with d=filter_params[0], "
"sigmaColor=filter_params[1] and sigmaSpace=filter_params["
"2]")
@click.option("--save",
default="output_ScharrFilter",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def scharr_filter(image, filter_params, save, dpi, num):
image = util.load_image_RGB(image)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
# Best bilateral_params = (6, 200, 20)
elif len(filter_params) == 3:
print("Applying Bilateral Filter")
image = cv.bilateralFilter(src=image,
d=filter_params[0],
sigmaColor=filter_params[1],
sigmaSpace=filter_params[2])
image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
scharr_x = cv.Scharr(src=image, ddepth=cv.CV_64F, dx=1, dy=0)
scharr_y = cv.Scharr(src=image, ddepth=cv.CV_64F, dx=0, dy=1)
scharr_xy = cv.magnitude(scharr_x, scharr_y)
    scharr_x = np.uint8(np.abs(scharr_x))
    scharr_y = np.uint8(np.abs(scharr_y))
    scharr_xy = np.uint8(np.abs(scharr_xy))
scharr_images = [image,
scharr_x,
scharr_y,
scharr_xy]
titles_images = ["Original Image",
"Scharr dx=1",
"Scharr dy=1",
"Magnitude"]
# Plots the images.
fig = util.plotImages(scharr_images,
titles_images,
show=True,
main_title="Scharr Filter - cv.Scharr",
cmap="gray",
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save)
# | |
import os
import sys
import glob
import gzip
import multiprocessing
import re
import uuid
from core.log import log
from core.config import HotSOSConfig
class FileSearchException(Exception):
def __init__(self, msg):
self.msg = msg
class FilterDef(object):
def __init__(self, pattern, invert_match=False):
"""
Add a filter definition
@param pattern: regex pattern to search for
        @param invert_match: if True, lines matching the pattern are filtered out;
                             otherwise lines that do not match are filtered out
"""
self.pattern = re.compile(pattern)
self.invert_match = invert_match
def filter(self, line):
ret = self.pattern.search(line)
if self.invert_match:
return ret is not None
else:
return ret is None
class SearchDef(object):
def __init__(self, pattern, tag=None, hint=None):
"""
Add a search definition
@param pattern: regex pattern or list of patterns to search for
@param tag: optional user-friendly identifier for this search term
@param hint: pre-search term to speed things up
"""
if type(pattern) != list:
self.patterns = [re.compile(pattern)]
else:
self.patterns = []
for _pattern in pattern:
self.patterns.append(re.compile(_pattern))
self.tag = tag
if hint:
self.hint = re.compile(hint)
else:
self.hint = None
def run(self, line):
"""Execute search patterns against line and return first match."""
if self.hint:
ret = self.hint.search(line)
if not ret:
return None
ret = None
for pattern in self.patterns:
ret = pattern.match(line)
if ret:
return ret
return ret
class SequenceSearchDef(object):
def __init__(self, start, tag, end=None, body=None):
"""
Define search for sequences. A sequence must match a start and end with
optional body in between. If no end defined, the sequence ends with
the start of the next or EOF.
NOTE: sequences must not overlap. This is therefore not suitable for
finding sequences generated by parallel/concurrent tasks.
@param start: SearchDef object for matching start
@param tag: tag used to identify this sequence definition
@param end: optional SearchDef object for matching end
@param body: optional SearchDef object for matching body
"""
self.s_start = start
self.s_end = end
self.s_body = body
self.tag = tag
self._mark = None
# Each section identified gets its own id. Since each file is processed
# using a separate process and memory is not shared, these values must
# be unique to avoid collisions when results are aggregated.
self._section_id = None
self._unique_id = "{}-{}".format(self.tag, uuid.uuid4())
@property
def start_tag(self):
"""Tag used to identify start of section"""
return "{}-start".format(self.tag)
@property
def end_tag(self):
"""Tag used to identify end of section"""
return "{}-end".format(self.tag)
@property
def body_tag(self):
"""Tag used to identify body of section"""
return "{}-body".format(self.tag)
@property
def id(self):
return self._unique_id
@property
def section_id(self):
""" ID of current section. A new id should be set after each
completed section. """
return self._section_id
@property
def started(self):
"""Indicate a section sequence has been started."""
return self._mark == 1
def start(self):
"""Indicate that a sequence start has been detected."""
self._section_id = str(uuid.uuid4())
self._mark = 1
def reset(self):
"""Used to restart a section. This is used e.g. if the start
expression matches midway through a sequence (and before the end).
"""
self._mark = 0
def stop(self):
"""Indicate that a sequence is complete."""
self._mark = 0
self._section_id = str(uuid.uuid4())
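# Hedged usage sketch (patterns, tag and path are illustrative assumptions):
#
#   seq = SequenceSearchDef(start=SearchDef(r"^start of section"),
#                           body=SearchDef(r"^\s+.+"),
#                           end=SearchDef(r"^end of section"),
#                           tag="mysection")
#   searcher = FileSearcher()
#   searcher.add_search_term(seq, "/var/log/myservice.log")
#   # after the searcher has been executed (the entry point is not shown in this excerpt),
#   # grouped results can be retrieved per matched section:
#   sections = searcher.results.find_sequence_sections(seq)
#
# Each value in `sections` is the list of start/body/end SearchResult objects matched for
# one section, keyed by that section's id.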
class SearchResultPart(object):
def __init__(self, index, value):
self.index = index
self.value = value
class SearchResult(object):
def __init__(self, linenumber, source, result, search_term_tag=None,
section_id=None, sequence_obj_id=None):
"""
@param linenumber: line number that produced a match
@param source: data source (path)
        @param result: re.Match object produced by the search
@param search_term_tag: SearchDef object tag
@param section_id: SequenceSearchDef object section id
@param sequence_obj_id: SequenceSearchDef object unique id
"""
self.tag = search_term_tag
self.source = source
self.linenumber = linenumber
self._parts = {}
self.sequence_obj_id = sequence_obj_id
self.section_id = section_id
num_groups = len(result.groups())
# NOTE: this does not include group(0)
if num_groups:
# To reduce memory footprint, don't store group(0) i.e. the whole
# line, if there are actual groups in the result.
for i in range(1, num_groups + 1):
self._add(i, result.group(i))
else:
self._add(0, result.group(0))
def _add(self, index, value):
self._parts[index] = SearchResultPart(index, value)
def get(self, index):
"""Retrieve a result part by its index."""
if index not in self._parts:
return None
return self._parts[index].value
class SearchResultsCollection(object):
def __init__(self):
self.reset()
def __len__(self):
return self.count
@property
def count(self):
_count = 0
for f in self.files:
_count += len(self.find_by_path(f))
return _count
@property
def files(self):
return list(self._results.keys())
def reset(self):
self._iter_idx = 0
self._results = {}
def add(self, path, results):
if path not in self._results:
self._results[path] = results
else:
self._results[path] += results
def find_by_path(self, path):
if path not in self._results:
return []
return self._results[path]
def find_by_tag(self, tag, path=None, sequence_obj_id=None):
"""Return all result tagged with tag.
If no path is provided tagged results from all paths are returned.
"""
if path:
paths = [path]
else:
paths = list(self._results.keys())
results = []
for path in paths:
for result in self._results.get(path, []):
if sequence_obj_id is None:
if result.tag == tag:
results.append(result)
else:
if (result.tag == tag and
result.sequence_obj_id == sequence_obj_id):
results.append(result)
return results
def find_sequence_sections(self, sequence_obj, path=None):
"""Return results of running the given sequence search.
Returns a dictionary keyed by section id where each is a list of
results for that section with start, body, end etc.
"""
_results = []
sections = {}
_results += self.find_by_tag(tag=sequence_obj.start_tag, path=path,
sequence_obj_id=sequence_obj.id)
_results += self.find_by_tag(tag=sequence_obj.body_tag, path=path,
sequence_obj_id=sequence_obj.id)
_results += self.find_by_tag(tag=sequence_obj.end_tag, path=path,
sequence_obj_id=sequence_obj.id)
for r in _results:
if r.section_id in sections:
sections[r.section_id].append(r)
else:
sections[r.section_id] = [r]
return sections
def __iter__(self):
return iter(self._results.items())
class FileSearcher(object):
def __init__(self):
self.paths = {}
self.filters = {}
self.results = SearchResultsCollection()
@property
def num_cpus(self):
if HotSOSConfig.MAX_PARALLEL_TASKS == 0:
cpus = 1 # i.e. no parallelism
else:
cpus = min(HotSOSConfig.MAX_PARALLEL_TASKS, os.cpu_count())
return cpus
def add_filter_term(self, filter, path):
"""Add a term to search for that will be used as a filter for the given
data source. This filter is applied to each line in a file prior to
executing the full search(es) as means of reducing the amount of full
searches we have to do by filtering out lines that do not qualify. A
negative match results in the line being skipped and no further
searches performed.
A filter definition is registered against a path which can be a
file, directory or glob. Any number of filters can be registered.
        @param filter: FilterDef object
@param path: path to which the filter should be applied.
"""
if path in self.filters:
self.filters[path].append(filter)
else:
self.filters[path] = [filter]
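    # Hedged example (the `searcher` instance, pattern and path are illustrative
    # assumptions): skip every line that does not contain "ERROR" before running the
    # registered searches on that file:
    #
    #   searcher.add_filter_term(FilterDef(r"ERROR"), "/var/log/app.log")
    #
    # With FilterDef(..., invert_match=True) the sense is reversed and matching lines are
    # the ones skipped instead.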
def add_search_term(self, searchdef, path):
"""Add a term to search for.
A search definition is registered against a path which can be a
file, directory or glob. Any number of searches can be registered.
Searches are executed concurrently by file.
@param searchdef: SearchDef object
@param path: path that we will be searching for this key
"""
if path in self.paths:
self.paths[path].append(searchdef)
else:
self.paths[path] = [searchdef]
def _job_wrapper(self, pool, path, entry):
term_key = path
return pool.apply_async(self._search_task_wrapper,
(entry, term_key))
def _search_task_wrapper(self, path, term_key):
try:
with gzip.open(path, 'r') as fd:
try:
# test if file is gzip
fd.read(1)
fd.seek(0)
return self._search_task(term_key, fd, path)
except OSError:
pass
with open(path) as fd:
return self._search_task(term_key, fd, path)
except UnicodeDecodeError:
# ignore the file if it can't be decoded
log.debug("caught UnicodeDecodeError for path %s - skipping", path)
except EOFError as e:
msg = ("an exception occured while searching {} - {}".
format(path, e))
raise FileSearchException(msg) from e
except Exception as e:
msg = ("an unknown exception occured while searching {} - {}".
format(path, e))
raise FileSearchException(msg) from e
def line_filtered(self, term_key, line):
"""Returns True if line is to be skipped."""
for f_term in self.filters.get(term_key, []):
if f_term.filter(line):
return True
return False
def _search_task(self, term_key, fd, path):
results = []
sequence_results = {}
for ln, line in enumerate(fd, start=1):
if type(line) == bytes:
line = line.decode("utf-8")
# global filters (untagged)
if self.line_filtered(term_key, line):
continue
for s_term in self.paths[term_key]:
if type(s_term) == SequenceSearchDef:
# if the ending is defined and we match a start while
# already in a section, we start again.
if s_term.s_end:
ret = s_term.s_start.run(line)
if s_term.started:
if ret:
# reset and start again
if sequence_results:
del sequence_results[s_term.id]
s_term.reset()
else:
ret = s_term.s_end.run(line)
else:
ret = s_term.s_start.run(line)
else:
ret = s_term.run(line)
if ret:
section_id = None
sequence_obj_id = None
tag = s_term.tag
if type(s_term) == SequenceSearchDef:
if not s_term.started:
tag = s_term.start_tag
s_term.start()
section_id = s_term.section_id
else:
tag = s_term.end_tag
section_id = s_term.section_id
| |
variables in the stochastic process.
* **phi** (`ndarray`):
The random phase angles used in the simulation of the stochastic process.
The shape of the phase angles (`nsamples`, `number_of_variables`, `number_frequency_intervals[0]`, ...,
`number_frequency_intervals[number_of_dimensions-1]`)
* **b_ampl** (`ndarray`):
The amplitude of the bispectrum.
* **b_real** (`ndarray`):
The real part of the bispectrum.
* **b_imag** (`ndarray`):
The imaginary part of the bispectrum.
* **biphase** (`ndarray`):
The biphase values of the bispectrum.
* **pure_power_spectrum** (`ndarray`):
The pure part of the power spectrum.
* **bc2** (`ndarray`):
The bicoherence values of the power spectrum and bispectrum.
* **sum_bc2** (`ndarray`):
The sum of the bicoherence values for single frequencies.
**Methods**
"""
def __init__(self, nsamples, power_spectrum, bispectrum, time_interval, frequency_interval, number_time_intervals,
number_frequency_intervals, case='uni', random_state=None, verbose=False):
self.nsamples = nsamples
self.number_frequency_intervals = np.array(number_frequency_intervals)
self.number_time_intervals = np.array(number_time_intervals)
self.frequency_interval = np.array(frequency_interval)
self.time_interval = np.array(time_interval)
self.number_of_dimensions = len(power_spectrum.shape)
self.power_spectrum = power_spectrum
self.bispectrum = bispectrum
# Error checks
t_u = 2 * np.pi / (2 * self.number_frequency_intervals * self.frequency_interval)
if (self.time_interval > t_u).any():
raise RuntimeError('UQpy: Aliasing might occur during execution')
self.random_state = random_state
if isinstance(self.random_state, int):
np.random.seed(self.random_state)
elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
self.b_ampl = np.absolute(bispectrum)
self.b_real = np.real(bispectrum)
self.b_imag = np.imag(bispectrum)
self.biphase = np.arctan2(self.b_imag, self.b_real)
self.biphase[np.isnan(self.biphase)] = 0
self.phi = None
self.samples = None
self.case = case
self.verbose = verbose
if self.number_of_dimensions == len(self.power_spectrum.shape):
self.case = 'uni'
else:
self.number_of_variables = self.power_spectrum.shape[0]
self.case = 'multi'
if self.nsamples is not None:
self.run(nsamples=self.nsamples)
def _compute_bicoherence_uni(self):
if self.verbose:
print('UQpy: Stochastic Process: Computing the partial bicoherence values.')
self.bc2 = np.zeros_like(self.b_real)
self.pure_power_spectrum = np.zeros_like(self.power_spectrum)
self.sum_bc2 = np.zeros_like(self.power_spectrum)
if self.number_of_dimensions == 1:
self.pure_power_spectrum[0] = self.power_spectrum[0]
self.pure_power_spectrum[1] = self.power_spectrum[1]
if self.number_of_dimensions == 2:
self.pure_power_spectrum[0, :] = self.power_spectrum[0, :]
self.pure_power_spectrum[1, :] = self.power_spectrum[1, :]
self.pure_power_spectrum[:, 0] = self.power_spectrum[:, 0]
self.pure_power_spectrum[:, 1] = self.power_spectrum[:, 1]
if self.number_of_dimensions == 3:
self.pure_power_spectrum[0, :, :] = self.power_spectrum[0, :, :]
self.pure_power_spectrum[1, :, :] = self.power_spectrum[1, :, :]
self.pure_power_spectrum[:, 0, :] = self.power_spectrum[:, 0, :]
self.pure_power_spectrum[:, 1, :] = self.power_spectrum[:, 1, :]
self.pure_power_spectrum[:, :, 0] = self.power_spectrum[:, :, 0]
            self.pure_power_spectrum[:, :, 1] = self.power_spectrum[:, :, 1]
self.ranges = [range(self.number_frequency_intervals[i]) for i in range(self.number_of_dimensions)]
for i in itertools.product(*self.ranges):
wk = np.array(i)
for j in itertools.product(*[range(np.int32(k)) for k in np.ceil((wk + 1) / 2)]):
wj = np.array(j)
wi = wk - wj
if self.b_ampl[(*wi, *wj)] > 0 and self.pure_power_spectrum[(*wi, *[])] * \
self.pure_power_spectrum[(*wj, *[])] != 0:
self.bc2[(*wi, *wj)] = self.b_ampl[(*wi, *wj)] ** 2 / (
self.pure_power_spectrum[(*wi, *[])] * self.pure_power_spectrum[(*wj, *[])] *
self.power_spectrum[(*wk, *[])]) * self.frequency_interval ** self.number_of_dimensions
self.sum_bc2[(*wk, *[])] = self.sum_bc2[(*wk, *[])] + self.bc2[(*wi, *wj)]
else:
self.bc2[(*wi, *wj)] = 0
if self.sum_bc2[(*wk, *[])] > 1:
print('UQpy: Stochastic Process: Results may not be as expected as sum of partial bicoherences is '
'greater than 1')
                for j in itertools.product(*[range(np.int32(k)) for k in np.ceil((wk + 1) / 2)]):
wj = np.array(j)
wi = wk - wj
self.bc2[(*wi, *wj)] = self.bc2[(*wi, *wj)] / self.sum_bc2[(*wk, *[])]
self.sum_bc2[(*wk, *[])] = 1
self.pure_power_spectrum[(*wk, *[])] = self.power_spectrum[(*wk, *[])] * (1 - self.sum_bc2[(*wk, *[])])
def _simulate_bsrm_uni(self, phi):
coeff = np.sqrt((2 ** (
self.number_of_dimensions + 1)) * self.power_spectrum *
self.frequency_interval ** self.number_of_dimensions)
phi_e = np.exp(phi * 1.0j)
biphase_e = np.exp(self.biphase * 1.0j)
self._compute_bicoherence_uni()
b = np.sqrt(1 - self.sum_bc2) * phi_e
bc = np.sqrt(self.bc2)
phi_e = np.einsum('i...->...i', phi_e)
b = np.einsum('i...->...i', b)
for i in itertools.product(*self.ranges):
wk = np.array(i)
for j in itertools.product(*[range(np.int32(k)) for k in np.ceil((wk + 1) / 2)]):
wj = np.array(j)
wi = wk - wj
b[(*wk, *[])] = b[(*wk, *[])] + bc[(*wi, *wj)] * biphase_e[(*wi, *wj)] * phi_e[(*wi, *[])] * \
phi_e[(*wj, *[])]
b = np.einsum('...i->i...', b)
b = b * coeff
b[np.isnan(b)] = 0
samples = np.fft.fftn(b, self.number_time_intervals)
samples = samples[:, np.newaxis]
return np.real(samples)
def run(self, nsamples):
"""
Execute the random sampling in the ``BSRM`` class.
The ``run`` method is the function that performs random sampling in the ``BSRM`` class. If `nsamples` is
provided, the ``run`` method is automatically called when the ``BSRM`` object is defined. The user may also call
the ``run`` method directly to generate samples. The ``run`` method of the ``BSRM`` class can be invoked many
times and each time the generated samples are appended to the existing samples.
** Input:**
* **nsamples** (`int`):
Number of samples of the stochastic process to be simulated.
If the ``run`` method is invoked multiple times, the newly generated samples will be appended to the
existing samples.
**Output/Returns:**
The ``run`` method has no returns, although it creates and/or appends the `samples` attribute of the
``BSRM`` class.
"""
if nsamples is None:
raise ValueError('UQpy: Stochastic Process: Number of samples must be defined.')
if not isinstance(nsamples, int):
raise ValueError('UQpy: Stochastic Process: nsamples should be an integer.')
if self.verbose:
print('UQpy: Stochastic Process: Running 3rd-order Spectral Representation Method.')
samples = None
phi = None
if self.case == 'uni':
if self.verbose:
print('UQpy: Stochastic Process: Starting simulation of uni-variate Stochastic Processes.')
print('UQpy: The number of dimensions is :', self.number_of_dimensions)
phi = np.random.uniform(
size=np.append(self.nsamples, np.ones(self.number_of_dimensions, dtype=np.int32)
* self.number_frequency_intervals)) * 2 * np.pi
samples = self._simulate_bsrm_uni(phi)
if self.samples is None:
self.samples = samples
self.phi = phi
else:
self.samples = np.concatenate((self.samples, samples), axis=0)
self.phi = np.concatenate((self.phi, phi), axis=0)
if self.verbose:
print('UQpy: Stochastic Process: 3rd-order Spectral Representation Method Complete.')
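# Hedged usage sketch (spectrum shapes and numerical values are illustrative assumptions):
#
#   nw, nt = 128, 256
#   S = np.ones(nw)                      # 1-D power spectrum over nw frequency intervals
#   B = np.zeros((nw, nw))               # bispectrum; all zeros gives no third-order term
#   obj = BSRM(nsamples=10, power_spectrum=S, bispectrum=B,
#              time_interval=[0.1], frequency_interval=[0.05],
#              number_time_intervals=[nt], number_frequency_intervals=[nw])
#   obj.run(nsamples=5)                  # appends 5 more sample paths to obj.samples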
class KLE:
"""
A class to simulate stochastic processes from a given auto-correlation function based on the Karhunen-Loeve
Expansion
**Input:**
* **nsamples** (`int`):
Number of samples of the stochastic process to be simulated.
The ``run`` method is automatically called if `nsamples` is provided. If `nsamples` is not provided, then the
``KLE`` object is created but samples are not generated.
* **correlation_function** (`list or numpy.ndarray`):
The correlation function of the stochastic process of size (`number_time_intervals`, `number_time_intervals`)
* **time_interval** (`float`):
The length of time discretization.
* **threshold** (`int`):
The threshold number of eigenvalues to be used in the expansion.
* **random_state** (None or `int` or ``numpy.random.RandomState`` object):
Random seed used to initialize the pseudo-random number generator. Default is None.
If an integer is provided, this sets the seed for an object of ``numpy.random.RandomState``. Otherwise, the
object itself can be passed directly.
* **verbose** (Boolean):
A boolean declaring whether to write text to the terminal.
**Attributes:**
* **samples** (`ndarray`):
Array of generated samples.
* **xi** (`ndarray`):
The independent gaussian random variables used in the expansion.
**Methods**
"""
# TODO: Test this for non-stationary processes.
def __init__(self, nsamples, correlation_function, time_interval, threshold=None, random_state=None, verbose=False):
self.correlation_function = correlation_function
self.time_interval = time_interval
if threshold:
self.number_eigen_values = threshold
else:
self.number_eigen_values = len(self.correlation_function[0])
self.random_state = random_state
if isinstance(self.random_state, int):
np.random.seed(self.random_state)
elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
self.verbose = verbose
self.nsamples = nsamples
self.samples = None
self.xi = None
if self.nsamples is not None:
self.run(nsamples=self.nsamples)
def _simulate(self, xi):
lam, phi = np.linalg.eig(self.correlation_function)
lam = np.diag(lam)
lam = lam.astype(np.float64)
samples = np.dot(phi[:, :self.number_eigen_values], np.dot(sqrtm(lam[:self.number_eigen_values]), xi))
samples = np.real(samples)
samples = samples.T
samples = samples[:, np.newaxis]
return samples
def run(self, nsamples):
"""
Execute the random sampling in the ``KLE`` class.
The ``run`` method is the function that performs random sampling in the ``KLE`` class. If `nsamples` is
provided when the ``KLE`` object is defined, the ``run`` method is automatically called. The user may also call
the ``run`` method directly to generate samples. The ``run`` method of the ``KLE`` class can be invoked many
times and each time the generated samples are appended to the existing samples.
        **Input:**
* **nsamples** (`int`):
Number of samples of the stochastic process to be simulated.
If the ``run`` method is invoked multiple times, the newly generated samples will be appended to the
existing samples.
**Output/Returns:**
The ``run`` method has no returns, although it creates and/or appends the `samples` attribute of the
``KLE`` class.
"""
if nsamples is None:
raise ValueError('UQpy: Stochastic Process: Number of samples must be defined.')
if not isinstance(nsamples, int):
raise ValueError('UQpy: Stochastic Process: nsamples should be an integer.')
if self.verbose:
            print('UQpy: Stochastic Process: Running Karhunen-Loeve Expansion.')
# Repository: amarallab/waldo
# coding: utf-8
# Description:
# This notebook is an attempt to implement the state behaivior model from
# The Geometry of Locomotive Behavioral States in C. elegans
# Gallagher et al.
#
### Imports
# In[2]:
# standard imports
import os
import sys
import numpy as np
import scipy
import scipy.interpolate as interpolate
import scipy.stats as stats
import pandas as pd
import matplotlib.pyplot as plt
import prettyplotlib as ppl
# Path definitions
HERE = os.path.dirname(os.path.realpath(__file__))
CODE_DIR = os.path.abspath(os.path.join(HERE, '..'))
SHARED_DIR = os.path.join(CODE_DIR, 'shared')
print CODE_DIR
print SHARED_DIR
sys.path.append(CODE_DIR)
sys.path.append(SHARED_DIR)
# nonstandard imports
from wio.file_manager import get_timeseries, write_timeseries_file, write_metadata_file
### Functions
def deskew_data(data, e=0.1):
return np.arcsinh(np.array(data)/e)
def reskew_data(data,e=0.1):
return np.sinh(np.array(data)) * e
#### Calculation Functions
def markov_measures_for_xy(x, y, dt=0.1, verbose=False):
"""
reversal
speed
acceleration
angular acceleration
"""
vs = np.zeros((2, len(x)-1))
vs[0] = np.diff(x) / dt
vs[1] = np.diff(y) / dt
vs = vs.T
data = []
v23 = vs[0]
for v in vs[1:]:
v12, v23 = v23, v
if np.dot(v12, v23) < 0:
r = 1
d = (v12 - v23) / np.linalg.norm(v12 - v23)
alpha = (v23 + v12) / dt
R = [[d[0], d[1]], [-d[1], d[0]]]
else:
r = 0
d = (v12 + v23) / np.linalg.norm(v12 + v23)
alpha = (v23 - v12) / dt
R = [[-d[0], -d[1]], [-d[1], d[0]]]
s = (np.linalg.norm(v12) + np.linalg.norm(v23)) / dt
if s == 0:
a, ar = 0, 0
else:
a = np.dot(R, alpha)
ar = a[1]
a = np.linalg.norm(a)
if verbose:
print 'r={r} | s={s} | a={a} | ar={ar}'.format(r=r, s=s, a=a, ar=ar)
if np.isnan(a):
print 'nan'
data.append((r, s, a, ar))
data = np.array(data)
for i,dat in enumerate(data):
if any(np.isnan(dat)):
print i, dat
return data
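# Worked example (illustrative only): run a short toy trajectory through
# markov_measures_for_xy. Each returned row is (reversal flag, speed,
# tangential acceleration, radial acceleration), and there are two fewer rows
# than input points because each row is built from a pair of consecutive
# velocities.
def _markov_measures_example():
    x = np.array([0.0, 0.1, 0.2, 0.25, 0.2])
    y = np.array([0.0, 0.0, 0.05, 0.1, 0.1])
    measures = markov_measures_for_xy(x, y, dt=0.1)
    assert measures.shape == (len(x) - 2, 4)
    return measures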
# In[8]:
def initialize_transition_mat(tau=10, dt=0.1, m=3):
    if m == 1:
return np.array([1.0])
off_diag = 1.0 / ((m-1) * tau) * (np.ones((m,m)) - np.identity(m))
diag = ( - 1.0/tau) * np.identity(m)
full = off_diag + diag
    return scipy.linalg.expm(full*dt)
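# Sketch: initialize_transition_mat builds a continuous-time generator with
# off-diagonal rates 1/((m-1)*tau) and diagonal -1/tau, then returns
# expm(generator * dt). The result is a proper transition matrix (rows sum to
# one) whose diagonal stays close to one whenever dt << tau.
def _transition_mat_example():
    T = initialize_transition_mat(tau=10, dt=0.1, m=3)
    assert np.allclose(T.sum(axis=1), 1.0)
    assert np.all(np.diag(T) > 0.98)
    return T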
# In[9]:
def estimate_params(N_states, rel_probs, observations, verbose=False):
all_params = np.zeros((N_states, 7))
for i, ps in enumerate(rel_probs.T):
psum = np.sum(ps)
wi = ps / psum
Ci = 1 / (1 - np.sum(wi ** 2))
#pr, mean_s, mean_a, var_s, var_a, var_ar, covar_as = params
# reversals
r, s, a, ar = observations[:, 0], observations[:, 1], observations[:, 2], observations[:, 3]
pr = np.sum(r * wi)
# speeds
mean_s = np.sum(s * wi)
var_s = Ci * (np.sum((s**2) * wi) - mean_s)
# tangential acceleration
mean_a = np.sum(a * wi)
var_a = Ci * (np.sum((a**2) * wi) - mean_a)
# radial acceleration
# mean = 0
var_ar = np.sum((ar**2) * wi)
# speed acceleration coovariance
covar_as = Ci * (np.sum(a * s * wi) - (mean_a * mean_s))
if verbose:
print i
print '\tpr', pr
print '\tmean s', mean_s
print '\tmean_a', mean_a
print '\tvar s', var_s
print '\tvar a', var_a
print '\tvar ar', var_ar
print '\tcovar as', covar_as
#for dtype, xi in zip(['r', 's', 'a', 'ar'], observations.T):
# print dtype,
all_params[i, :] = pr, mean_s, mean_a, var_s, var_a, var_ar, covar_as
return all_params
# In[11]:
def probablity_of_observation(params, observation):
'''
params = [pr, mean_s, mean_a, var_s, var_a, var_ar, covar_as]
    observation = [r, s, a, ar]
'''
mean_ar = 0 # this is a constant
# unpack
pr, mean_s, mean_a, var_s, var_a, var_ar, covar_as = list(params)
r, s, a, ar = list(observation)
# set up reversal probablity
if r:
Pr = pr
else:
Pr = 1 - pr
# set up all other probabilities
x = np.array([[s - mean_s], [a - mean_a], [ar - mean_ar]])
E = np.array([[var_s, covar_as, 0],
[covar_as, var_a, 0],
[0, 0, var_ar]])
# calculate all parts of main probability equation
A = Pr
B = 4 / ((np.pi ** 2) * np.sqrt(np.linalg.det(E)))
C = (1 / (1 + np.dot(np.dot(x.T, np.linalg.inv(E)), x))) **3
#print A
#print B
#print C
P = A * B * float(C)
return P
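# Sketch of the density evaluated above (assumed interpretation of the code):
#     P = Pr * 4 / (pi**2 * sqrt(det(E))) * (1 + x' E^-1 x)**-3
# where x = (s - mean_s, a - mean_a, ar), E is the covariance block built from
# var_s, var_a, var_ar and covar_as, and Pr is pr or (1 - pr) depending on the
# reversal flag. The check below uses made-up parameter values purely to
# illustrate the reversal handling.
def _observation_probability_example():
    params = [0.2, 1.0, 0.5, 0.4, 0.3, 0.2, 0.05]  # illustrative values only
    p_rev = probablity_of_observation(params, [1, 1.0, 0.5, 0.0])
    p_fwd = probablity_of_observation(params, [0, 1.0, 0.5, 0.0])
    # identical kinematics, so the two values differ only by pr vs (1 - pr)
    assert abs(p_rev / p_fwd - 0.2 / 0.8) < 1e-9
    return p_rev, p_fwd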
def calculate_state_probabilities_no_memory(N_states, params, observations):
probs = np.zeros((len(observations), N_states))
for i, par in enumerate(params):
probs[:, i] = [probablity_of_observation(par, obs) for obs in observations]
return probs
# In[15]:
def baum_welch(probs, T, initial=None):
N_obs, N_states = probs.shape
assert (N_states, N_states) == T.shape, 'rel_probs and Transition matrix do not have matching numbers of states'
print N_states ** 2, 'transitions'
all_transition_points = np.ones(shape=(N_obs-1, N_states**2))
    if initial is None:
initial = (1.0 / N_states) * np.ones(N_states)
p = probs[1, :]
for i, p_new in enumerate(probs[1:,:]):
p, p_old = p_new, p # step forward
        if p_old is None: # skip first value.
continue
current = np.array(T) * p
current = (current.T * p_old * initial).T
all_transition_points[i,:] = current.flatten()
highest_prob = np.amax(all_transition_points, axis=1).sum()
new_T = (all_transition_points.sum(axis=0) / highest_prob).reshape((N_states, N_states))
row_sums = new_T.sum(axis=1)
new_T = new_T / row_sums[:, np.newaxis]
return new_T
def test_baum_welch():
# data and solution
T = np.array([[0.5, 0.5], [0.3, 0.7]])
p = np.array([[.3, .8], [.3, .8], [.3, .8], [.3, .8], [.3, .8],
[.7, .2], [.7, .2], [.3, .8], [.3, .8], [.3, .8]])
initial = np.array([0.2, 0.8])
solution_T = np.array([[0.39726027, 0.60273973],
[ 0.18333333, 0.81666667]])
T2 = baum_welch(p, T, initial)
print 'solution'
print solution_T
print 'T'
print T2
print 'difference'
print solution_T - T2
# TODO: Get the baum_welch algorithm working for re-estimating the transition matrix for real data
'''
print states
print params.shape
print observations.shape
probs = calculate_state_probabilities_no_memory(states, params, observations)
print probs.shape
print baum_welch(probs, T)
'''
# In[12]:
def forward_backward(N_states, probs, trans_mat, start_probs, end_probs):
def forward(N_states, probs, trans_mat, start_probs):
last_p = start_probs
forward_p = np.zeros(probs.shape)
for i, p in enumerate(probs[:]):
p = p * np.identity(N_states)
new_p = np.dot(np.dot(p, trans_mat), last_p)
new_p = new_p / sum(new_p) # normalize
forward_p[i] = new_p
last_p = new_p
return forward_p
def backward(N_states, probs, trans_mat, end_probs):
probs = probs[::-1,:] # reverse row order
backward_p = np.zeros(probs.shape)
last_p = end_probs
for i, p in enumerate(probs[:]):
p = p * np.identity(N_states)
new_p = np.dot(np.dot(trans_mat, p), last_p) # reverse trans and p from forward algorithm
new_p = new_p / sum(new_p) # normalize
backward_p[i] = new_p
last_p = new_p
# if 6970 < i < 6980:
# print i, p
# print i, last_p
# print i, new_p
backward_p = backward_p[::-1,:]
return backward_p
f = forward(N_states, probs, trans_mat, start_probs)
b = backward(N_states, probs, trans_mat, end_probs)
posterior = f * b
# normalize posterior
for i, p in enumerate(posterior):
posterior[i] = p / sum(p)
return posterior
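# Sketch: apply the forward-backward smoother to a tiny two-state problem.
# The emission probabilities and transition matrix below are made up; the
# point is that the returned posterior has one normalized row per observation.
def _forward_backward_example():
    probs = np.array([[0.9, 0.1],
                      [0.8, 0.2],
                      [0.1, 0.9],
                      [0.2, 0.8]])
    T = np.array([[0.9, 0.1],
                  [0.1, 0.9]])
    start = np.array([0.5, 0.5])
    end = np.array([0.5, 0.5])
    posterior = forward_backward(2, probs, T, start, end)
    assert posterior.shape == probs.shape
    assert np.allclose(posterior.sum(axis=1), 1.0)
    return posterior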
# In[56]:
def closed_loop_fit(params, observations, T, start_probs, end_probs, max_iterations=1, sim_threshold=0.1):
N_states, _ = T.shape
history = []
for it in range(max_iterations):
        #print 'iteration ', it
        # calculate the probability that each observation
        # comes from each state, independently of one another
        probs = calculate_state_probabilities_no_memory(N_states, params, observations)
        # use the forward-backward algorithm to calculate relative probabilities
        # of each point being in each state with a transition penalty
rel_probs = forward_backward(N_states, probs, T, start_probs, end_probs)
# recalculate what the parameters should be, given the current split of the data.
params = estimate_params(N_states, rel_probs, observations, verbose=False)
# TODO! Turn into weighted average... rather than just standard
# if len(states) > 1:
# for i in range(3,7):
# params[:,i] = np.mean(params[:,i])
weights = rel_probs[:, :].sum(axis=0) / rel_probs.sum()
for i in range(3,7):
params[:,i] = np.average(params[:,i], weights=weights)
if it >= 1:
diff = history[-1] - params
if diff.sum().sum() <= 0.1:
break
history.append(params)
    print it, 'iterations'
# estimate params one final time with variances unconstrained.
params = estimate_params(N_states, rel_probs, observations, verbose=False)
history.append(params)
# run baum-welch to estimate T now that we have decent state params.
probs = calculate_state_probabilities_no_memory(N_states, params, observations)
T = baum_welch(probs, T)
return params, T, rel_probs, history
# TODO: This is still nonfunctional.
def add_parameter_row(params, step=0.5):
N = 2
if not isinstance(params, np.ndarray) or not params.shape[1]:
params = np.ones((1,7)) * params
print params
p = np.zeros((N, 7))
for i in range(params.shape[1]):
print p[i, :].shape, params[i: 0].shape
p[i, :] = params[i: 0]
p[-1, :] = params[-1, :]
p[-1, 1] += step
return p
def excess_entropy(rel_probs):
N, M = rel_probs.shape
A = sum([(rel_probs[:,i] * np.log2(rel_probs[:,i])).sum() for i in range(M)]) / N
pi = rel_probs.sum(axis=0) / N
B = (pi * np.log2(pi)).sum()
return A - B
def fit_one_state(observations):
N_states = 1
T = initialize_transition_mat(tau=1, dt=0.1, m=N_states)
rel_probs = np.ones((len(observations), 1), dtype=float)
params = estimate_params(N_states, rel_probs, observations, verbose=False)
history = []
return params, T, rel_probs, history
def fit_two_states(params1, observations, tau, dt=0.1):
N_states = 2
p = np.zeros((N_states, 7))
    p[0, :] = p[1, :] = params1
| |
#!/usr/bin/env python3
import sys
import os
import getopt
import shutil
import json
import urllib.request
import hashlib
import tarfile
import re
exe_name = 'docker-bundle'
project_name = 'Docker-bundle'
bundle_install_dir = 'docker'
bundles_dir = 'bundles'
version_code = '0.1.0'
version_number = 1
# ------------------------------------------------------------------------
config_path = os.path.expanduser(os.path.join('~', '.' + exe_name))
file_source_list = 'sources.list'
file_config = 'config.json'
source_list_file_path = os.path.join(config_path, file_source_list)
config_file_path = os.path.join(config_path, file_config)
packages_dir = 'packages'
default_source = 'https://docker-bundle.github.io/bundles.json'
upgrade_info_url = 'https://docker-bundle.github.io/update.json'
config = {
# <version_number>: skip a version update
# 'all': don't update
'skip_version_number': '',
'upgrade_info_url': upgrade_info_url,
}
def write_config(config):
f = open(config_file_path, 'w')
f.write(json.dumps(config, indent=2))
f.flush()
f.close()
def init_config_file():
try:
os.makedirs(config_path)
except BaseException:
pass
try:
if not os.path.isfile(source_list_file_path):
sources_list = open(source_list_file_path, 'w')
sources_list.write(default_source)
sources_list.flush()
sources_list.close()
except BaseException:
print('[WARNING] Load sources list failed.')
try:
if not os.path.isfile(config_file_path):
write_config(config)
else:
config.update(json.loads(open(config_file_path).read()))
except BaseException:
print('[WARNING] Load config file failed.')
# ------------------------------------------------------------------------
def upgrade(user_call_update=False, update_with_ask=True):
skip_version_number = config['skip_version_number']
if not user_call_update and skip_version_number.lower() == 'all':
return
update_info = {
# must have
'url': '',
'hash': '',
'version_code': version_code,
'version_number': version_number,
# option
# ask: ask for update
# silent: silent update
'mode': 'normal',
}
try:
update_info.update(
json.loads(
urllib.request.urlopen(
config['upgrade_info_url']).read()))
except BaseException:
return
update_url = update_info['url']
update_hash = update_info['hash']
update_version_code = update_info['version_code']
update_version_number = update_info['version_number']
update_mode = update_info['mode'].lower()
if '' == update_url or '' == update_hash or '' == update_version_code:
return
if int(update_version_number) <= version_number:
if user_call_update:
print('-' * 80)
print(' %s is newest version.' % exe_name)
print('-' * 80)
return
is_ask_for_upgrade = update_mode == 'ask'
is_silent_upgrade = update_mode == 'silent'
if not user_call_update and not is_silent_upgrade\
and not is_ask_for_upgrade\
and skip_version_number >= str(update_version_number):
return
if not is_silent_upgrade and (is_ask_for_upgrade or update_with_ask):
answer = input(
"""--------------------------------------------------------------------------------
 A new version (%s) of %s is available. Upgrade?
[Y]= Yes
[N]= Not now
[S]= Skip this version
[D]= Disable upgrade
--------------------------------------------------------------------------------
Answer: [Y]: """ % (update_version_code, exe_name))[:1].upper()
print()
if answer == 'N':
return
elif answer == 'D':
config['skip_version_number'] = 'all'
write_config(config)
return
elif answer == 'S':
config['skip_version_number'] = str(update_version_number)
write_config(config)
return
try:
new_exe = urllib.request.urlopen(update_url).read()
if md5(new_exe) != update_hash:
if user_call_update or is_ask_for_upgrade:
print('[FAILED] Upgrade data hash error!')
return
f = open(__file__, 'wb')
f.write(new_exe)
f.flush()
f.close()
if user_call_update or is_ask_for_upgrade:
print('=' * 80)
print(
' %s (%s) is up to date in your next command.' %
(project_name, update_version_code))
print('=' * 80)
except BaseException:
if user_call_update or is_ask_for_upgrade:
print('[FAILED] upgrade:', sys.exc_info()[1])
return
# ------------------------------------------------------------------------
is_installed = False
# import bundle.py
def load_sub_bundle():
current_path = os.getcwd()
paths = []
while True:
paths.append(os.path.join(current_path, bundle_install_dir))
parent_path = os.path.dirname(current_path)
if parent_path == current_path:
break
current_path = parent_path
bak_path = sys.path
sys.path = paths + sys.path
bundles = {}
try:
import bundle
except ImportError:
pass
else:
os.chdir(os.path.dirname(bundle.__file__))
bundles.update(bundle.load_bundles())
global is_installed
is_installed = True
sys.path = bak_path
return bundles
# import from '__file__/bundles_dir'
def load_bundles(bundle_dir_path=os.path.join(os.path.dirname(__file__), bundles_dir)):
bundles = {}
try:
bundle_names = list(map(
lambda x: x[0:-3], filter(lambda x: x[-3:] == '.py', os.listdir(bundle_dir_path))))
except BaseException:
bundle_names = []
if len(bundle_names) > 0:
sys.path.insert(0, bundle_dir_path)
for bundle in bundle_names:
module = __import__(bundle)
if 'actions' in dir(module):
bundles.update(module.actions)
return bundles
BUNDLE_INSTALLED = '__bundle_installed__'
BUNDLE_MESSAGE = '__bundle_message__'
_bundle_handler = {}
def handle_actions(actions):
global _bundle_handler
if BUNDLE_INSTALLED in actions:
_bundle_handler[BUNDLE_INSTALLED] = actions[BUNDLE_INSTALLED]
del actions[BUNDLE_INSTALLED]
if BUNDLE_MESSAGE in actions:
_bundle_handler[BUNDLE_MESSAGE] = actions[BUNDLE_MESSAGE]
del actions[BUNDLE_MESSAGE]
return actions
def bundle_message(actions):
if BUNDLE_MESSAGE in _bundle_handler and _bundle_handler[BUNDLE_MESSAGE](actions):
return
print('Bundle Commands:')
for name, action in actions.items():
print(" %-30s%s" % (name, action.get('desc', '')))
print()
# ------------------------------------------------------------------------
def load_source():
return set(filter(lambda x: x != '', map(
lambda x: x.strip(), open(source_list_file_path).readlines())))
def write_source(sources):
f = open(source_list_file_path, 'w')
for source in sources:
f.write(source + '\n')
f.flush()
f.close()
def load_packages(sources=[]):
try:
sources += load_source()
except BaseException:
pass
if len(sources) == 0:
sources = [default_source]
packages = {}
for source in sources:
try:
packages.update(json.loads(urllib.request.urlopen(source).read()))
except BaseException:
pass
return packages
# ------------------------------------------------------------------------
# install
def md5(data):
if isinstance(data, str):
data = data.encode()
return hashlib.md5(data).hexdigest()
def install_from_dir(package_path, target_path):
try:
shutil.copytree(
package_path,
target_path,
ignore=shutil.ignore_patterns('.git'))
return True
except BaseException:
        print(
            '[ERROR] Install from dir \'%s\' failed, %s.' %
            (package_path, sys.exc_info()[1]))
return None
def install_from_tarfile(package_path, target_path):
try:
t = tarfile.open(package_path, "r:gz")
t.extractall(target_path)
return True
except BaseException:
print('[ERROR] Package illegal.')
return None
def install_from_url(package_name, package_path, target_path):
file_name = package_name + '-' + md5(package_path) + '.bundle'
download_dir = os.path.join(config_path, packages_dir)
download_path = os.path.join(download_dir, file_name)
try:
os.makedirs(download_dir)
except BaseException:
pass
if os.path.isfile(download_path):
print('[INFO] Load from cache...')
return install_from_tarfile(download_path, target_path)
try:
urllib.request.urlretrieve(package_path, download_path)
return install_from_tarfile(download_path, target_path)
except BaseException:
        print(
            '[ERROR] Download \'%s\' failed, %s.' %
            (package_path, sys.exc_info()[1]))
return None
def install_from_git(package_name, package_path, target_path, branch='master'):
file_name = package_name + '-' + md5(package_path) + '.bundle'
download_dir = os.path.join(config_path, packages_dir)
download_path = os.path.join(download_dir, file_name)
success = False
if os.path.isdir(download_path):
if 0 == os.system(
'cd "%s" && git checkout %s && git pull -f' % (download_path, branch)):
success = True
else:
try:
shutil.rmtree(download_path)
except BaseException:
pass
if not success and not os.path.isdir(download_path):
if 0 == os.system("git clone --depth 1 \"%s\" \"%s\" -b %s" %
(package_path, download_path, branch)):
success = True
if not success:
print('[ERROR] Pull \'%s\' failed.' % package_path)
return None
return install_from_dir(download_path, target_path)
def install_from_package(package_name, package_path, target_path):
if package_path.find('http://') == 0 or package_path.find('https://') == 0:
return install_from_url(package_name, package_path, target_path)
elif os.path.isdir(package_path):
return install_from_dir(package_path, target_path)
elif os.path.isfile(package_path):
return install_from_tarfile(package_path, target_path)
else:
print(
'[ERROR] Can\'t find package \'%s\' in \'%s\'' %
(package_name, package_path))
return None
def install(argv=[]):
def install_help():
print(
"""
Usage:
%(name)s install [option] <bundle-name>
Options:
-p, --package Install bundle from package or dir, not from source.
-s, --source <source> Find bundle from given source.
Description:
<bundle-name> Run '%(name)s search <your bundle>' get bundle-names, or in '-p' for package path.
""" % {'name': exe_name})
opts, args = getopt.getopt(argv, 'ps:', ['package', 'source='])
if len(args) == 0:
install_help()
return
target_path = os.path.join(os.getcwd(), bundle_install_dir)
if is_installed or os.path.isdir(target_path):
print(
            ' %(project_name)s is already installed. If you want to reinstall, manually delete the \'docker\' folder first.' % {
'project_name': project_name})
return
bundle_name = args[0]
use_sources = []
from_package = False
for opt in opts:
if opt[0] == '-p' or opt[0] == '--package':
from_package = True
elif opt[0] == '-s' or opt[0] == '--source':
use_sources.append(opt[1])
result = None
if from_package:
result = install_from_package(bundle_name, bundle_name, target_path)
else:
print('[INFO] Fetching source list...')
upgrade()
packages = load_packages(use_sources)
if bundle_name not in packages:
print("[ERROR] Bundle '%s' not found." % (bundle_name))
return
print('[INFO] Bundle: ' + bundle_name)
package_info = packages[bundle_name]
package_type = package_info.get('type')
package_url = package_info.get('url')
print('[INFO] Package: ' + package_url)
if package_type == 'git':
result = install_from_git(
bundle_name,
package_url,
target_path,
package_info.get(
'branch',
'master'))
elif not package_type:
result = install_from_package(
bundle_name, package_url, target_path)
if not result:
print('[ERROR] Install bundle failed..')
return
# copy self to target
self_path = os.path.join(os.path.realpath(__file__))
shutil.copyfile(self_path, os.path.join(target_path, 'bundle.py'))
os.chdir(target_path)
print('[OK] Install bundle success.')
loaded_actions = handle_actions(load_bundles(os.path.join(target_path, bundles_dir)))
if BUNDLE_INSTALLED in _bundle_handler and _bundle_handler[BUNDLE_INSTALLED](loaded_actions):
return
bundle_message(loaded_actions)
# ------------------------------------------------------------------------
# search
def search(argv=[]):
def search_help():
print(
"""
Usage:
%(name)s search <keyword>
Options:
-n, --name Search in bundle-name only, not description
-r, --regex Keyword use Regex
-s, --source <source> Find bundle from given source.
-h, --help Show this.
""" % {'name': exe_name})
opts, args = getopt.getopt(
argv, 'nrs:h', [
'name', 'regex', 'source=', 'help'])
if len(args) == 0:
search_help()
return
keyword = args[0]
use_sources = []
name_only = False
use_regex = False
for opt in opts:
if opt[0] == '-n' or opt[0] == '--name':
name_only = True
elif opt[0] == '-r' or opt[0] == '--regex':
use_regex = True
elif opt[0] == '-s' or opt[0] == '--source':
use_sources.append(opt[1])
elif opt[0] == '-h' or opt[0] == '--help':
search_help()
exit()
upgrade()
packages = load_packages(use_sources)
if use_regex:
re_keyword = re.compile(keyword)
def check(content):
if use_regex:
return re_keyword.search(content) is not None
else:
return content.find(keyword) >= 0
print("%-40s%s" % ('NAME', 'Description'))
for name, info in packages.items():
desc = ''
if 'desc' in info:
desc = info.get('desc', '')
if check(name) or not name_only and check(desc):
print("%-40s%s" % (name, desc))
# ------------------------------------------------------------------------
# source
def source(argv=[]):
def source_help():
print(
"""
Usage:
%(name)s source [OPTIONS]
Options:
-a, --add <source> Add Source
-r, --remove <source> Remove Source
-l, --list List sources.
-h, --help Show this.
""" % {'name': exe_name})
opts, args = getopt.getopt(
argv, 'a:r:lh', [
'add=', 'remove=', 'list', 'help'])
if len(opts) == 0:
source_help()
return
sources = load_source()
modify = False
for opt in opts:
if opt[0] == '-l' or opt[0] == '--list':
for source in sources:
print(source)
elif opt[0] == '-h' or opt[0] == '--help':
source_help()
exit()
elif opt[0] == '-a' or opt[0] == '--add':
modify = True
| |
# omnitool/tlib.py
"""Low level interface to Terraria data"""
from struct import unpack, pack, Struct
import sys
from .database import items, rev_items, multitiles
is_exe = hasattr(sys, "frozen")
### Parser for .wld data types ###
def decode7bit(bytes):
lbytes = list(bytes)
value = 0
shift = 0
while True:
byteval = lbytes.pop(0)
if (byteval & 128) == 0: break
value |= ((byteval & 0x7F) << shift)
shift += 7
return (value | (byteval << shift))
def encode7bit(value):
temp = value
bytes = ""
while temp >= 128:
bytes += chr(0x000000FF & (temp | 0x80))
temp >>= 7
bytes += chr(temp)
return bytes
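# Sketch: this appears to be the .NET-style 7-bit ("varint") length encoding
# used for strings in .wld files: each byte carries 7 payload bits and the
# high bit marks a continuation. A bytearray is used below so the example
# behaves the same under Python 2 and 3.
def _decode7bit_example():
    assert decode7bit(bytearray(b'\x7f')) == 127        # single byte, no continuation
    assert decode7bit(bytearray(b'\xac\x02')) == 300    # 0x2C + (0x02 << 7)
    return True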
def get_long_string(f):
namelen = decode7bit(f.read(2)) # int(unpack("<B", f.read(1))[0])
if namelen < 127:
f.seek(-1, 1)
name = unpack("<" + str(namelen) + "s", f.read(namelen))[0].decode()
return name
formats = ( # ("word" , "<H", 2),
("byte", "<B", 1),
("short", "<h", 2),
("ushort", "<H", 2),
("int", "<I", 4),
("uint", "<i", 4),
("long", "<Q", 8),
("double", "<d", 8),
("float", "<f", 4),
)
def get_short(f, num=1):
if num == 1:
return unpack("<h", f.read(2))[0]
return unpack("<" + "h" * num, f.read(2 * num))
def get_ushort(f, num=1):
if num == 1:
return unpack("<H", f.read(2))[0]
return unpack("<" + "H" * num, f.read(2 * num))
def get_uint(f, num=1):
if num == 1:
return unpack("<I", f.read(4))[0]
return unpack("<" + "I" * num, f.read(num * 4))
def get_int(f, num=1):
if num == 1:
return unpack("<i", f.read(4))[0]
return unpack("<" + "i" * num, f.read(num * 4))
def get_long(f, num=1):
if num == 1:
return unpack("<Q", f.read(8))[0]
return unpack("<" + "Q" * num, f.read(num * 8))
def get_byte(f, num=1):
if num == 1:
return unpack("<B", f.read(1))[0]
return unpack("<" + "B" * num, f.read(num))
def get_double(f, num=1):
if num == 1:
return unpack("<d", f.read(8))[0]
return unpack("<" + "d" * num, f.read(num * 8))
def get_float(f, num=1):
if num == 1:
return unpack("<f", f.read(4))[0]
return unpack("<" + "f" * num, f.read(num * 4))
def set_uint(data):
return pack("<I", data)
def set_int(data):
return pack("<i", data)
def set_ushort(data):
return pack("<H", data)
#def set_word(data):
# return pack("<H", data)
def set_byte(data):
return pack("<B", data)
def get_string(f):
namelen = int(unpack("<B", f.read(1))[0])
return unpack("<" + str(namelen) + "s", f.read(namelen))[0] #.decode()
def set_string(data):
if len(data) > 126:
return encode7bit(len(data)) + pack("<" + str(len(data)) + "s", str.encode(data))
#return encode7bit(len(data))+pack("<"+len(data)*"s",*data)
else:
return pack("<B", len(data)) + pack("<" + str(len(data)) + "s", str.encode(data))
#return pack("<B", len(data))+pack("<"+len(data)*"s",*data)
def set_double(data):
return pack("<d", data)
def set_float(data):
return pack("<f", data)
for name, form, l in formats:
s = Struct(form)
def make_local(s, l):
def generic(f):
return s.unpack(f.read(l))[0]
return generic
globals()["get_g%s" % name] = make_local(s, l)
### Parser end ###
### content parser for .wld structures ###
def get_item(f):
amount = get_gbyte(f)
if amount:
return amount, items[get_int(f)], get_gbyte(f)
else:
return 0, None
def set_items(f, items):
for item in items:
f.write(set_byte(item[0])) #amount
if item[0]:
f.write(set_int(rev_items[item[1]])) #itemid
if len(item) > 2:
f.write(set_byte(item[2])) #prefix
else:
f.write(zero)
def set_items_id(f, items):
for item in items:
f.write(set_byte(item[0])) #amount
if item[0]:
f.write(set_uint(item[1])) #itemid
if len(item) > 2:
f.write(set_byte(item[2])) #prefix
else:
f.write(zero)
def set_items_uni(f, items):
for item in items:
f.write(set_byte(item[0]))
if item[0]:
f.write(set_string(str(item[1])))
f.write(set_byte(item[2]))
def get_chest(f):
if get_gbyte(f): #if exists
return get_uint(f, 2), [get_item(f) for x in range(20)]
# return none if no chest exists
# otherwise return (pos, [(amount, name), (amount, name) ....]
def set_chests(f, chests):
for chest in chests:
if chest == None:
f.write(zero)
else:
f.write(one)
f.write(set_uint(chest[0][0]) + set_uint(chest[0][1]))
set_items(f, chest[1])
def get_npc_names(f):
return [get_string(f) for x in range(10)]
def set_npc_names(f, names):
[f.write(set_string(name)) for name in names]
def set_chests_uni(f, chests):
for chest in chests:
if chest == None:
f.write(zero)
else:
f.write(one)
f.write(set_uint(chest[0][0]) + set_uint(chest[0][1]))
set_items_uni(f, chest[1])
def get_sign(f):
if get_gbyte(f):
return get_long_string(f), get_uint(f, 2)
def set_sign(f, sign):
if sign != None:
f.write(one + set_string(sign[0]) + set_uint(sign[1][0]) + set_uint(sign[1][1]))
else:
f.write(zero)
def set_signs(f, signs):
[set_sign(f, sign) for sign in signs]
def get_npc(f):
#get_byte(f)
if get_gbyte(f):
return get_string(f), get_float(f, 2), get_gbyte(f), get_uint(f, 2)
#return none if no npc exists
#otherwise return (name, (current_x, current_y), homeless, (home_X, home_Y))
def get_trail(f):
return (get_gbyte(f), get_string(f), get_gint(f)) #(1,name,ID)
def set_trail(f, trail):
f.write(set_byte(trail[0]) + set_string(trail[1]) + set_uint(trail[2]))
def set_npc(f, npc):
if npc != None:
f.write(
one + set_string(str(npc[0])) + set_float(npc[1][0]) + set_float(npc[1][1]) + set_byte(npc[2]) + set_uint(
npc[3][0]) + set_uint(npc[3][1]))
else:
f.write(zero)
def get_name(f):
header = get_header(f)[0]
return header["name"].decode()
def get_header(f):
""" returns a dict of header data"""
sectiondata = None
version = get_gint(f)
if version >= 87:
if version > 140:
if get_long(f) != 172097103742133618:
raise Exception("Invalid world header. (Typecheck number mismatch)")
rev = get_uint(f)
get_long(f)
#get sections
sectiondata = {"sections" : get_uint(f, get_gshort(f)),
"tiletypes" : get_gshort(f)}
multitiles_ = set()
mask = 0x80
for x in range(sectiondata["tiletypes"]):
if mask == 0x80:
flags = get_gbyte(f)
mask = 0x01
else: mask <<=1
if flags & mask == mask:
multitiles_.add(x)
if f.tell() != sectiondata["sections"][0]:
print("Warning: SectionHeader for world", self.header["name"].decode(),
"of different size than expected, errors may occur.")
else:
multitiles_ = multitiles
if version <= 36:
d = {"name": get_string(f),
"ID": get_gint(f),
"worldrect": get_uint(f, 4),
"height": get_gint(f),
"width": get_gint(f),
"spawn": get_uint(f, 2),
"groundlevel": get_gdouble(f),
"rocklevel": get_gdouble(f),
"time": get_gdouble(f),
"is_day": get_gbyte(f),
"moonphase": get_gint(f),
"is_bloodmoon": get_gbyte(f),
"dungeon_xy": get_uint(f, 2),
"bosses_slain": get_byte(f, 3),
"npcs_saved": get_byte(f, 3),
"special_slain": get_byte(f, 2),
"is_a_shadow_orb_broken": get_gbyte(f),
"is_meteor_spawned": get_gbyte(f),
"shadow_orbs_broken": get_gbyte(f),
"altars_broken": get_gint(f),
"hardmode": get_gbyte(f),
"gob_inv_time": get_gint(f),
"gob_inv_size": get_gint(f),
"gob_inv_type": get_gint(f),
"gob_inv_x": get_gdouble(f)}
elif version < 68:
d = {"name": get_string(f),
"ID": get_gint(f),
"worldrect": get_uint(f, 4),
"height": get_gint(f),
"width": get_gint(f),
"spawn": get_uint(f, 2),
"groundlevel": get_gdouble(f),
"rocklevel": get_gdouble(f),
"time": get_gdouble(f),
"is_day": get_gbyte(f),
"moonphase": get_gint(f),
"is_bloodmoon": get_gbyte(f),
"dungeon_xy": get_uint(f, 2),
"bosses_slain": get_byte(f, 3),
"npcs_saved": get_byte(f, 3),
"special_slain": get_byte(f, 3),
"is_a_shadow_orb_broken": get_gbyte(f),
"is_meteor_spawned": get_gbyte(f),
"shadow_orbs_broken": get_gbyte(f),
"altars_broken": get_gint(f),
"hardmode": get_gbyte(f),
"gob_inv_time": get_gint(f),
"gob_inv_size": get_gint(f),
"gob_inv_type": get_gint(f),
"gob_inv_x": get_gdouble(f)}
elif version < 71:
d = {"name": get_string(f),
"ID": get_gint(f),
"worldrect": get_uint(f, 4),
"height": get_gint(f),
"width": get_gint(f),
"moontype": get_byte(f),
"treedata": get_uint(f, 7),
"cavedata": get_uint(f, 10),
"spawn": get_uint(f, 2),
"groundlevel": get_gdouble(f),
"rocklevel": get_gdouble(f),
"time": get_gdouble(f),
"is_day": get_gbyte(f),
"moonphase": get_gint(f),
"is_bloodmoon": get_gbyte(f),
"dungeon_xy": get_uint(f, 2),
"is_crimson": get_gbyte(f),
"bosses_slain": get_byte(f, 10),
"npcs_saved": get_byte(f, 3),
"special_slain": get_byte(f, 4),
"is_a_shadow_orb_broken": get_gbyte(f),
"is_meteor_spawned": get_gbyte(f),
"shadow_orbs_broken": get_gbyte(f),
"altars_broken": get_gint(f),
"hardmode": get_gbyte(f),
"gob_inv_time": get_gint(f),
"gob_inv_size": get_gint(f),
"gob_inv_type": get_gint(f),
"gob_inv_x": get_gdouble(f),
"raining": get_gbyte(f),
"raintime": get_gint(f),
"maxrain": get_float(f),
"oretiers": get_int(f, 3),
"background_styles": get_byte(f, 8),
"clouds": get_uint(f),
"cloudcount": get_gushort(f),
"windspeed": get_float(f),
}
elif version < 80:
d = {"name": get_string(f),
"ID": get_gint(f),
"worldrect": get_uint(f, 4),
"height": get_gint(f),
"width": get_gint(f),
"moontype": get_byte(f),
"treedata": get_uint(f, 7),
"cavedata": get_uint(f, 10),
"spawn": get_uint(f, 2),
"groundlevel": get_gdouble(f),
"rocklevel": get_gdouble(f),
"time": get_gdouble(f),
"is_day": get_gbyte(f),
"moonphase": get_gint(f),
"is_bloodmoon": get_gbyte(f),
"is_eclipse": get_gbyte(f),
"dungeon_xy": get_uint(f, 2),
"is_crimson": get_gbyte(f),
"bosses_slain": get_byte(f, 10),
"npcs_saved": get_byte(f, 3),
"special_slain": get_byte(f, 4),
"is_a_shadow_orb_broken": get_gbyte(f),
"is_meteor_spawned": get_gbyte(f),
"shadow_orbs_broken": get_gbyte(f),
"altars_broken": get_gint(f),
"hardmode": get_gbyte(f),
"gob_inv_time": get_gint(f),
"gob_inv_size": get_gint(f),
"gob_inv_type": get_gint(f),
"gob_inv_x": get_gdouble(f),
"raining": get_gbyte(f),
"raintime": get_gint(f),
"maxrain": get_float(f),
"oretiers": get_int(f, 3),
"background_styles": get_byte(f, 8),
"clouds": get_uint(f),
"cloudcount": get_gushort(f),
"windspeed": get_float(f),
}
elif version < 103:# version 102+
d = {"name": get_string(f),
"ID": get_gint(f),
"worldrect": get_uint(f, 4),
"height": get_gint(f),
"width": get_gint(f),
"moontype": get_byte(f),
"treedata": get_uint(f, 7),
"cavedata": get_uint(f, 10),
"spawn": get_uint(f, 2),
"groundlevel": get_gdouble(f),
"rocklevel": get_gdouble(f),
"time": get_gdouble(f),
"is_day": get_gbyte(f),
"moonphase": get_gint(f),
"is_bloodmoon": get_gbyte(f),
"is_eclipse": get_gbyte(f),
"dungeon_xy": get_uint(f, 2),
"is_crimson": get_gbyte(f),
"bosses_slain": get_byte(f, 10),
"npcs_saved": get_byte(f, 3),
"special_slain": get_byte(f, 4),
"is_a_shadow_orb_broken": get_gbyte(f),
"is_meteor_spawned": get_gbyte(f),
"shadow_orbs_broken": get_gbyte(f),
"altars_broken": get_gint(f),
"hardmode": get_gbyte(f),
"gob_inv_time": get_gint(f),
"gob_inv_size": get_gint(f),
"gob_inv_type": get_gint(f),
"gob_inv_x": get_gdouble(f),
"raining": get_gbyte(f),
"raintime": get_gint(f),
"maxrain": get_float(f),
"oretiers": get_int(f, 3),
"background_styles": get_byte(f, 8),
"clouds": get_uint(f),
"cloudcount": get_gushort(f),
"windspeed": get_float(f),
"anglerstrings" : [get_string(f) for _ in range(get_gint(f))],
"angler_saved" : get_gbyte(f),
"angler_quest" : get_gint(f)
}
elif version < 148:#version 147
d = {"name": get_string(f),
"ID": get_gint(f),
"worldrect": get_uint(f, 4),
"height": get_gint(f),
"width": get_gint(f),
"expert" : get_gbyte(f),
"creationtime" : get_glong(f),
"moontype": get_gbyte(f),
"treedata": get_uint(f, 7),
"cavedata": get_uint(f, 10),
"spawn": get_uint(f, 2),
"groundlevel": get_gdouble(f),
"rocklevel": get_gdouble(f),
"time": get_gdouble(f),
"is_day": get_gbyte(f),
"moonphase": get_gint(f),
"is_bloodmoon": get_gbyte(f),
"is_eclipse": get_gbyte(f),
"dungeon_xy": get_uint(f, 2),
"is_crimson": get_gbyte(f),
"bosses_slain": get_byte(f, 11),
"npcs_saved": get_byte(f, 3),
"special_slain": get_byte(f, 4),
"is_a_shadow_orb_broken": get_gbyte(f),
"is_meteor_spawned": get_gbyte(f),
"shadow_orbs_broken": get_gbyte(f),
"altars_broken": get_gint(f),
"hardmode": get_gbyte(f),
"gob_inv_time": get_gint(f),
"gob_inv_size": get_gint(f),
"gob_inv_type": get_gint(f),
"gob_inv_x": get_gdouble(f),
"slime_rain_time" : get_gdouble(f),
"sundial_cooldown" : get_gbyte(f),
"raining": get_gbyte(f),
"raintime": get_gint(f),
"maxrain": get_float(f),
"oretiers": get_int(f, 3),
"background_styles": get_byte(f, 8),
"clouds": get_uint(f),
"cloudcount": get_gushort(f),
"windspeed": get_float(f),
"anglerstrings" : [get_string(f) for _ in range(get_gint(f))],
"angler_saved" : get_gbyte(f),
"angler_quest" : get_gint(f),
"saved_stylist" : get_gbyte(f),
"saved_collector" : get_gbyte(f),
"invasionsize" : get_gint(f),
"cultist_delay" : get_gint(f),
"mobkills" : get_uint(f, get_ushort(f)),
| |
"""
Functions for creating a graph from a skeletonized image (produce_graph) and
for simplifying graphs in various ways.
Graphs are NetworkX graphs where nodes are points, represented as (x,y) pairs.
Some vocabulary:
A juncture is a node with less than 2 or more than 2 edges.
A bridge is a node with exactly 2 edges.
A clump is a set of junctures that are all close to each other.
A path is a list of nodes, each one connects to the next.
"""
import networkx as nx
import itertools
import math
import numpy as np
import infrastructure.helper as helper
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ...
    via https://docs.python.org/2/library/itertools.html
"""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def neighbor_coords((x,y), (width,height)):
"""Returns up to 8 pixel neighbor coordinates of (x,y)."""
neighbors = []
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
if i == 0 and j == 0:
continue
neighbor_x = x + i
neighbor_y = y + j
if 0 < neighbor_x < width and 0 < neighbor_y < height:
neighbors.append((neighbor_x, neighbor_y))
return neighbors
def quadrance((x1,y1), (x2,y2)):
"""Returns distance-squared between two points."""
dx = x2 - x1
dy = y2 - y1
return dx*dx + dy*dy
def distance(p1, p2):
return math.sqrt(quadrance(p1, p2))
def point_line_distance(point, start, end):
if (start == end):
return distance(point, start)
else:
n = abs(
(end[0] - start[0]) * (start[1] - point[1]) -
(start[0] - point[0]) * (end[1] - start[1])
)
d = math.sqrt(
(end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
)
return n / d
def rdp(points, epsilon):
"""Reduces a series of points to a simplified version that loses detail, but
maintains the general shape of the series.
The Ramer-Douglas-Peucker algorithm roughly ported from the pseudo-code
provided by http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
via https://github.com/sebleier/RDP/
"""
dmax = 0.0
index = 0
for i in range(1, len(points) - 1):
d = point_line_distance(points[i], points[0], points[-1])
if d > dmax:
index = i
dmax = d
if dmax >= epsilon:
results = rdp(points[:index+1], epsilon)[:-1] + \
rdp(points[index:], epsilon)
else:
results = [points[0], points[-1]]
return results
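# Sketch: rdp keeps the endpoints and only those interior points that deviate
# from the straight line between them by at least epsilon, so a nearly
# straight polyline collapses to its two endpoints while a genuine corner is
# preserved.
def _rdp_example():
    noisy_line = [(0, 0), (1, 0.1), (2, -0.1), (3, 0.05), (4, 0)]
    assert rdp(noisy_line, epsilon=0.5) == [(0, 0), (4, 0)]
    bent_line = [(0, 0), (2, 2), (4, 0)]
    assert rdp(bent_line, epsilon=0.5) == [(0, 0), (2, 2), (4, 0)]
    return True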
def produce_graph(skeleton_img, hsv_image = None):
"""Takes in a skeletonized image (with foreground as 255 and background as
0) and produces a graph of appropriately connected pixel locations.
"""
graph = nx.Graph()
(rows, cols) = skeleton_img.shape
for y in xrange(rows):
for x in xrange(cols):
pixel = skeleton_img.item(y, x)
if pixel == 255:
point = (x, y)
attribute_dict = None
if hsv_image is not None:
hsv_pixel = tuple( hsv_image[y][x] )
color = helper.Colors.get_color_compartment(hsv_pixel)
attribute_dict = {'color': color}
graph.add_node(point, attribute_dict)
for neighbor in neighbor_coords(point, (cols, rows)):
neighbor_pixel = skeleton_img.item(neighbor[1], neighbor[0])
if neighbor_pixel == 255:
graph.add_edge(point, neighbor)
return graph
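# Sketch: build a graph from a tiny hand-made "skeleton" image. A one-pixel
# diagonal of foreground (255) values becomes a simple path graph whose nodes
# are (x, y) pixel coordinates, which is the structure that the juncture,
# bridge and clump helpers below operate on.
def _produce_graph_example():
    skeleton = np.zeros((4, 4), dtype=np.uint8)
    for i in range(1, 4):
        skeleton[i, i] = 255
    graph = produce_graph(skeleton)
    assert sorted(graph.nodes()) == [(1, 1), (2, 2), (3, 3)]
    assert graph.number_of_edges() == 2
    return graph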
def find_junctures(graph):
return find_nodes_with_degree(graph, lambda degree: degree != 2)
def find_termination_junctures(graph):
return find_nodes_with_degree(graph, lambda degree: degree == 1)
def find_nodes_with_degree(graph, filter_function):
junctures = []
for node in nx.nodes_iter(graph):
degree = nx.degree(graph, node)
if filter_function(degree):
junctures.append(node)
return junctures
def find_clumps(graph, epsilon):
"""Returns a list of "clumps". Each clump is a set of junctures which are
within epsilon of each other."""
max_quadrance = epsilon * epsilon
junctures = find_junctures(graph)
clump_graph = nx.Graph()
for juncture in junctures:
clump_graph.add_node(juncture)
for (i, j) in itertools.combinations(junctures, 2):
if quadrance(i, j) < max_quadrance:
clump_graph.add_edge(i, j)
return nx.connected_components(clump_graph)
def get_path_color(graph, path):
color_occurrence = {}
for node in path:
if 'color' in graph.node[node]:
color = graph.node[node]['color']
if color not in color_occurrence:
color_occurrence[color] = 1
else:
color_occurrence[color] += 1
if len(color_occurrence) == 0:
return helper.Colors.Black
    return max(color_occurrence, key=color_occurrence.get)  # most frequent color on the path
def find_same_length_constraints(graph):
# find_paths from t-junctions!
termination_junctures = find_termination_junctures(graph)
paths_from_t_junctures = find_paths_from_junctures(graph, termination_junctures)
constraint_junctions = []
for (p1, p2) in itertools.combinations(paths_from_t_junctures, 2):
last_node_p1 = p1[len(p1)-1]
last_node_p2 = p2[len(p2)-1]
path_end_degree = nx.degree(graph, last_node_p1)
color_p1 = get_path_color(graph, p1)
color_p2 = get_path_color(graph, p2)
if last_node_p1 == last_node_p2 and color_p1 == color_p2 and path_end_degree == 4:
print "found same length constraint!"
constraint_junctions.append(last_node_p1)
graph.node[last_node_p1]["constraint"] = "same_length"
mygraph = nx.Graph()
for path in paths_from_t_junctures:
path_color = get_path_color(graph, path)
start = path[0]
end = path[len(path)-1]
mygraph.add_node(start, {'color': path_color})
mygraph.add_node(end, {'color': path_color})
mygraph.add_edge(start, end, {'color': path_color})
if path_color != helper.Colors.Black:
for i in range(len(path)):
# do not delete path end juncture if it implements a constraint
node = path[i]
if i != len(path)-1 and "constraint" not in graph.node[node]:
graph.remove_node(node)
else:
print "left constraint juncture in place"
return mygraph, constraint_junctions
def simplify_junctures(graph, epsilon=5):
"""Simplifies clumps by replacing them with a single juncture node. For
each clump, any nodes within epsilon of the clump are deleted. Remaining
nodes are connected back to the simplified junctures appropriately."""
graph = graph.copy()
max_quadrance = epsilon * epsilon
clumps = find_clumps(graph, epsilon)
for clump in clumps:
to_delete = set([])
for node in graph.nodes_iter():
for juncture in clump:
if quadrance(node, juncture) < max_quadrance:
to_delete.add(node)
to_join = set([])
for node in to_delete:
for neighbor in nx.all_neighbors(graph, node):
if not (neighbor in to_delete):
to_join.add(neighbor)
clump_center = (0, 0)
for juncture in clump:
clump_center = (
clump_center[0]+juncture[0], clump_center[1]+juncture[1])
clump_center = (
clump_center[0] / len(clump), clump_center[1] / len(clump))
for node in to_delete:
graph.remove_node(node)
for node in to_join:
graph.add_edge(node, clump_center)
return graph
def find_paths(graph):
return find_paths_from_junctures(graph, find_junctures(graph))
def find_paths_from_junctures(graph, junctures):
"""Returns a list of paths between junctures. Each path is a list of
nodes. The first and last node in the path is a juncture, and each
intermediate node in the path is a bridge.
"""
# TODO: This should also find cyclical paths, that is circular paths which
# are not connected to any junctures. Or perhaps there should be a
# separate function for finding cyclical paths.
paths = []
visited_nodes = set([])
def follow_path(path, current_node, previous_node):
path.append(current_node)
visited_nodes.add(current_node)
if nx.degree(graph, current_node) == 2:
neighbors = list(nx.all_neighbors(graph, current_node))
if neighbors[0] == previous_node:
next_node = neighbors[1]
else:
next_node = neighbors[0]
follow_path(path, next_node, current_node)
for juncture in junctures:
neighbors = nx.all_neighbors(graph, juncture)
for neighbor in neighbors:
if not (neighbor in visited_nodes):
path = [juncture]
follow_path(path, neighbor, juncture)
paths.append(path)
return paths
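# Sketch: on a plus-shaped graph the junctures are the center (degree 4) and
# the four tips (degree 1), while the four intermediate pixels are bridges,
# so find_paths returns four tip-bridge-center paths of three nodes each.
def _find_paths_example():
    graph = nx.Graph()
    center = (2, 2)
    for tip, bridge in [((2, 0), (2, 1)), ((2, 4), (2, 3)),
                        ((0, 2), (1, 2)), ((4, 2), (3, 2))]:
        graph.add_edge(tip, bridge)
        graph.add_edge(bridge, center)
    paths = find_paths(graph)
    assert len(paths) == 4
    assert all(len(path) == 3 for path in paths)
    return paths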
def simplify_paths(graph, epsilon=3):
"""Finds all paths and simplifies them using the RDP algorithm for
reducing the number of points on a curve. All remaining nodes will be
within epsilon of the original curve."""
# TODO: Should also simplify cyclical paths. See find_paths. But RDP needs
# endpoints. A robust way to deal with cycles might be to use the potrace
# algorithm instead of RDP.
# http://potrace.sourceforge.net/potrace.pdf
graph = graph.copy()
paths = find_paths(graph)
for path in paths:
simplified_path = rdp(path, epsilon)
# Delete original path.
edge_attributes = {"same_length_strokes": 0}
for index, node in enumerate(path):
if "constraint" in graph.node[node] and graph.node[node]["constraint"] == "same_length":
edge_attributes["same_length_strokes"] += 1
print "added strokes", edge_attributes["same_length_strokes"]
if index == 0 or index == len(path)-1:
continue
graph.remove_node(node)
for (a, b) in pairwise(simplified_path):
graph.add_edge(a, b, edge_attributes)
return graph
def is_horizontal((x1,y1), (x2,y2)):
"""Returns True if points are more horizontal to each other than vertical
to each other."""
dx = abs(x2 - x1)
dy = abs(y2 - y1)
return dx > dy
def get_bridge_ends(graph, node):
"""Returns the nodes on either side of a bridge (a node with 2 edges)."""
neighbors = list(nx.all_neighbors(graph, node))
p1 = neighbors[0]
p2 = neighbors[1]
return (p1, p2)
def merge_attributes(dict1, dict2):
for key in dict2:
if key in dict1:
dict1[key] += dict2[key]
else:
dict1[key] = dict2[key]
return dict1
def remove_bridge(graph, node):
"""Mutates graph. Assumes node is a bridge (has 2 edges). Removes bridge
node and connects either side of the bridge to each other."""
(p1, p2) = get_bridge_ends(graph, node)
# preserve constraints from old edges to new edge
edge1attributes = graph.get_edge_data(p1, node)
edge2attributes = graph.get_edge_data(p2, node)
graph.remove_node(node)
graph.add_edge(p1, p2, merge_attributes(edge1attributes, edge2attributes))
def hv_lines(graph):
"""Removes any nodes of two edges that are redundant assuming the graph
only represents horizontal and vertical lines."""
graph = graph.copy()
to_delete = set([])
for node in graph.nodes_iter():
if graph.degree(node) == 2:
(p1, p2) = get_bridge_ends(graph, node)
if is_horizontal(p1, node) == is_horizontal(node, p2):
to_delete.add(node)
for node in to_delete:
remove_bridge(graph, node)
return graph
def straighten_lines(graph, max_angle):
"""Removes any nodes of two edges that form an angle less than max_angle
from pi radians.
"""
# TODO: This could use cleanup. The formulation of max_angle is awkward.
graph | |
color = 'r', rasterized = True)
ax1.plot(t, quasars_total / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, linewidth = PlotScripts.global_linewidth)
p = np.where((ZZ < 15))[0]
#ax1.plot(ZZ[p], quasars_total[p] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax3.plot(t, boost_total, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
w = np.where((gal_count_total > 0.0))[0] # Since we're doing a division, need to only plot those redshifts that actually have galaxies.
ax5.plot(t[w], np.divide(boost_total[w], gal_count_total[w]), color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax6.plot(t[w], gal_count_total[w] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
#ax6.plot(ZZ[p], gal_count_total[p] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax1.plot(np.nan, np.nan, color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[0], label = "Quasar Ejection Density")
ax1.plot(np.nan, np.nan, color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[1], label = "Galaxy Density")
ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[0], label = "Count")
ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[1], label = "Fraction of Galaxies")
ax7.set_xlabel(r'$\log_{10}\ M_\mathrm{vir}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax7.set_ylabel(r'$\mathrm{Mean \: Quasar \: Activity}$', size = PlotScripts.global_fontsize)
ax50.set_xlabel(r'$\log_{10}\ M_\mathrm{vir}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
#ax50.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax50.set_ylabel(r'$\mathrm{Number \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax55.set_xlabel(r'$\log_{10}\ M_\mathrm{*}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax55.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
#ax55.set_ylabel(r'$\mathrm{Number \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax56.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
ax56.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
#ax56.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Quasar \: Activity}$', size = PlotScripts.global_fontsize)
ax56.set_yscale('log', nonposy='clip')
ax50.axvline(np.log10(32.0*AllVars.PartMass / AllVars.Hubble_h), color = 'k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
ax1.set_xlim(PlotScripts.time_xlim)
ax1.set_yscale('log', nonposy='clip')
ax3.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
ax3.set_xlim(PlotScripts.time_xlim)
ax3.set_yscale('log', nonposy='clip')
## Create a second axis at the top that contains the corresponding redshifts. ##
## The redshift defined in the variable 'z_plot' will be displayed. ##
ax2 = ax1.twiny()
ax4 = ax3.twiny()
ax57 = ax56.twiny()
t_plot = (AllVars.t_BigBang - AllVars.cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding time values on the bottom.
z_labels = ["$%d$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax2.set_xlim(PlotScripts.time_xlim)
ax2.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax2.set_xticklabels(z_labels) # But label them as redshifts.
ax4.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax4.set_xlim(PlotScripts.time_xlim)
ax4.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax4.set_xticklabels(z_labels) # But label them as redshifts.
ax57.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax57.set_xlim(PlotScripts.time_xlim)
ax57.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax57.set_xticklabels(z_labels) # But label them as redshifts.
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
#ax1.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax1.set_ylabel(r'$N_\mathrm{Quasars} \: [\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
ax6.set_ylabel(r'$N_\mathrm{Gal} \: [\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
ax3.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
ax3.set_ylabel(r'$N_\mathrm{Boosted}$', fontsize = PlotScripts.global_fontsize)
ax5.set_ylabel(r'$\mathrm{Fraction \: Boosted}$', fontsize = PlotScripts.global_fontsize)
leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax3.legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax7.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax50.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax55.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
fig.tight_layout()
fig2.tight_layout()
fig3.tight_layout()
fig5.tight_layout()
fig6.tight_layout()
outputFile1 = './{0}_quasardensity{1}'.format(output_tag, output_format)
outputFile2 = './{0}_boostedcount{1}'.format(output_tag, output_format)
outputFile3 = './{0}_quasar_activity_halo{1}'.format(output_tag, output_format)
outputFile4 = './{0}_mergercount_global{1}'.format(output_tag, output_format)
outputFile5 = './{0}_mergercount_global_stellarmass{1}'.format(output_tag, output_format)
outputFile6 = './{0}_mergercount_total{1}'.format(output_tag, output_format)
fig.savefig(outputFile1) # Save the figure
fig2.savefig(outputFile2) # Save the figure
fig3.savefig(outputFile3) # Save the figure
fig4.savefig(outputFile4) # Save the figure
fig5.savefig(outputFile5) # Save the figure
fig6.savefig(outputFile6) # Save the figure
print("Saved to {0}".format(outputFile1))
print("Saved to {0}".format(outputFile2))
print("Saved to {0}".format(outputFile3))
print("Saved to {0}".format(outputFile4))
print("Saved to {0}".format(outputFile5))
print("Saved to {0}".format(outputFile6))
plt.close(fig)
plt.close(fig2)
plt.close(fig3)
##
def plot_photon_quasar_fraction(snapshot, filenr, output_tag, QuasarFractionalPhoton, QuasarActivityToggle, NumSubsteps):
ax1 = plt.subplot(111)
counts, bin_edges, bin_middle = AllVars.Calculate_Histogram(QuasarFractionalPhoton, 0.05, 0, 0, 1)
ax1.plot(bin_middle, counts, lw = PlotScripts.global_linewidth, color = 'r')
ax1.axvline(np.mean(QuasarFractionalPhoton[QuasarFractionalPhoton != 0]), lw = 0.5, ls = '-')
ax1.set_yscale('log', nonposy='clip')
ax1.set_xlabel(r"$\mathrm{Fractional \: Photon \: Boost}$")
ax1.set_ylabel(r"$\mathrm{Count}$")
ax1.set_ylim([1e1, 1e5])
outputFile1 = './photonfraction/file{0}_snap{1}_{2}{3}'.format(filenr, snapshot, output_tag, output_format)
plt.tight_layout()
plt.savefig(outputFile1)
print("Saved to {0}".format(outputFile1))
plt.close()
###
def plot_quasar_substep(snapshot, filenr, output_tag, substep):
ax1 = plt.subplot(111)
counts, bin_edges, bin_middle = AllVars.Calculate_Histogram(substep, 0.1, 0, 0, 10)
ax1.plot(bin_middle, counts, lw = PlotScripts.global_linewidth, color = 'r')
ax1.axvline(np.mean(substep[substep != -1]), lw = 0.5, ls = '-')
ax1.set_yscale('log', nonposy='clip')
ax1.set_xlabel(r"$\mathrm{Substep \: Quasar \: Activity}$")
ax1.set_ylabel(r"$\mathrm{Count}$")
# ax1.set_ylim([1e1, 1e5])
outputFile1 = './substep_activity/file{0}_snap{1}_{2}{3}'.format(filenr, snapshot, output_tag, output_format)
plt.tight_layout()
plt.savefig(outputFile1)
print("Saved to {0}".format(outputFile1))
plt.close()
###
def plot_post_quasar_SFR(PlotSnapList, model_number, Gal, output_tag):
ax1 = plt.subplot(111)
ax2 = ax1.twinx()
count = 0
snapshot_thickness = 20 # How many snapshots before/after the quasar event do we want to track?
for snapshot_idx in PlotSnapList[model_number]:
w = np.where((G.QuasarActivity[:, snapshot_idx] == 1) & (G.LenHistory[:, snapshot_idx] > 200.0) & (G.GridStellarMass[:, snapshot_idx] > 0.001))[0]
w_slice_gridhistory = G.GridHistory[w,snapshot_idx-snapshot_thickness:snapshot_idx+snapshot_thickness]
potential_gal = []
for i in range(len(w_slice_gridhistory)):
ww = np.where((w_slice_gridhistory[i] >= 0))[0]
if (len(ww) == snapshot_thickness * 2):
potential_gal.append(w[i])
if (len(potential_gal) == 0):
return
count += 1
print("There were {0} galaxies that had an energetic quasar wind event at snapshot {1} (z = {2:.3f})".format(len(potential_gal), snapshot_idx, AllVars.SnapZ[snapshot_idx]))
chosen_gal = potential_gal[1]
lenhistory_array = np.empty((int(snapshot_thickness*2 + 1)))
SFR_array = np.empty((int(snapshot_thickness*2 + 1)))
gridhistory_array = np.empty((int(snapshot_thickness*2 + 1)))
coldgas_array = np.empty((int(snapshot_thickness*2 + 1)))
t = np.empty((int(snapshot_thickness*2 + 1)))
for i in range(-snapshot_thickness, snapshot_thickness+1):
#print("SFR {0} {1}".format(snapshot_idx + i, G.GridSFR[chosen_gal, snapshot_idx+i]))
#print("ColdGas {0} {1}".format(snapshot_idx + i, G.GridColdGas[chosen_gal, snapshot_idx+i]))
lenhistory_array[i+snapshot_thickness] = (G.LenHistory[chosen_gal, snapshot_idx+i])
SFR_array[i+snapshot_thickness] = (G.GridSFR[chosen_gal, snapshot_idx+i]) #- (G.GridSFR[chosen_gal, snapshot_idx])
gridhistory_array[i+snapshot_thickness] = (G.GridHistory[chosen_gal, snapshot_idx+i])
coldgas_array[i+snapshot_thickness] = (G.GridColdGas[chosen_gal, snapshot_idx+i] * 1.0e10 / AllVars.Hubble_h) #- (G.GridColdGas[chosen_gal, snapshot_idx])
t[i+snapshot_thickness] = (-AllVars.Lookback_Time[snapshot_idx+i] + AllVars.Lookback_Time[snapshot_idx]) * 1.0e3
print("Len History {0}".format(lenhistory_array))
print("Grid History {0}".format(gridhistory_array))
print("Cold Gas {0}".format(coldgas_array))
print("SFR {0}".format(SFR_array))
stellarmass_text = r"$\log M_* = {0:.2f} \: M_\odot$".format(np.log10(G.GridStellarMass[chosen_gal, snapshot_idx] * 1.0e10 / AllVars.Hubble_h))
Ndym_text = "Dynamical Time = {0:.2f} Myr".format(G.DynamicalTime[chosen_gal, snapshot_idx])
z_text = "z = {0:.2f}".format(AllVars.SnapZ[snapshot_idx])
ax1.text(0.05, 0.95, z_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
ax1.text(0.05, 0.9, stellarmass_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
ax1.text(0.05, 0.85, Ndym_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
ax1.plot(t, SFR_array, color = 'r', lw = PlotScripts.global_linewidth)
ax2.plot(t, coldgas_array, color = 'b', lw = PlotScripts.global_linewidth)
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Quasar \: Event \: [Myr]}$", size = PlotScripts.global_labelsize - 10)
# ax1.set_ylabel(r"$\mathrm{Fractional \: SFR \: Relative \: To \: SFR_{Quasar}}$", size = PlotScripts.global_labelsize - 10)
# ax2.set_ylabel(r"$\mathrm{Difference \: Cold \: Gas \: Mass \: Relative \: To \: Cold_{Quasar}}$", size = PlotScripts.global_labelsize - 10)
ax1.set_ylabel(r"$\mathrm{SFR} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$", size = PlotScripts.global_labelsize - 10)
ax2.set_ylabel(r"$\mathrm{Cold \: Gas \: Mass \: [\mathrm{M}_\odot]}$",size = PlotScripts.global_labelsize - 10)
ax1.set_yscale('log', nonposy='clip')
ax2.set_yscale('log', nonposy='clip')
ax1.plot(np.nan, np.nan, color = 'r', label = r"$\mathrm{SFR}$")
ax1.plot(np.nan, np.nan, color = 'b', label = r"$\mathrm{Cold \: Gas}$")
leg = ax1.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for legend_text in leg.get_texts(): # Reduce the size of the text; avoid reusing `t`, the time array above
legend_text.set_fontsize(PlotScripts.global_legendsize)
outputFile = "{0}_galaxy{2}{1}".format(output_tag, output_format, chosen_gal)
plt.tight_layout()
plt.savefig(outputFile)
print("Saved to {0}".format(outputFile))
plt.close()
exit()
###
def plot_stellarmass_blackhole(SnapList, simulation_norm, mean_galaxy_BHmass,
std_galaxy_BHmass, N_galaxy_BHmass, FirstFile,
LastFile, NumFile, model_tags, output_tag):
master_mean_SMBH, master_std_SMBH, master_N, master_bin_middle = \
collect_across_tasks(mean_galaxy_BHmass, std_galaxy_BHmass,
N_galaxy_BHmass, SnapList, SnapList, True,
m_gal_low, m_gal_high)
if rank == 0:
fig = plt.figure()
The method determines the heading along the line, using a "thinned"
version of the line. The degree of thinning is based on the size of the
offset; the larger the offset, the greater the distance between sample
locations used to construct the thinned line used for determining headings.
The thinned line is splined at a frequency greater than the sample
frequency, and the heading at any given point is determined from the
vector formed by the closest two points on the splined line. The
correction (behind, in front, left or right) is determined with respect
to the heading, and added to the original location.
If this method fails, no dummies, duplicated locations, or reversals
are produced.
The algorithm:
1. Determine the average distance between each point = D
2. Smoothing interval = MAX(2*D, Offset distance) = I
3. Thin input points to be at least the smoothing interval I apart from each other.
4. Smoothly re-interpolate the thinned points at five times the
original average distance D.
5. For each input point, calculate the bearing using the nearest points
on the smoothed curve.
(A hedged plain-NumPy sketch of this heading-based correction is given after the wrapper call below.)
"""
gxapi_cy.WrapVVU._offset_correct(GXContext._get_tls_geo(), vv_xi, vv_yi, dist, heading, v_vxo, v_vyo)
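# --- Hedged illustration (not part of the Geosoft GX API) ---------------------------
# A minimal plain-NumPy sketch of the heading-based offset correction described in the
# docstring above: thin the track, re-interpolate it, estimate the bearing at each input
# point from the two nearest resampled points, then shift the point by `dist` along a
# direction `heading_deg` degrees counter-clockwise from straight ahead. The function
# name, the D/5 resampling spacing, and the reading of `heading` as an angle measured
# counter-clockwise from the direction of travel are assumptions made for illustration;
# it also assumes a track with at least a handful of points. The library call above is
# the authoritative implementation.
import numpy as np

def offset_correct_sketch(x, y, dist, heading_deg):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    seg = np.hypot(np.diff(x), np.diff(y))
    d_avg = seg.mean()                                   # step 1: average spacing D
    interval = max(2.0 * d_avg, abs(dist))               # step 2: smoothing interval I
    # Step 3: thin the track so consecutive kept points are at least `interval` apart.
    keep = [0]
    for i in range(1, len(x)):
        if np.hypot(x[i] - x[keep[-1]], y[i] - y[keep[-1]]) >= interval:
            keep.append(i)
    keep = np.array(keep)
    # Step 4: re-interpolate the thinned track along its chord length. "Splined at a
    # frequency greater than the sample frequency" is read here as a D/5 spacing.
    s_thin = np.concatenate(([0.0], np.cumsum(np.hypot(np.diff(x[keep]), np.diff(y[keep])))))
    s_fine = np.arange(0.0, s_thin[-1] + d_avg / 5.0, d_avg / 5.0)
    xf = np.interp(s_fine, s_thin, x[keep])
    yf = np.interp(s_fine, s_thin, y[keep])
    # Step 5: bearing from the two nearest resampled points, then apply the offset.
    xo, yo = np.empty_like(x), np.empty_like(y)
    for i in range(len(x)):
        j = int(np.argmin(np.hypot(xf - x[i], yf - y[i])))
        j0, j1 = (j - 1, j) if j == len(xf) - 1 else (j, j + 1)
        track = np.arctan2(yf[j1] - yf[j0], xf[j1] - xf[j0])
        ang = track + np.radians(heading_deg)            # CCW from straight ahead
        xo[i] = x[i] + dist * np.cos(ang)
        yo[i] = y[i] + dist * np.sin(ang)
    return xo, yo
# -------------------------------------------------------------------------------------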
@classmethod
def offset_correct2(cls, vv_xi, vv_yi, dist, azimuth, vv_xo, vv_yo):
"""
Same as `offset_correct <geosoft.gxapi.GXVVU.offset_correct>`, but for an arbitrary offset angle.
:param vv_xi: Input X
:param vv_yi: Input Y
:param dist: Offset distance
:param azimuth: Offset azimuth (degrees counter-clockwise from straight ahead)
:param vv_xo: Output X
:param vv_yo: Output Y
:type vv_xi: GXVV
:type vv_yi: GXVV
:type dist: float
:type azimuth: float
:type vv_xo: GXVV
:type vv_yo: GXVV
.. versionadded:: 5.1.3
**License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
"""
gxapi_cy.WrapVVU._offset_correct2(GXContext._get_tls_geo(), vv_xi, vv_yi, dist, azimuth, vv_xo, vv_yo)
@classmethod
def offset_correct3(cls, vv_xi, vv_yi, dist, azimuth, interval, vv_xo, vv_yo):
"""
Same as `offset_correct2 <geosoft.gxapi.GXVVU.offset_correct2>`, but specify smoothing interval.
:param vv_xi: Input X
:param vv_yi: Input Y
:param dist: Offset distance
:param azimuth: Offset azimuth (degrees counter-clockwise from straight ahead)
:param interval: Averaging interval - `rDUMMY <geosoft.gxapi.rDUMMY>` for default
:param vv_xo: Output X
:param vv_yo: Output Y
:type vv_xi: GXVV
:type vv_yi: GXVV
:type dist: float
:type azimuth: float
:type interval: float
:type vv_xo: GXVV
:type vv_yo: GXVV
.. versionadded:: 5.1.4
**License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
**Note:** See the algorithm note #2 above for the default smoothing interval.
"""
gxapi_cy.WrapVVU._offset_correct3(GXContext._get_tls_geo(), vv_xi, vv_yi, dist, azimuth, interval, vv_xo, vv_yo)
@classmethod
def offset_correct_xyz(cls, vv_xi, vv_yi, vv_zi, x_off, y_off, z_off, interval, v_vxo, v_vyo, v_vzo):
"""
Correct locations based on heading and fixed offset.
:param vv_xi: Input X
:param vv_yi: Input Y
:param vv_zi: Input Z
:param x_off: Offset along-track (+ve forward)
:param y_off: Offset across-track (+ve to the right)
:param z_off: Vertical Offset (+ve up)
:param interval: Sampling interval - `rDUMMY <geosoft.gxapi.rDUMMY>` for default
:param v_vxo: Output X
:param v_vyo: Output Y
:param v_vzo: Output Z
:type vv_xi: GXVV
:type vv_yi: GXVV
:type vv_zi: GXVV
:type x_off: float
:type y_off: float
:type z_off: float
:type interval: float
:type v_vxo: GXVV
:type v_vyo: GXVV
:type v_vzo: GXVV
.. versionadded:: 9.0
**License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
**Note:** In many applications, measurements are taken with an instrument which
is towed behind, or pushed ahead of where the locations are recorded.
Use this function to estimate the actual location of the instrument.
The method determines the heading along the line, using a "thinned"
version of the line. The default degree of thinning is based on the size of the
offset; the larger the offset, the greater the distance between sample
locations used to construct the thinned line used for determining headings.
The thinned line is splined at a frequency greater than the sample
frequency, and the heading at any given point is determined from the
vector formed by the closest two points on the splined line. The
correction (behind, in front, left or right) is determined with respect
to the heading, and added to the original location.
If this method fails, no dummies, duplicated locations, or reversals
are produced.
The algorithm:
1. Determine average distance between each point = D
2. Default smoothing interval = MAX(2*D, Offset distance) = I
3. Thin input points to be at least the smoothing interval I apart from each other.
4. Smoothly re-interpolate the thinned points at five times the
original average distance D.
5. For each input point, calculate the bearing using the nearest points
on the smoothed curve
"""
gxapi_cy.WrapVVU._offset_correct_xyz(GXContext._get_tls_geo(), vv_xi, vv_yi, vv_zi, x_off, y_off, z_off, interval, v_vxo, v_vyo, v_vzo)
@classmethod
def offset_rectangles(cls, vv_xi, vv_yi, offset, size_x, size_y, vv_xo, vv_yo):
"""
Get non-overlapping offset location for rectangular symbols.
:param vv_xi: Input X locations
:param vv_yi: Input Y locations
:param offset: Minimum offset distance
:param size_x: Symbol X size (width)
:param size_y: Symbol Y size (height)
:param vv_xo: Output (offset) X locations
:param vv_yo: Output (offset) Y locations
:type vv_xi: GXVV
:type vv_yi: GXVV
:type offset: float
:type size_x: float
:type size_y: float
:type vv_xo: GXVV
:type vv_yo: GXVV
.. versionadded:: 5.0.7
**License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
**Note:** Often on maps plotted symbols and text overlap each other.
This routine accepts a `GXVV <geosoft.gxapi.GXVV>` of locations and returns a new
set of locations offset from the originals, and guaranteed
not to overlap, given the size of the original symbols.
The returned offset X, Y
locations are offset from the original locations by
the minimum of a) the input offset, b) the input symbol
X or Y size. This is to ensure that the original location is
never covered by the offset symbol. In addition, the offset
symbol is never placed directly below the original location,
to make it easier to draw a connecting line.
Care should be taken when choosing the symbol size, because
if the point density is too high, all the points will get
pushed to the outside edge and your plot will look like a
hedgehog (it also takes a lot longer!). A hedged greedy sketch of this
non-overlap placement idea is given after the wrapper call below.
"""
gxapi_cy.WrapVVU._offset_rectangles(GXContext._get_tls_geo(), vv_xi, vv_yi, offset, size_x, size_y, vv_xo, vv_yo)
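# --- Hedged illustration (not part of the Geosoft GX API) ---------------------------
# A greedy sketch of the placement constraint described above: shift each symbol away
# from its anchor by min(offset, symbol width, symbol height), try the compass
# directions except "directly below", and keep the first candidate rectangle that does
# not overlap anything already placed. This is only one plausible strategy for the
# documented behaviour; the library's actual algorithm is not described here, and the
# names below are assumptions.
import itertools

def _rects_overlap(a, b):
    """Axis-aligned rectangles given as (xmin, ymin, xmax, ymax)."""
    return not (a[2] <= b[0] or b[2] <= a[0] or a[3] <= b[1] or b[3] <= a[1])

def offset_rectangles_sketch(xs, ys, offset, size_x, size_y):
    step = min(offset, size_x, size_y)     # never cover the original anchor point
    # Candidate directions; (0, -1) -- directly below -- is deliberately excluded.
    dirs = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (1, -1)]
    placed, out_x, out_y = [], [], []
    for x, y in zip(xs, ys):
        chosen = None
        # Try increasing multiples of the step until a free spot is found (bounded).
        for k, (dx, dy) in itertools.product(range(1, 20), dirs):
            cx, cy = x + dx * k * step, y + dy * k * step
            rect = (cx - size_x / 2, cy - size_y / 2, cx + size_x / 2, cy + size_y / 2)
            if not any(_rects_overlap(rect, r) for r in placed):
                chosen = (cx, cy, rect)
                break
        if chosen is None:                 # very dense data: fall back to the anchor
            cx, cy = x, y
            rect = (cx - size_x / 2, cy - size_y / 2, cx + size_x / 2, cy + size_y / 2)
        else:
            cx, cy, rect = chosen
        placed.append(rect)
        out_x.append(cx)
        out_y.append(cy)
    return out_x, out_y
# -------------------------------------------------------------------------------------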
@classmethod
def pick_peak(cls, vv_i, vv_o, pr_tol, width):
"""
Find peaks in a `GXVV <geosoft.gxapi.GXVV>` - method one.
:param vv_i: Input `GXVV <geosoft.gxapi.GXVV>`
:param vv_o: Returned peak `GXVV <geosoft.gxapi.GXVV>`, all dummies except peak points.
:param pr_tol: Minimum value to accept (0.0 to find all)
:param width: Minimum width to accept (1 to find all)
:type vv_i: GXVV
:type vv_o: GXVV
:type pr_tol: float
:type width: int
.. versionadded:: 5.0
**License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
**Note:** Peaks are the maximum point within a sequence of
positive values in the input `GXVV <geosoft.gxapi.GXVV>`. The width is the
number of points in the positive sequence.
A `GXVV <geosoft.gxapi.GXVV>` may have to be pre-filtered before finding
the peak values:
Use `bp_filt <geosoft.gxapi.GXVVU.bp_filt>` to smooth the data as required.
Use `filter <geosoft.gxapi.GXVVU.filter>` to apply a Laplace filter
"-0.5,1.0,-0.5" to make curvature data. (A plain-NumPy sketch of the peak definition above is given after the wrapper call below.)
"""
gxapi_cy.WrapVVU._pick_peak(GXContext._get_tls_geo(), vv_i, vv_o, pr_tol, width)
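# --- Hedged illustration (not part of the Geosoft GX API) ---------------------------
# A plain-NumPy sketch of the peak definition described above: within each run of
# strictly positive values the peak is the maximum point and the width is the length
# of the run; peaks smaller than `min_value` or narrower than `min_width` are rejected.
# The real pick_peak writes dummies everywhere except the peaks; this sketch returns
# NaN instead. The function and parameter names are assumptions.
import numpy as np

def pick_peak_sketch(values, min_value=0.0, min_width=1):
    v = np.asarray(values, dtype=float)
    out = np.full_like(v, np.nan)          # "dummy" everywhere except the peaks
    i, n = 0, len(v)
    while i < n:
        if v[i] > 0:
            j = i
            while j < n and v[j] > 0:      # extend the run of positive values
                j += 1
            run = v[i:j]
            peak_idx = i + int(np.argmax(run))
            if run.max() >= min_value and (j - i) >= min_width:
                out[peak_idx] = v[peak_idx]
            i = j
        else:
            i += 1
    return out

# Example: pick_peak_sketch([0, 1, 3, 2, 0, 0, 5, 0]) keeps only the values at
# positions 2 and 6 (the maxima of the two positive runs); everything else is NaN.
# -------------------------------------------------------------------------------------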
@classmethod
def pick_peak2(cls, vv_i, vv_o, pr_base_lvl, pr_ampl):
"""
Find peaks in a `GXVV <geosoft.gxapi.GXVV>` - method two.
:param vv_i: Input `GXVV <geosoft.gxapi.GXVV>`
:param vv_o: Returned peak `GXVV <geosoft.gxapi.GXVV>`, all dummies except peak points.
:param pr_base_lvl: Base level to accept (0.0 to find all)
:param pr_ampl: Minimum amplitude to accept
:type vv_i: GXVV
:type vv_o: GXVV
:type pr_base_lvl: float
:type pr_ampl: float
.. versionadded:: 5.0
**License:** `Geosoft End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-end-user-lic>`_
**Note:** Peaks are the maximum point within a sequence of
values in the input `GXVV <geosoft.gxapi.GXVV>`. Maximum points must be above
the base level and have a local amplitude greater
than the minimum amplitude specified.
A `GXVV <geosoft.gxapi.GXVV>` may have to be pre-filtered before finding
the peak values.
"""
gxapi_cy.WrapVVU._pick_peak2(GXContext._get_tls_geo(), vv_i, vv_o, pr_base_lvl, pr_ampl)
@classmethod
def pick_peak3(cls, vv_i, vv_x, vv_y, pr_base_lvl, pr_ampl, v_vind, v_vamp, v_vwid, v_vhawid):
"""
Find peaks in a `GXVV <geosoft.gxapi.GXVV>` - method two, returning width and half-amplitude widths.
:param vv_i: [i] data `GXVV <geosoft.gxapi.GXVV>`
:param vv_x:
# stdlib
from io import StringIO
# lib
from django.core.management import call_command
from django.urls import reverse
from rest_framework import status
# local
from api.models import BISList, Character, Gear, Team, Tier
from api.serializers import BISListSerializer
from .test_base import SavageAimTestCase
class BISListCollection(SavageAimTestCase):
"""
Test the creation of new BISLists
"""
def setUp(self):
"""
Create a character for use in the test
"""
self.char = Character.objects.create(
avatar_url='https://img.savageaim.com/abcde',
lodestone_id=1234567890,
user=self._get_user(),
name='Char 1',
world='Lich',
verified=True,
)
call_command('job_seed', stdout=StringIO())
call_command('gear_seed', stdout=StringIO())
self.gear_id_map = {g.name: g.id for g in Gear.objects.all()}
def tearDown(self):
"""
Clean up the DB after each test
"""
Character.objects.all().delete()
def test_create(self):
"""
Create a new BIS List for the character
"""
url = reverse('api:bis_collection', kwargs={'character_id': self.char.pk})
self.client.force_authenticate(self.char.user)
# Try one with PLD first
data = {
'job_id': 'PLD',
'bis_mainhand_id': self.gear_id_map['Divine Light'],
'bis_offhand_id': self.gear_id_map['Moonward'],
'bis_head_id': self.gear_id_map['Limbo'],
'bis_body_id': self.gear_id_map['Limbo'],
'bis_hands_id': self.gear_id_map['Limbo'],
'bis_legs_id': self.gear_id_map['Limbo'],
'bis_feet_id': self.gear_id_map['Limbo'],
'bis_earrings_id': self.gear_id_map['Limbo'],
'bis_necklace_id': self.gear_id_map['Limbo'],
'bis_bracelet_id': self.gear_id_map['Limbo'],
'bis_right_ring_id': self.gear_id_map['Limbo'],
'bis_left_ring_id': self.gear_id_map['Limbo'],
'current_mainhand_id': self.gear_id_map['Moonward'],
'current_offhand_id': self.gear_id_map['Moonward'],
'current_head_id': self.gear_id_map['Moonward'],
'current_body_id': self.gear_id_map['Moonward'],
'current_hands_id': self.gear_id_map['Moonward'],
'current_legs_id': self.gear_id_map['Moonward'],
'current_feet_id': self.gear_id_map['Moonward'],
'current_earrings_id': self.gear_id_map['Moonward'],
'current_necklace_id': self.gear_id_map['Moonward'],
'current_bracelet_id': self.gear_id_map['Moonward'],
'current_right_ring_id': self.gear_id_map['Moonward'],
'current_left_ring_id': self.gear_id_map['Moonward'],
'external_link': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
self.assertEqual(BISList.objects.count(), 1)
obj = BISList.objects.first()
self.assertNotEqual(obj.bis_offhand_id, obj.bis_mainhand_id)
self.assertIsNone(obj.external_link)
obj.delete()
# Do one for a different job, ensure that offhand and mainhand are actually the same
data['job_id'] = 'SGE'
data['external_link'] = 'https://etro.gg'
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
self.assertEqual(BISList.objects.count(), 1)
obj = BISList.objects.first()
self.assertEqual(obj.bis_offhand_id, obj.bis_mainhand_id)
self.assertEqual(obj.bis_offhand_id, data['bis_mainhand_id'])
self.assertEqual(obj.external_link, data['external_link'])
def test_create_400(self):
"""
Test all the kinds of errors that can come from the create endpoint;
- Invalid number for a gear type
- Gear pk doesn't exist
- Gear category is incorrect
- Job ID doesn't exist
- Job already has a BIS List
- Data missing
- External link isn't a url
"""
url = reverse('api:bis_collection', kwargs={'character_id': self.char.pk})
self.client.force_authenticate(self.char.user)
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
content = response.json()
for field in content:
self.assertEqual(content[field], ['This field is required.'])
self.assertEqual(len(content), 25)
# All gear errors will be run at once since there's only one actual function to test
data = {
'job_id': 'abc',
'bis_mainhand_id': 'abcde',
'bis_body_id': -1,
'bis_head_id': self.gear_id_map['Eternal Dark'],
'bis_earrings_id': self.gear_id_map['Divine Light'],
'current_mainhand_id': self.gear_id_map['The Last'],
'external_link': 'abcde',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
# Since we checked all the "this field is required" errors above, just check the ones we send now
content = response.json()
invalid_gear = 'The chosen category of Gear does not have an item for this slot.'
self.assertEqual(content['job_id'], ['Please select a valid Job.'])
self.assertEqual(content['bis_mainhand_id'], ['A valid integer is required.'])
self.assertEqual(content['bis_body_id'], ['Please ensure your value corresponds with a valid type of Gear.'])
self.assertEqual(content['bis_head_id'], [invalid_gear])
self.assertEqual(content['bis_earrings_id'], [invalid_gear])
self.assertEqual(content['current_mainhand_id'], [invalid_gear])
self.assertEqual(content['external_link'], ['Enter a valid URL.'])
# Create a BIS List for a job, then send a request to make one for the same job
bis_gear = Gear.objects.first()
curr_gear = Gear.objects.last()
BISList.objects.create(
bis_body=bis_gear,
bis_bracelet=bis_gear,
bis_earrings=bis_gear,
bis_feet=bis_gear,
bis_hands=bis_gear,
bis_head=bis_gear,
bis_left_ring=bis_gear,
bis_legs=bis_gear,
bis_mainhand=bis_gear,
bis_necklace=bis_gear,
bis_offhand=bis_gear,
bis_right_ring=bis_gear,
current_body=curr_gear,
current_bracelet=curr_gear,
current_earrings=curr_gear,
current_feet=curr_gear,
current_hands=curr_gear,
current_head=curr_gear,
current_left_ring=curr_gear,
current_legs=curr_gear,
current_mainhand=curr_gear,
current_necklace=curr_gear,
current_offhand=curr_gear,
current_right_ring=curr_gear,
job_id='DRG',
owner=self.char,
)
data = {'job_id': 'DRG'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
# Since we checked all the "this field is required" errors above, just check the ones we send now
content = response.json()
self.assertEqual(content['job_id'], ['Currently SavageAim only supports one BISList per job.'])
def test_404(self):
"""
Test all situations where the endpoint would respond with a 404;
- Invalid ID
- Character is not owned by the requesting user
- Character is not verified
"""
user = self._get_user()
self.client.force_authenticate(user)
# ID doesn't exist
url = reverse('api:bis_collection', kwargs={'character_id': 0000000000000000000000})
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
# Character belongs to a different user
self.char.user = self._create_user()
self.char.save()
url = reverse('api:bis_collection', kwargs={'character_id': self.char.id})
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
# Character is not verified
self.char.verified = False
self.char.user = user
self.char.save()
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
class BISListResource(SavageAimTestCase):
"""
Test the update of existing BIS Lists
"""
def setUp(self):
"""
Create a character for use in the test
"""
self.char = Character.objects.create(
avatar_url='https://img.savageaim.com/abcde',
lodestone_id=1234567890,
user=self._get_user(),
name='Char 1',
world='Lich',
verified=True,
)
call_command('job_seed', stdout=StringIO())
call_command('gear_seed', stdout=StringIO())
self.gear_id_map = {g.name: g.id for g in Gear.objects.all()}
bis_gear = Gear.objects.first()
curr_gear = Gear.objects.last()
self.bis = BISList.objects.create(
bis_body=bis_gear,
bis_bracelet=bis_gear,
bis_earrings=bis_gear,
bis_feet=bis_gear,
bis_hands=bis_gear,
bis_head=bis_gear,
bis_left_ring=bis_gear,
bis_legs=bis_gear,
bis_mainhand=bis_gear,
bis_necklace=bis_gear,
bis_offhand=bis_gear,
bis_right_ring=bis_gear,
current_body=curr_gear,
current_bracelet=curr_gear,
current_earrings=curr_gear,
current_feet=curr_gear,
current_hands=curr_gear,
current_head=curr_gear,
current_left_ring=curr_gear,
current_legs=curr_gear,
current_mainhand=curr_gear,
current_necklace=curr_gear,
current_offhand=curr_gear,
current_right_ring=curr_gear,
job_id='DRG',
owner=self.char,
external_link='https://etro.gg/',
)
def tearDown(self):
"""
Clean up the DB after each test
"""
Character.objects.all().delete()
def test_read(self):
"""
Read a BIS List via the API and ensure it is correctly returned
"""
url = reverse('api:bis_resource', kwargs={'character_id': self.char.pk, 'pk': self.bis.pk})
self.client.force_authenticate(self.char.user)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
self.assertDictEqual(response.json(), BISListSerializer(self.bis).data)
def test_update(self):
"""
Update the existing BIS List with a PUT request
"""
url = reverse('api:bis_resource', kwargs={'character_id': self.char.pk, 'pk': self.bis.pk})
self.client.force_authenticate(self.char.user)
# Try one with PLD first
data = {
'job_id': 'PLD',
'bis_mainhand_id': self.gear_id_map['Divine Light'],
'bis_offhand_id': self.gear_id_map['Moonward'],
'bis_head_id': self.gear_id_map['Limbo'],
'bis_body_id': self.gear_id_map['Limbo'],
'bis_hands_id': self.gear_id_map['Limbo'],
'bis_legs_id': self.gear_id_map['Limbo'],
'bis_feet_id': self.gear_id_map['Limbo'],
'bis_earrings_id': self.gear_id_map['Limbo'],
'bis_necklace_id': self.gear_id_map['Limbo'],
'bis_bracelet_id': self.gear_id_map['Limbo'],
'bis_right_ring_id': self.gear_id_map['Limbo'],
'bis_left_ring_id': self.gear_id_map['Limbo'],
'current_mainhand_id': self.gear_id_map['Moonward'],
'current_offhand_id': self.gear_id_map['Moonward'],
'current_head_id': self.gear_id_map['Moonward'],
'current_body_id': self.gear_id_map['Moonward'],
'current_hands_id': self.gear_id_map['Moonward'],
'current_legs_id': self.gear_id_map['Moonward'],
'current_feet_id': self.gear_id_map['Moonward'],
'current_earrings_id': self.gear_id_map['Moonward'],
'current_necklace_id': self.gear_id_map['Moonward'],
'current_bracelet_id': self.gear_id_map['Moonward'],
'current_right_ring_id': self.gear_id_map['Moonward'],
'current_left_ring_id': self.gear_id_map['Moonward'],
'external_link': None,
}
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
self.bis.refresh_from_db()
self.assertEqual(self.bis.job_id, 'PLD')
self.assertNotEqual(self.bis.bis_mainhand, self.bis.bis_offhand)
self.assertIsNone(self.bis.external_link)
def test_update_400(self):
"""
Test all the kinds of errors that can come from the update endpoint;
- Invalid number for a gear type
- Gear pk doesn't exist
- Gear category is incorrect
- Job ID doesn't exist
- Job already has a BIS List
- Data missing
"""
url = reverse('api:bis_resource', kwargs={'character_id': self.char.pk, 'pk': self.bis.pk})
self.client.force_authenticate(self.char.user)
response = self.client.put(url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
content = response.json()
for field in content:
self.assertEqual(content[field], ['This field is required.'])
self.assertEqual(len(content), 25)
# All gear errors will be run at once since there's only one actual function to test
data = {
'job_id': 'abc',
'bis_mainhand_id': 'abcde',
'bis_body_id': -1,
'bis_head_id': self.gear_id_map['Eternal Dark'],
'bis_earrings_id': self.gear_id_map['Divine Light'],
'current_mainhand_id': self.gear_id_map['The Last'],
}
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
# Since we checked all the "this field is required" errors above, just check the ones we send now
content = response.json()
invalid_gear = 'The chosen category of Gear does not have an item for this slot.'
self.assertEqual(content['job_id'], ['Please select a valid Job.'])
self.assertEqual(content['bis_mainhand_id'], ['A valid integer is required.'])
self.assertEqual(content['bis_body_id'], ['Please ensure your value corresponds with a valid type of Gear.'])
self.assertEqual(content['bis_head_id'], [invalid_gear])
self.assertEqual(content['bis_earrings_id'], [invalid_gear])
self.assertEqual(content['current_mainhand_id'], [invalid_gear])
# Create a BIS List for a job, then send a request to make one for the same job
bis_gear = Gear.objects.first()
curr_gear = Gear.objects.last()
BISList.objects.create(
bis_body=bis_gear,
bis_bracelet=bis_gear,
bis_earrings=bis_gear,
bis_feet=bis_gear,
bis_hands=bis_gear,
bis_head=bis_gear,
bis_left_ring=bis_gear,
bis_legs=bis_gear,
bis_mainhand=bis_gear,
bis_necklace=bis_gear,
bis_offhand=bis_gear,
bis_right_ring=bis_gear,
current_body=curr_gear,
current_bracelet=curr_gear,
current_earrings=curr_gear,
current_feet=curr_gear,
current_hands=curr_gear,
current_head=curr_gear,
current_left_ring=curr_gear,
current_legs=curr_gear,
current_mainhand=curr_gear,
current_necklace=curr_gear,
current_offhand=curr_gear,
current_right_ring=curr_gear,
job_id='RPR',
owner=self.char,
)
data = {'job_id': 'RPR'}
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
# Since we checked all the "this field is required" errors above, just check the ones we send now
content = response.json()
self.assertEqual(content['job_id'], ['Currently SavageAim only supports one BISList per job.'])
def test_404(self):
"""
Test all situations where the endpoint would respond with a 404;
- Invalid Character ID
- Character is not owned by the requesting user
- Character is not verified
- Invalid BISList ID
- Character doesn't own BISList
"""
user = self._get_user()
self.client.force_authenticate(user)
# ID doesn't exist
url = reverse('api:bis_resource', kwargs={'character_id': 0000000000000000000000, 'pk': self.bis.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
response = self.client.put(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
# Character belongs to a different user
self.char.user = self._create_user()
self.char.save()
url = reverse('api:bis_resource', kwargs={'character_id': self.char.id, 'pk': self.bis.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
response = self.client.put(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
# Character is not verified
self.char.verified = False
self.char.user = user
self.char.save()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
response = self.client.put(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
# Invalid BISList ID
self.char.verified = True
self.char.save()
url = reverse('api:bis_resource', kwargs={'character_id': self.char.id, 'pk': 99999})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
response = self.client.put(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
# Character doesn't own BIS List
char = Character.objects.create(
avatar_url='https://img.savageaim.com/abcde',
lodestone_id=1234567890,
user=self._get_user(),
name='Char 2',
world='Lich',
verified=True,
)
self.bis.owner