{
"source": "jht0664/CNN_WR",
"score": 2
}
#### File: CNN_WR/machine/block.py
```python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='generate data block for machine learning input')
## args
parser.add_argument('-i', '--input', default='target.list', nargs='?',
help='input list file (format: $temperature/density $file_index, matching the column order read below)')
parser.add_argument('-ipf', '--input_prefix', default='grid', nargs='?',
help='prefix of input grid .npy file')
parser.add_argument('-s1', '--select1', default=0.5, nargs='?', type=float,
help='select temperature/density1 (< args.select2) for training set')
parser.add_argument('-s2', '--select2', default=1.0, nargs='?', type=float,
help='select temperature/density2 (> args.select1) for training set')
parser.add_argument('-prop', '--prop', default=-1.0, nargs='?', type=float,
help='proportion [0:1] of the training set held out as a test set for measuring model accuracy (< 0 means no test set)')
parser.add_argument('-nb', '--n_blocks', default=0, nargs='?', type=int,
help='# of blocks for training set (zero means no block average sets)')
parser.add_argument('-nbe', '--n_blocks_eval', default=0, nargs='?', type=int,
help='# of blocks for eval set (zero means no block average sets)')
parser.add_argument('-net', '--ne_train', default=-1, nargs='?', type=int,
help='# of ensembles for train set per grid.npy (-1 to use all)')
parser.add_argument('-nee', '--ne_eval', default=-1, nargs='?', type=int,
help='# of ensembles for eval set per grid.npy (-1 to use all)')
parser.add_argument('-ng', '--n_grids', default=15, nargs='?', type=int,
help='# of grids for data sets')
parser.add_argument('-seed', '--seed', default=1985, nargs='?', type=int,
help='random seed to shuffle for test sets and block sets')
parser.add_argument('-o1', '--out_train', default='train', nargs='?',
help='prefix of output training set .npy file like train.(coord/temp/cat).$i.npy')
parser.add_argument('-o2', '--out_test', default='test', nargs='?',
help='prefix of output test set .npy file for accuracy like test.(coord/temp/cat).npy')
parser.add_argument('-o3', '--out_eval', default='eval', nargs='?',
help='prefix of output Tc evaluation set .npy file like eval.(coord/temp).npy')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 2.2')
# read args
args = parser.parse_args()
# check args
print(" input arguments: {0}".format(args))
# import modules
import numpy as np
import scipy as sc
import math
import copy
np.random.seed(args.seed)
# step1: read list file and split to train, test, and eval sets.
list_file = np.loadtxt(args.input)
list_temp = np.array(list_file[:,0],dtype=float)
list_file_idx = np.array(list_file[:,1],dtype=int)
train_set1 = np.where(list_temp <= args.select1)[0] # indices for temp1 of training
train_set2 = np.where(list_temp >= args.select2)[0] # indices for temp2 of training
eval_set = np.delete(np.arange(len(list_file_idx)), np.append(train_set1,train_set2)) # indices for eval
# make train_set and test_set with proportion and shuffle
if args.prop > 0.0:
if args.prop >= 0.5:
raise ValueError("args.prop {} is too high unlike purpose".format(args.prop))
n_test1 = int(len(train_set1)*args.prop)
n_test2 = int(len(train_set2)*args.prop)
np.random.shuffle(train_set1)
np.random.shuffle(train_set2)
test_set = np.append(train_set1[0:n_test1],train_set2[0:n_test2])
train_set1 = train_set1[n_test1:]
train_set2 = train_set2[n_test2:]
else:
print(" Not make test set")
np.random.shuffle(train_set1)
np.random.shuffle(train_set2)
test_set = np.array([],dtype=int)
print("Based on {} list file: ".format(args.input))
print(" total #train data: {} for temp <= {}, {} for temp >= {}".format(len(train_set1),args.select1,len(train_set2),args.select2))
print(" #test data: {}".format(len(test_set)))
print(" #eval data: {}".format(len(eval_set)))
# step2: make blocks for training sets.
if args.n_blocks > 0:
remain_1 = len(train_set1)%args.n_blocks
remain_2 = len(train_set2)%args.n_blocks
print(" trim ({},{}) elements from two training sets for equal size of block sets".format(remain_1,remain_2))
if remain_1 > 0:
train_set1 = train_set1[remain_1:]
if remain_2 > 0:
train_set2 = train_set2[remain_2:]
block_sets1 = np.split(train_set1,args.n_blocks)
block_sets2 = np.split(train_set2,args.n_blocks)
print(" #blocks for training set = {}".format(args.n_blocks))
else:
print(" no blocks for training sets")
block_sets1 = train_set1
block_sets2 = train_set2
# step3: make blocks for evaluation sets:
if args.n_blocks_eval > 0:
if len(eval_set)%args.n_blocks_eval != 0 :
raise ValueError("n_blocks_eval value is not good to splite eval_set ({} % {} != 0)".format(len(eval_set),args.n_blocks_eval))
block_sets_eval = np.split(eval_set,args.n_blocks_eval)
print(" #blocks for eval set = {}".format(args.n_blocks_eval))
else:
print(" no blocks for eval sets")
block_sets_eval = eval_set
# legacy version without PBC padding; not called below and it relies on
# n_ensembles / n_eval_ensembles globals that are not defined in this script
def make_npy_files_mode_ver0(mode, i_block, idx_array, input_prefix, output_prefix):
# mode = test/eval/train
if ("test" in mode) or ("train" in mode):
gen_cat = True
else:
gen_cat = False # eval case
# initialize arrays
# As for eval set, we only use original grid info excluding ensembles or copies by trans, rot, and flip.
n_data = len(idx_array)
if gen_cat:
set_coord=np.empty((n_data,n_ensembles*pow(args.n_grids,3)))
set_temp=np.empty((n_data,n_ensembles))
set_cat=np.empty((n_data,n_ensembles))
esti_n_sets = n_ensembles
else: # eval case
set_coord=np.empty((n_data,n_eval_ensembles*pow(args.n_grids,3)))
set_temp=np.empty((n_data,n_eval_ensembles))
esti_n_sets = n_eval_ensembles
print(" collecting sets for {} mode".format(mode))
# run each sample
for i_data in np.arange(n_data):
# load data
i_set = list_file_idx[idx_array[i_data]]
filename = input_prefix+"."+str(i_set)+".npy"
try:
tmp_data = np.load(filename)
except FileNotFoundError:
raise ValueError("{} file does not found. Please remove the filename in list file".format(filename))
# check #ensembles
n_sets=int(len(tmp_data)/args.n_grids/args.n_grids/args.n_grids)
if (esti_n_sets < n_sets) and gen_cat:
raise RuntimeError("#ensembles sizes are different in {} file like {} != {}".format(filename, n_ensembles, n_sets))
# assign coord data
if gen_cat:
set_coord[i_data]=copy.copy(tmp_data[0:pow(args.n_grids,3)*esti_n_sets])
else:
#set_coord[i_data]=copy.copy(tmp_data[0:pow(args.n_grids,3)]) # for single ensemble
set_coord[i_data]=copy.copy(tmp_data[0:pow(args.n_grids,3)*n_eval_ensembles])
# assign cat and temp data
tmp_temp = list_temp[idx_array[i_data]]
if gen_cat:
if tmp_temp <= args.select1:
set_cat[i_data]=np.repeat(0.,esti_n_sets) # mixed
elif tmp_temp >= args.select2:
set_cat[i_data]=np.repeat(1.,esti_n_sets) # separation
else:
raise ValueError("mixed or seperated? see temperature {} != ({} or {})".format(
tmp_temp, args.select1, args.select2))
set_temp[i_data]=np.repeat(tmp_temp,esti_n_sets)
# save compressed npy files
if i_block is None:
np.save(output_prefix+'.coord', set_coord.flatten())
np.save(output_prefix+'.temp', set_temp.flatten())
if gen_cat:
np.save(output_prefix+'.cat', set_cat.flatten())
print("#{} samples = {}".format(mode, n_data))
else:
np.save(output_prefix+'.'+str(i_block)+'.coord', set_coord.flatten())
np.save(output_prefix+'.'+str(i_block)+'.temp', set_temp.flatten())
if gen_cat:
np.save(output_prefix+'.'+str(i_block)+'.cat', set_cat.flatten())
print("#{} {} samples = {}".format(mode, i_block, n_data))
# with PBC padding
def make_npy_files_mode(mode, i_block, idx_array, input_prefix, output_prefix):
# mode = test/eval/train
if ("test" in mode) or ("train" in mode):
gen_cat = True
else:
gen_cat = False # eval case
# initialize arrays
# As for eval set, we only use original grid info excluding ensembles or copies by trans, rot, and flip.
n_data = len(idx_array)
if gen_cat:
esti_n_sets = args.ne_train
set_coord=np.empty((n_data,esti_n_sets*pow(args.n_grids+2,3)))
set_temp=np.empty((n_data,esti_n_sets))
set_cat=np.empty((n_data,esti_n_sets))
else: # eval case
esti_n_sets = args.ne_eval
set_coord=np.empty((n_data,esti_n_sets*pow(args.n_grids+2,3)))
set_temp=np.empty((n_data,esti_n_sets))
print(" collecting sets for {} mode".format(mode))
# run each sample
for i_data in np.arange(n_data):
# load data
i_set = list_file_idx[idx_array[i_data]]
filename = input_prefix+"."+str(i_set)+".npy"
try:
tmp_data = np.load(filename)
except FileNotFoundError:
raise ValueError("{} file does not found. Please remove the filename in list file".format(filename))
# check #ensembles
n_sets=int(len(tmp_data)/args.n_grids/args.n_grids/args.n_grids)
if esti_n_sets > n_sets:
raise RuntimeError("#ensembles sizes you asked are less than #sets in {} file like {} > {}".format(filename, esti_n_sets, n_sets))
tmp_data = tmp_data.reshape(n_sets,args.n_grids,args.n_grids,args.n_grids)
# add padding on tmp_data for each ensemble
# if load_input_eval_file has more ensembles than esti_n_sets, only save first esti_n_sets data in block files
for esti_i_sets in range(esti_n_sets):
tmp_org = tmp_data[esti_i_sets]
tmp_pad1 = np.empty((args.n_grids+2,args.n_grids,args.n_grids)) # add yz layer
for ix in range(args.n_grids+2):
tmp_pad1[ix] = tmp_org[(ix-1)%args.n_grids]
tmp_pad2 = np.empty((args.n_grids+2,args.n_grids+2,args.n_grids)) # add xz layer
for iy in range(args.n_grids+2):
tmp_pad2[:,iy] = tmp_pad1[:,(iy-1)%args.n_grids]
tmp_pad3 = np.empty((args.n_grids+2,args.n_grids+2,args.n_grids+2)) # add xy layer
for iz in range(args.n_grids+2):
tmp_pad3[:,:,iz] = tmp_pad2[:,:,(iz-1)%args.n_grids]
# assign coord data
start_idx=esti_i_sets*pow(args.n_grids+2,3)
end_idx=(esti_i_sets+1)*pow(args.n_grids+2,3)
set_coord[i_data,start_idx:end_idx]=copy.copy(tmp_pad3.flatten())
# assign cat and temp data
tmp_temp = list_temp[idx_array[i_data]]
if gen_cat:
if tmp_temp <= args.select1:
set_cat[i_data]=np.repeat(0.,esti_n_sets) # select1
elif tmp_temp >= args.select2:
set_cat[i_data]=np.repeat(1.,esti_n_sets) # select2
else:
raise ValueError("mixed or seperated? see temperature {} != ({} or {})".format(
tmp_temp, args.select1, args.select2))
set_temp[i_data]=np.repeat(tmp_temp,esti_n_sets)
# save compressed npy files
if i_block is None:
np.save(output_prefix+'.coord', set_coord.flatten())
np.save(output_prefix+'.temp', set_temp.flatten())
if gen_cat:
np.save(output_prefix+'.cat', set_cat.flatten())
print("#{} samples = {}".format(mode, n_data))
else:
np.save(output_prefix+'.'+str(i_block)+'.coord', set_coord.flatten())
np.save(output_prefix+'.'+str(i_block)+'.temp', set_temp.flatten())
if gen_cat:
np.save(output_prefix+'.'+str(i_block)+'.cat', set_cat.flatten())
print("#{} {} samples = {}".format(mode, i_block, n_data))
# step4: make .npy files for train, test, and eval sets with blocks.
# test_set
if len(test_set) > 0:
make_npy_files_mode("test", None, test_set, args.input_prefix, args.out_test)
else:
print(" not generated test set output")
# eval set
if args.n_blocks_eval > 0:
print(" collecting block sets for eval")
for i_block in range(args.n_blocks_eval):
print(" ... {}th block ... ".format(i_block))
make_npy_files_mode("eval", i_block, block_sets_eval[i_block], args.input_prefix, args.out_eval)
else:
print(" collecting total (no block) sets for eval")
make_npy_files_mode("train", None, block_sets_eval, args.input_prefix, args.out_train)
# training set
if args.n_blocks > 0:
print(" collecting block sets for training")
for i_block in range(args.n_blocks):
print(" ... {}th block ... ".format(i_block))
tmp_set = np.append(block_sets1[i_block],block_sets2[i_block])
make_npy_files_mode("train", i_block, tmp_set, args.input_prefix, args.out_train)
else:
print(" collecting total (not block) sets for training")
tmp_set = np.append(block_sets1,block_sets2)
make_npy_files_mode("train", None, tmp_set, args.input_prefix, args.out_train)
print("Done: make data sets for machine learning")
```
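The triple ix/iy/iz loops in `make_npy_files_mode` wrap each grid with one layer of periodic images on every face before flattening. A minimal equivalent sketch using NumPy's built-in periodic padding (illustrative only, not part of the original script; `grid` is any cubic array):
```python
import numpy as np

def pad_periodic(grid):
    """Wrap a cubic grid with one layer of periodic-image cells on every face.

    Equivalent to the explicit ix/iy/iz loops above: index -1 maps to the last
    slice of each axis and index n_grids maps to the first slice.
    """
    return np.pad(grid, pad_width=1, mode='wrap')

# quick self-check of the wrapping behavior
n = 4
grid = np.arange(n**3, dtype=float).reshape(n, n, n)
padded = pad_periodic(grid)                            # shape (n+2, n+2, n+2)
assert padded.shape == (n + 2, n + 2, n + 2)
assert np.allclose(padded[0, 1:-1, 1:-1], grid[-1])    # lower x face = last x slice
assert np.allclose(padded[-1, 1:-1, 1:-1], grid[0])    # upper x face = first x slice
```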
#### File: CNN_WR/machine/train.py
```python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='supervised machine learning for phase separation of two phases')
## args
parser.add_argument('-i', '--input', default='train.0', nargs='?',
help='prefix of input .npy train file like $input.$i.(coord/temp/cat).npy')
parser.add_argument('-it', '--input_test', default='NONE', nargs='?',
help='prefix of input .npy test file like $input_test.(coord/temp/cat).npy, otherwise put NONE.')
parser.add_argument('-ng', '--n_grids', default=15, nargs='?', type=int,
help='# grids in input_prefix.coord.npy ')
parser.add_argument('-config', '--config_model', default='model.config', nargs='?',
help='text config file describing the structure of network layers (format: dropout \n conv1 \n conv2 \n pool \n dense*)')
parser.add_argument('-nb', '--nbatch', default=50, nargs='?', type=int,
help='# images per epoch to train')
parser.add_argument('-nc', '--n_class', default=2, nargs='?', type=int,
help='# classes to train')
parser.add_argument('-loss', '--loss_fn', default='binary_crossentropy', nargs='?',
help='loss function (binary_crossentropy/categorical_crossentropy)')
parser.add_argument('-seed', '--seed', default=-1, nargs='?', type=int,
help='set random seed (negative or zero value means random without seed)')
parser.add_argument('-o', '--output', default='model.h5', nargs='?',
help='output network model file (.h5)')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 3.4')
# read args
args = parser.parse_args()
# check args
print(" input arguments: {0}".format(args))
# import module
import numpy as np
# Just disables the warning, doesn't enable AVX/FMA
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress non-critical TensorFlow log messages
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
## start machine learning
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv3D, MaxPooling3D, AveragePooling3D
if args.seed > 0:
from numpy.random import seed
seed(args.seed)
from tensorflow import set_random_seed
set_random_seed(args.seed+1)
n_grids = args.n_grids + 2 # due to PBC padding
## load train data
print(" load train data with prefix {}".format(args.input))
train_coord_sets = np.load(args.input+'.coord.npy')
train_coord_sets = train_coord_sets.reshape(-1, n_grids, n_grids, n_grids, 1)
n_sets = train_coord_sets.shape[0]
train_cat_sets = np.load(args.input+'.cat.npy')
if train_cat_sets.shape[0] != n_sets:
raise ValueError(" inconsistent size for cat_sets with coord_sets, {} != {}".format(
train_cat_sets.shape[0],n_sets))
train_cat_sets = keras.utils.to_categorical(train_cat_sets, args.n_class)
train_temp_sets = np.load(args.input+'.temp.npy')
if train_temp_sets.shape[0] != n_sets:
raise ValueError(" inconsistent size for temp_sets with coord_sets, {} != {}".format(
train_temp_sets.shape[0],n_sets))
## prepare for CNN input layer and out layer
## modeling (construct CNN layers)
# see details:
# https://liufuyang.github.io/2017/04/01/just-another-tensorflow-beginner-guide-3.html
# http://cs231n.github.io/convolutional-networks/
input_shape = (n_grids, n_grids, n_grids, 1)
# original model for preliminary result of WR model
def modeling_ver0():
model = Sequential()
# first hidden layer;
# 32 feature maps (or filter), which with the filter size of 3x3x3
# and a rectifier activation function
# Note that the CONV layer’s parameters consist of a set of learnable filters
model.add(Conv3D(32, kernel_size=(3, 3, 3),
strides=(1, 1, 1), padding='same',
activation='relu', input_shape=input_shape)) # activate by higher size, 32 -> 64
# second hidden layer;
# 64 feature maps, which with the size of 3x3x3
# and a rectifier activation function
model.add(Conv3D(64, (3, 3, 3),
strides=(1, 1, 1), padding='same',
activation='relu'))
# pooling layer
# with pool size of 2x2x2
# which means with a stride of 2 downsamples every depth slice in the input by 2
# along both width and height, discarding 75% of the activations
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
# a regularization layer using dropout
# randomly exclude 20% of neurons in the layer
# in order to reduce overfitting.
model.add(Dropout(0.2))
# converts the 3D matrix data to a vector
# It allows the output to be processed
# by standard fully connected layers
model.add(Flatten())
# a fully connected layer with 128 neurons
# and rectifier activation function.
# How to decide #neurons? see https://www.heatonresearch.com/2017/06/01/hidden-layers.html
# 1. The number of hidden neurons should be between the size of the input layer and the size of the output layer.
# 2. The number of hidden neurons should be 2/3 the size of the input layer, plus the size of the output layer.
# 3. The number of hidden neurons should be less than twice the size of the input layer.
# from (1), should be [2:15*15*15] = [2:3375]
# from (2), should be 15*15*15*2/3+2 ~ 2252
# from (3), should be < 15*15*15*2
model.add(Dense(128, activation='relu'))
# As there will be many weights generated on the previous layer,
# it is configured to randomly exclude 30% of neurons in the layer
# in order to reduce overfitting.
model.add(Dropout(0.3))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
# the output layer has 2 neurons for the 2 classes
# and a softmax activation function
# to output probability-like predictions for each class
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(loss='binary_crossentropy',
optimizer="adam",
metrics=['accuracy'])
return model
def user_model(config_file):
print("construct machine learning model by {} file".format(config_file))
model = Sequential()
config_array = []
config=open(config_file, 'r')
# read dropout probability
prob_drop = float(config.readline().split()[0])
if (prob_drop > 1.0) or (prob_drop < 0.0):
raise ValueError(" prob_drop is too big or small, {}".format(prob_drop))
elif prob_drop == 0.:
bool_drop = False
print(" deactivate dropout fn")
else:
bool_drop = True
print(" activate dropout fn")
# read 1st conv. layer
filter_size, n_conv = np.int_(config.readline().split()[0:2])
if n_conv > 0:
model.add(Conv3D(n_conv, kernel_size=(filter_size, filter_size, filter_size),
strides=(1, 1, 1), padding='valid',
activation='relu', input_shape=input_shape))
print(" add 1st conv3D layer {}".format(n_conv))
config_array.append("conv "+str(n_conv))
else:
raise ValueError(" wrong value for 1st n_conv, {}".format(n_conv))
# read 2nd conv. layer
filter_size, n_conv = np.int_(config.readline().split()[0:2])
if n_conv > 0:
model.add(Conv3D(n_conv, kernel_size=(filter_size, filter_size, filter_size),
strides=(1, 1, 1), padding='valid',
activation='relu'))
print(" add 2nd conv3D layer {}".format(n_conv))
config_array.append("conv "+str(n_conv))
elif n_conv == 0:
print(" pass 2nd conv3D layer")
else:
raise ValueError(" wrong value for 2nd n_conv, {}".format(n_conv))
# read avg. pooling layer
n_stride = int(config.readline().split()[0])
if n_stride > 0:
model.add(AveragePooling3D(pool_size=(n_stride, n_stride, n_stride)))
print(" add ave. pooling layer")
config_array.append("pool "+str(n_stride))
if bool_drop:
model.add(Dropout(prob_drop))
config_array.append("dropout "+str(prob_drop))
elif n_stride == 0:
print(" pass avg. pooling layer")
else:
raise ValueError(" wrong value for max. pooling layer, {}".format(n_conv))
# fully connected arrays
model.add(Flatten())
# read dense layers (exclude output layer)
tmp = config.readlines()
n_dense = len(tmp)
for i in range(n_dense):
try:
n_neurons=int(tmp[i].split()[0])
except IndexError:
raise IndexError("Probably you put whitespace somewhere in the file, {}".format(tmp))
if n_neurons != 0:
model.add(Dense(n_neurons, activation='relu'))
print(" add Dense layer {}".format(n_neurons))
config_array.append("Dense "+str(n_neurons))
if bool_drop:
model.add(Dropout(prob_drop))
config_array.append("dropout "+str(prob_drop))
else:
continue
if n_dense == 0:
print(" pass any Dense layer")
# add output layer
model.add(Dense(args.n_class, activation='softmax'))
model.summary()
print("config model = {}".format(config_array))
model.compile(loss=args.loss_fn,
optimizer="adam",
metrics=['accuracy'])
return model
# fiting with training sets
#cnn_model = modeling_ver0() # 1,082 k parameters: 74s/epoch, 3 epoch -> 97.4% accuracy, 4 epoch -> 99.7%
cnn_model = user_model(args.config_model)
history = cnn_model.fit(train_coord_sets, train_cat_sets,
batch_size=args.nbatch,
epochs=30,
verbose=1,
shuffle=True)
print("Done by fitting to training set")
# release memory
del train_coord_sets
del train_cat_sets
del train_temp_sets
## load test data if available
if 'NONE' not in args.input_test:
print(" load test data with prefix {} to calculate accuracy".format(args.input_test))
try:
test_coord_sets = np.load(args.input_test+'.coord.npy')
test_coord_sets = test_coord_sets.reshape(-1, n_grids, n_grids, n_grids, 1)
n_sets = test_coord_sets.shape[0]
test_cat_sets = np.load(args.input_test+'.cat.npy')
if test_cat_sets.shape[0] != n_sets:
raise ValueError(" inconsistent size for cat_sets with coord_sets, {} != {}".format(
test_cat_sets.shape[0],n_sets))
test_cat_sets = keras.utils.to_categorical(test_cat_sets, args.n_class)
test_temp_sets = np.load(args.input_test+'.temp.npy')
if test_temp_sets.shape[0] != n_sets:
raise ValueError(" inconsistent size for temp_sets with coord_sets, {} != {}".format(
test_temp_sets.shape[0],n_sets))
# check results with test sets
score = cnn_model.evaluate(test_coord_sets, test_cat_sets, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
del test_coord_sets
del test_temp_sets
del test_cat_sets
except IOError:
print("not found the file. Skip test data")
args.input_test="NONE"
pass
else:
print(" No test for accuracy")
# save network model
cnn_model.save(args.output)
print("Done: construct machine learning model")
```
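The `user_model` parser above reads `model.config` line by line: a dropout probability, then `filter_size n_filters` for each of the two Conv3D layers, then the AveragePooling3D stride, then one neuron count per Dense layer. A hypothetical example of writing such a file (the layer sizes are illustrative, not values taken from the original project):
```python
# line 1: dropout probability (0 disables dropout)
# line 2: filter size and #filters of the 1st Conv3D layer
# line 3: filter size and #filters of the 2nd Conv3D layer (0 filters skips it)
# line 4: AveragePooling3D stride (0 skips pooling)
# remaining lines: one neuron count per Dense layer before the softmax output
config_lines = [
    "0.2",   # Dropout(0.2) after pooling and after every Dense layer
    "3 32",  # Conv3D(32, kernel_size=(3, 3, 3))
    "3 64",  # Conv3D(64, kernel_size=(3, 3, 3))
    "2",     # AveragePooling3D(pool_size=(2, 2, 2))
    "128",   # Dense(128)
    "16",    # Dense(16)
]
with open("model.config", "w") as f:
    f.write("\n".join(config_lines) + "\n")
```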
{
"source": "jht0664/StructureFactor_SPME",
"score": 3
}
#### File: jht0664/StructureFactor_SPME/smooth.py
```python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='simply smooth 1d line with n windows')
parser.add_argument('-i', '--input', default='sq_tot', nargs='?',
help='the text file name with two columns (x,y)')
parser.add_argument('-nw', '--nw', default=4, nargs='?', type=int,
help='number of windows to average')
parser.add_argument('-o', '--output', default='.smooth', nargs='?',
help='output suffix for smoothed data')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
import numpy as np
# from https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
# smooth the data using a window with requested size.
# This method is based on the convolution of a scaled window with the signal.
# The signal is prepared by introducing reflected copies of the signal
# (with the window size) in both ends so that transient parts are minimized
# in the beginning and end parts of the output signal.
def smooth(x,window_len=11,window='flat'):
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=getattr(np, window)(window_len) # e.g. np.hanning(window_len); avoids eval
y=np.convolve(w/w.sum(),s,mode='valid')
return y[int(window_len/2.0):len(x)+int(window_len/2.0)]
data=np.loadtxt(args.input)
rem_zero = np.where(data[:,1] == 0)
skim_out = np.delete(data,rem_zero,axis=0)
skim_out_interp = np.interp(data[:,0],skim_out[:,0],skim_out[:,1])
skim_out_interp_smooth = smooth(skim_out_interp,args.nw)
np.savetxt(args.input+args.output,np.column_stack((data[:,0],skim_out_interp_smooth)))
```
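A short usage sketch of the `smooth()` helper defined above on synthetic data (not part of the original script); the mirrored-end convolution returns an array with the same length as the input:
```python
import numpy as np

# noisy sine curve standing in for an S(q) column
x = np.linspace(0.0, 10.0, 200)
y = np.sin(x) + 0.1 * np.random.RandomState(0).standard_normal(x.size)

y_smooth = smooth(y, window_len=5, window='flat')   # 5-point moving average
assert y_smooth.shape == y.shape                    # length preserved by the end-reflection trick
```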
{
"source": "jht0664/Utility_python_gromacs",
"score": 2
}
#### File: Utility_python_gromacs/python/conv_align_w_dist.py
```python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Convolution alignment for 1d mass fraction and total mass profiles')
## args
parser.add_argument('-imf', '--in_massf', default='traj.massf', nargs='?',
help='raw mass fraction profile (.npy file; omit the .npy extension in the argument)')
parser.add_argument('-itm', '--in_tmass', default='traj.tmass', nargs='?',
help='raw total mass or mass profile (.npy file; omit the .npy extension in the argument)')
parser.add_argument('-rm', '--remove', default='YES', nargs='?',
help='Remove multi-layers trajectory? (YES/any)')
parser.add_argument('-half', '--half', default='YES', nargs='?',
help='calculate domain size and save profiles of only last half or all? (YES/any)')
parser.add_argument('-o', '--output', default='.align', nargs='?',
help='output suffix for aligned profiles')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.3')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import hjung
from hjung import *
import numpy as np
import matplotlib
matplotlib.use('Agg') # avoid to show figures when running bash shell script
import matplotlib.pyplot as plt
from scipy import stats
import copy
# default for args
args.omassf = args.in_massf + args.output # save aligned mass fraction profiles
args.otmass = args.in_tmass + args.output # save aligned total mass profiles
args.oacf = args.in_massf + '.acf' # save autocorrelation function in 1D
args.odsize = args.in_massf + '.dsize' # save domain sizes
#args.omulti = args.in_massf + '.multi' # save iframes when multilayers happen
args.in_massf = args.in_massf + '.npy'
args.in_tmass = args.in_tmass + '.npy'
## timer
start_proc, start_prof = hjung.time.init()
## load data files
massfrac_1d_t = np.load(args.in_massf)
totalmass_1d_t = np.load(args.in_tmass)
if massfrac_1d_t.size != totalmass_1d_t.size:
raise ValueError("the size of two data files are different.")
nbin = len(massfrac_1d_t[0])
## calculate autocorrelation function
acf_1d_t_wrap = hjung.analyze.autocorr_1d_t(massfrac_1d_t, 'wrap')
slab_shift = int(len(acf_1d_t_wrap[0])/2.0)
np.savetxt(args.oacf, acf_1d_t_wrap,
header='spatial autocorr(slab_lag,i_frame) (%d,%d) for delta_number, Plot u ($1-%d):2:3'
%(len(acf_1d_t_wrap),len(acf_1d_t_wrap[0]),slab_shift), fmt='%f', comments='# ')
## convert to autocorrelation function to step-function and align density profiles
step_1d_t_wrap = np.where(acf_1d_t_wrap < 0.0, -1., 1.) # when we define domain size as zero points in acf
align_massfrac_1d_t, align_totalmass_1d_t = hjung.analyze.align_acf_w_data2(massfrac_1d_t, totalmass_1d_t, step_1d_t_wrap, 'wrap')
## multilayer check
def multilayer_in_step_fn(step_1d_t_wrap):
step_diff = np.diff(step_1d_t_wrap) # difference of neighbor element
# save iframes when multilayer occurs
multilayer_iframes = []
for i_frame in range(len(step_diff)):
step_diff_iframe = step_diff[i_frame]
step_n_up = (step_diff_iframe > 0.0).sum()
step_n_down = (step_diff_iframe < 0.0).sum()
if (step_n_up > 1.0) or (step_n_down > 1.0):
print("Probably it has two or more layers (multi-domains) at {}. We remove them in profiles!".format(i_frame))
multilayer_iframes.append(i_frame)
multilayer_iframes = np.array(multilayer_iframes,dtype=int) # np.int is removed in recent NumPy releases
# determine interface
step_up = np.argmax(step_diff,axis=1)
step_down = np.argmin(step_diff,axis=1)
return step_up, step_down, multilayer_iframes
def remove_data(step_up, step_down, multilayer_iframes, ask_half):
# remove all of multilayers
step_up = np.delete(step_up,multilayer_iframes,axis=0)
step_down = np.delete(step_down,multilayer_iframes,axis=0)
# remove first half trajectories
if 'YES' in ask_half:
remove_range = np.arange(len(step_up)//2) # integer division so the indices passed to np.delete stay integers
step_up = np.delete(step_up,remove_range,axis=0)
step_down = np.delete(step_down,remove_range,axis=0)
return step_up, step_down
def print_domain_size(step_up, step_down, text1):
domain_size = step_down - step_up
domain_size_avg = np.average(domain_size)
domain_size_std = np.std(domain_size)
print("domain size {} (avg,std) = {} +- {}".format(text1, domain_size_avg, domain_size_std))
return domain_size
def main_domain_size_step_fn(acf_1d_t, criteria_massf, ask_half, text_print):
step_1d_t = np.where(acf_1d_t - criteria_massf < 0.0, -1., 1.) # when we define domain size as zero points in acf
step_up, step_down, multilayer_iframes = multilayer_in_step_fn(step_1d_t)
step_up, step_down = remove_data(step_up, step_down, multilayer_iframes, ask_half)
domain_size = print_domain_size(step_up, step_down, text_print)
return domain_size, multilayer_iframes
domain_size, multilayer_iframes = main_domain_size_step_fn(acf_1d_t_wrap, 0.2, args.half, "(20%)")
domain_size, multilayer_iframes = main_domain_size_step_fn(acf_1d_t_wrap, 0.8, args.half, "(80%)")
domain_size, multilayer_iframes = main_domain_size_step_fn(acf_1d_t_wrap, 0.5, args.half, "(50%)")
## relationship between domain size (50%), massf, and tmass
# however, when I checked the result, no correlation because of Monte Carlo simulation.
# (only average is meaningful and one movement does not affect rest of part of system, i.e. environment)
center = int(len(align_massfrac_1d_t[0])/2 - 1)
massf_center = copy.copy(align_massfrac_1d_t[:,center])
tmass_center = copy.copy(align_totalmass_1d_t[:,center])
massf_center, tmass_center = remove_data(massf_center,tmass_center,multilayer_iframes,args.half)
## domain size - massf (not necessary jobs)
#plt.figure()
#dm_s, dm_i, dm_r, dm_p, dm_err = stats.linregress(domain_size, massf_center)
#print("r-squared (domain-massf) = {}".format(dm_r**2.))
#plt.plot(domain_size, massf_center, 'o', label='data')
#plt.plot(domain_size, dm_i + dm_s*domain_size, 'r', label='fit')
#plt.legend()
#plt.savefig(args.odsize+'.dm.png')
## domain size - tmass
#plt.figure()
#dm_s, dm_i, dm_r, dm_p, dm_err = stats.linregress(domain_size, tmass_center)
#print("r-squared (domain-tmass) = {}".format(dm_r**2.))
#plt.plot(domain_size, tmass_center, 'o', label='data')
#plt.plot(domain_size, dm_i + dm_s*domain_size, 'r', label='fit')
#plt.legend()
#plt.savefig(args.odsize+'.dt.png')
## tmass - massf
#plt.figure()
#dm_s, dm_i, dm_r, dm_p, dm_err = stats.linregress(tmass_center, massf_center)
#print("r-squared (tmass-massf) = {}".format(dm_r**2.))
#plt.plot(tmass_center, massf_center, 'o', label='data')
#plt.plot(tmass_center, dm_i + dm_s*tmass_center, 'r', label='fit')
#plt.legend()
#plt.savefig(args.odsize+'.tm.png')
## save array stacks for output
domainsize_massf_tmass = np.column_stack((domain_size, massf_center, tmass_center))
## remove all of multilayers
domain_size, multilayer_iframes = main_domain_size_step_fn(acf_1d_t_wrap, 0., args.half, "(0%)")
if 'YES' in args.remove:
align_massfrac_1d_t = np.delete(align_massfrac_1d_t,multilayer_iframes,axis=0)
align_totalmass_1d_t = np.delete(align_totalmass_1d_t,multilayer_iframes,axis=0)
## remove first half trajectories
if 'YES' in args.half:
remove_range = np.arange(len(align_massfrac_1d_t)//2) # integer division so the indices passed to np.delete stay integers
align_massfrac_1d_t = np.delete(align_massfrac_1d_t,remove_range,axis=0)
align_totalmass_1d_t = np.delete(align_totalmass_1d_t,remove_range,axis=0)
## write
np.savetxt(args.omassf, align_massfrac_1d_t,
header='%d, %d, aligned massf fraction by ACF and molecules in nbins' \
%(len(align_massfrac_1d_t),nbin), fmt='%f', comments='# ')
#np.save(args.omassf, align_massfrac_1d_t)
np.savetxt(args.otmass, align_totalmass_1d_t,
header='%d, %d, aligned (total or selected) mass by ACF and molecules in nbins' \
%(len(align_totalmass_1d_t),nbin), fmt='%f', comments='# ')
#np.save(args.otmass, align_totalmass_1d_t)
np.savetxt(args.odsize, domainsize_massf_tmass,
header='domain size, mass fraction and total mass in center of domain', fmt='%f', comments='# ')
#np.save(args.odsize, domainsize_massf_tmass)
#np.savetxt(args.omulti, multilayer_iframes,
# header='iframes when multilayers occurs', fmt='%d \n', comments='# ')
## timer
hjung.time.end_print(start_proc, start_prof)
```
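To make the interface detection above concrete: `multilayer_in_step_fn` receives the +-1 step profile derived from the ACF, and a single domain in a frame appears as exactly one positive and one negative jump in `np.diff`, whose positions give the domain boundaries. A tiny synthetic one-frame illustration (not from the original script):
```python
import numpy as np

# one frame with a single domain occupying bins 3..7 of a 12-bin step profile
step = -np.ones(12)
step[3:8] = 1.0

jumps = np.diff(step)           # +2 at the rising interface, -2 at the falling one
step_up = np.argmax(jumps)      # bin just before the domain starts -> 2
step_down = np.argmin(jumps)    # last bin inside the domain        -> 7
domain_size = step_down - step_up
print(step_up, step_down, domain_size)   # 2 7 5
```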
#### File: Utility_python_gromacs/python/fit_massf.py
```python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='fitting density profile with tanh and erf function')
## args
parser.add_argument('-i', '--input', default='traj.massf.align.avg', nargs='?',
help='mass fraction profile (npy file format, exclude .npy)')
parser.add_argument('-g', '--guess', default='CENTER', nargs='?',
help='initial guess in center value or highest values (CENTER/any)')
parser.add_argument('-symm', '--symmetry', default='YES', nargs='?',
help='Use symmetry or no symmetry of coexistent mole fractions (YES/any)')
parser.add_argument('-show', '--show', default='YES', nargs='?',
help='Save plotting (YES/any)')
parser.add_argument('-o', '--output', default='.fit', nargs='?',
help='output suffix for fitting result')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import hjung
from hjung import *
import numpy as np
from scipy.special import erf
from scipy.optimize import curve_fit
import matplotlib
matplotlib.use('Agg') # avoid to show figures when running bash shell script
import matplotlib.pyplot as plt
# default for args
# build output names from the input prefix before appending .npy (mirrors conv_align_w_dist.py)
args.output = args.input + args.output
args.output_png = args.output + '.png'
args.input = args.input + '.npy'
## timer
start_proc, start_prof = hjung.time.init()
## load data files
massfrac_1d = np.load(args.input)
massfrac_1d = np.transpose(massfrac_1d)
massfrac_1d_avg = massfrac_1d[0]
massfrac_1d_std = massfrac_1d[1]
#print(massfrac_1d_avg)
#print(massfrac_1d_std)
curve_fit_std_off = False
if len(np.nonzero(massfrac_1d_std)[0]) != len(massfrac_1d_std): # np.nonzero returns a tuple of index arrays
print("mass fraction std elements have zeros. Turned off curve_fit using std.")
curve_fit_std_off = True
nbin = len(massfrac_1d_avg)
## fitting functional form
# wr: mole fraction in A-rich phase
# wp: mole fraction in A-poor phase
# b: center of A-rich phase
# 2c: half-width of A-rich phase
# 2lamda: half-width of interface
def tanh_symm(x, wr, b, c, lamda):
return 1.0-wr+0.50*(2.0*wr-1.0)*(np.tanh((x-b+c)/lamda)-np.tanh((x-b-c)/lamda))
def erf_symm(x, wr, b, c, lamda):
return 1.0-wr+0.50*(2.0*wr-1.0)*(erf((x-b+c)/lamda)-erf((x-b-c)/lamda))
def tanh_nosymm(x, wr, wp, b, c, lamda):
return wp+0.50*(wr-wp)*(np.tanh((x-b+c)/lamda)-np.tanh((x-b-c)/lamda))
def erf_nosymm(x, wr, wp, b, c, lamda):
return wp+0.50*(wr-wp)*(erf((x-b+c)/lamda)-erf((x-b-c)/lamda))
## initial guess
if 'CENTER' in args.guess:
b = int(nbin/2 - 1)
wr = massfrac_1d_avg[b]
wp = massfrac_1d_avg[0]
print("center wr (avg,std) = {} +- {}".format(wr,massfrac_1d_std[b]))
print("center wp (avg,std) = {} +- {}".format(wp,massfrac_1d_std[0]))
else:
b = np.argmax(massfrac_1d_avg)
wr = np.max(massfrac_1d_avg)
wp = np.min(massfrac_1d_avg)
c = int(nbin/4)
lamda = int(nbin/10)
## curve fit
x_data = np.linspace(1, nbin, num=nbin, endpoint=True)
if 'YES' in args.symmetry:
tanh_opt, tanh_cov = curve_fit(tanh_symm,x_data,massfrac_1d_avg,p0=[wr,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., nbin, nbin/2., nbin/2.]))
erf_opt, erf_cov = curve_fit(erf_symm,x_data,massfrac_1d_avg,p0=[wr,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., nbin, nbin/2., nbin/2.]))
else:
if curve_fit_std_off:
tanh_opt, tanh_cov = curve_fit(tanh_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
erf_opt, erf_cov = curve_fit(erf_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
else:
tanh_opt, tanh_cov = curve_fit(tanh_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
erf_opt, erf_cov = curve_fit(erf_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
## plotting
if 'YES' in args.show:
plt.plot(x_data, massfrac_1d_avg, 'b-', label='data')
if 'YES' in args.symmetry:
plt.plot(x_data, tanh_symm(x_data,*tanh_opt), 'r--',label='fit:tanh_symm')
plt.plot(x_data, erf_symm(x_data,*erf_opt), 'g--',label='fit:erf_symm')
else:
plt.plot(x_data, tanh_nosymm(x_data,*tanh_opt), 'r--',label='fit:tanh_nosymm')
plt.plot(x_data, erf_nosymm(x_data,*erf_opt), 'g--',label='fit:erf_nosymm')
plt.legend()
#plt.show()
plt.savefig(args.output_png)
## display all information
if 'YES' in args.symmetry:
print("tanh wr = {} +- {}".format(tanh_opt[0],tanh_cov[0][0]))
print("tanh b = {} +- {}".format(tanh_opt[1],tanh_cov[1][1]))
print("tanh c = {} +- {}".format(tanh_opt[2],tanh_cov[2][2]))
print("tanh lamda = {} +- {}".format(tanh_opt[3],tanh_cov[3][3]))
print("erf wr = {} +- {}".format(erf_opt[0],erf_cov[0][0]))
print("erf b = {} +- {}".format(erf_opt[1],erf_cov[1][1]))
print("erf c = {} +- {}".format(erf_opt[2],erf_cov[2][2]))
print("erf lamda = {} +- {}".format(erf_opt[3],erf_cov[3][3]))
else:
print("tanh wr = {} +- {}".format(tanh_opt[0],tanh_cov[0][0]))
print("tanh wp = {} +- {}".format(tanh_opt[1],tanh_cov[1][1]))
print("tanh b = {} +- {}".format(tanh_opt[2],tanh_cov[2][2]))
print("tanh c = {} +- {}".format(tanh_opt[3],tanh_cov[3][3]))
print("tanh lamda = {} +- {}".format(tanh_opt[4],tanh_cov[4][4]))
print("erf wr = {} +- {}".format(erf_opt[0],erf_cov[0][0]))
print("erf wp = {} +- {}".format(erf_opt[1],erf_cov[1][1]))
print("erf b = {} +- {}".format(erf_opt[2],erf_cov[2][2]))
print("erf c = {} +- {}".format(erf_opt[3],erf_cov[3][3]))
print("erf lamda = {} +- {}".format(erf_opt[4],erf_cov[4][4]))
## timer
hjung.time.end_print(start_proc, start_prof)
```
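A minimal sanity check of the symmetric tanh fit on synthetic data (the parameters are made up; the functional form is copied from the script above):
```python
import numpy as np
from scipy.optimize import curve_fit

def tanh_symm(x, wr, b, c, lamda):   # same functional form as defined in the script
    return 1.0-wr+0.50*(2.0*wr-1.0)*(np.tanh((x-b+c)/lamda)-np.tanh((x-b-c)/lamda))

# synthetic profile: A-rich slab centered at bin 50, half-width 20, interface width 4
nbin = 100
x = np.linspace(1, nbin, num=nbin, endpoint=True)
y = tanh_symm(x, 0.9, 50.0, 20.0, 4.0) + 0.01 * np.random.RandomState(1).standard_normal(nbin)

popt, pcov = curve_fit(tanh_symm, x, y, p0=[0.8, nbin/2, nbin/4, nbin/10],
                       bounds=(0, [1.0, nbin, nbin/2.0, nbin/2.0]))
print(popt)                    # should recover roughly (0.9, 50, 20, 4)
print(np.sqrt(np.diag(pcov)))  # 1-sigma uncertainties of the fitted parameters
```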
#### File: python/hjung/initial.py
```python
import numpy as np
# global variable random set
rndset = np.random.RandomState(1985)
# print process percentage
# input: text is a label printed with the progress message
# istep is the current step
# nstep is the final step
# modn is the printing interval
# output: modn is the (possibly increased) printing interval
# Example: modn = print_process(text, istep, nstep, modn)
def print_process(text, istep, nstep, modn):
#print('{0} {1} {2}'.format(istep, nstep, modn))
if istep%modn == 0:
print("... {0}/{1} th ({2:.0%}) for {3}...".format(istep,nstep,istep/nstep,text))
if (istep/modn)%10 == 0:
modn = modn*10
return modn
# initialize LJ box dimensions from the total number of monomers, number density, and box aspect ratio
# input: nmon is # total monomers (or beads).
# ndens is number density of monomers (or beads)
# ratio is (x, y, z) ratio (= [int, int, int])
# output: initial box size array, [box_x, box_y, box_z]
# Example: lj_box = lj_box_init_w_ndens_ratio(nmon, ndens, [1, 1, 5])
def lj_box_init_w_ndens_ratio(nmon, ndens, ratio):
print("lj_box_init_w_ndens_ratio: ")
print(" total number of LJ particles = %s" %nmon)
boxl = (float(nmon)/ndens/np.prod(ratio))**(1.0/3.0)
box = np.multiply(ratio,boxl)
print(" box array = [{0:8.3f}, {1:8.3f}, {2:8.3f}]".format(box[0],box[1],box[2]))
return box
# get a pre-separated coordinate using lattice insertion
# To reduce the cost of overlap checks, use a cell list with 0 (empty), 1 (occupied by A), and 2 (occupied by B).
# input: nmol_a is # A chains
# mon_a is # monomers of a A chain
# cell_length is the length of lattice (cubic) cell
# box is box dimension [box_x, box_y, box_z]
# frac_a is fraction of A in one of coexisting two phases
# seed is seed number for random number generator
# output: coordinates of beads ((x1,y1,z1),(x2,y2,z2),...)
# Example: coordinates = hjung.initial.insertion_lattice_sep(args.nmola, args.mona, args.nmolb, args.monb, cell_length, box, args.frac, args.seed)
def insertion_lattice_sep(nmol_a, mon_a, nmol_b, mon_b, guess_cell_length, box, frac_a, seed):
print("insertion_lattice_sep:")
# assign random set
global rndset
rndset = np.random.RandomState(seed)
if box.shape != (3,):
raise ValueError(" the input 'box' is not (3,) array!")
# check validity of lattice insertion method
cell = np.floor(np.divide(box,guess_cell_length)).astype(int)
print(" cell dimension = [{0}, {1}, {2}]".format(cell[0],cell[1],cell[2]))
cell_length = np.divide(box,cell.astype(float))
print(" cell length = [{0:8.3f}, {1:8.3f}, {2:8.3f}]".format(cell_length[0],cell_length[1],cell_length[2]))
ncell = np.prod(cell)
nmtot = nmol_a*mon_a + nmol_b*mon_b
nctot = nmol_a + nmol_b
if ncell < nmtot:
raise ValueError(" Not possible to use lattice insertion because #particles > #cells. Reduce cell_length.")
# pre-determine number of A and B chains in a phase
sep_maxa = int(nmol_a*frac_a)
print(" one phase has {0} of A chains".format(sep_maxa))
sep_maxb = int(nmol_b*(1.0-frac_a))
print(" also, the phase has {0} of B chains".format(sep_maxb))
# lattice insertion
cell_list = np.zeros(cell,dtype=int) # Cell list with 0 (empty), 1 (occupied A), and 2 (occupied B).
modn = 10
icell_a, cell_list = insert_polymer_sep("monomers of success insertion A", 1, sep_maxa, nmol_a, mon_a, box, cell_list, modn)
icell_b, cell_list = insert_polymer_sep("monomers of success insertion B", 2, sep_maxb, nmol_b, mon_b, box, cell_list, modn)
# save coordinate
coordinates = np.zeros((nmtot, 3))
index_coord = 0
for itr_icell_a in icell_a:
coordinates[index_coord] = (itr_icell_a+0.50)*cell_length
index_coord += 1
for itr_icell_b in icell_b:
coordinates[index_coord] = (itr_icell_b+0.50)*cell_length
index_coord += 1
if index_coord != nmtot:
raise RuntimeError(" not match index of total #particles.")
return coordinates
# insert polymer chains of a component
# input: text_a is for print_process
# monomer_type is numbering of monomer type, int.
# sep_max is # monomers in a pre-separated phase
# nmol is # chains of the polymer
# mon is # monomers of the polymer
# box is box dimension [box_x, box_y, box_z]
# cell_list is the 3d array with information for empty or occupied by monomer_type
# output: coordinates of beads ((x1,y1,z1),(x2,y2,z2),...)
# cell_list is the 3d array with information for empty or occupied by monomer_type
# Example: icell_a, cell_list = insert_polymer_sep("monomers of success insertion A", 1, sep_maxa, nmol_a, mon_a, box, cell_list, modn)
def insert_polymer_sep(text, monomer_type, sep_max, nmol, mon, box, cell_list, moden):
print("insert_component:")
polymer = np.zeros((nmol*mon,3),dtype=int)
sep_layer1 = np.array([1.0, 1.0, 0.50]) # for ratio of phases
sep_layer2 = np.array([0.0, 0.0, 0.50]) # for translation
imol = 0
curr_index = 0
while (imol < nmol):
# print process
moden = print_process(text,imol,nmol,moden)
generate_mol = False
while generate_mol is False:
# make pre-separated phase, left and right
if imol < sep_max:
monomer_seed = generate_seed_sep('lattice',sep_layer1,np.array([0.0, 0.0, 0.0]),cell_list.shape)
else:
monomer_seed = generate_seed_sep('lattice',sep_layer1,sep_layer2,cell_list.shape)
# make a cell list and check validity of generated a cell list
#monomer_seed = np.trunc(monomer_seed*box/cell_length)
if check_monomer(monomer_seed, cell_list):
cell_list = update_monomer(monomer_seed, monomer_type, cell_list)
else:
continue
# start growth of polymer
success, icell_new_chain, new_cell_list = insert_chain(monomer_seed, monomer_type, mon, cell_list) # 1 = monomer type A
if success is True:
# update cell list
cell_list = new_cell_list
# save the coordinate of the new chain
for itr_new_chain in icell_new_chain:
polymer[curr_index] = itr_new_chain
curr_index += 1
imol += 1
generate_mol = True
else:
# get a new monomer seed again
#print(" fail to grow. redo generate seed.")
continue
# check index
if curr_index != nmol*mon:
raise ValueError(" total # particles of the component is not the same with your setting and algorithm.")
return polymer, cell_list
# generator a monomer seed for growth of polymer
# input: sep_layer_ratio is separated layer ratio (all elements are <= 1),
# sep_layer_trans is separated layer translation (all elements are <= 1),
# max_val is maximum value (exclusive) of random sample for lattice mode, [max_x, max_y, max_z] for lattice mode
# output: monomer_seed is random position of [0,1) with separation presets
# Example: monomer_seed = generate_seed_sep('lattice', sep_layer1, sep_layer2, cell_list.shape)
#################################################################################
# For example, ratio [1.0, 1.0, 0.50] and trans [0.0, 0.0, 0.50]
# no separation on x and y, but left 50% of box space is picked for the monomer
# no translation of phases on x and y, but the left 50% of box_z is translated by 0.50 amount of 50% length of box_z
# In other words, the monomer seed is picked in the space of right 50% of box_z.
###################################################################################
def generate_seed_sep(mode,sep_layer_ratio,sep_layer_trans,max_val):
#print("generate_seed_sep:")
global rndset
monomer_seed = np.zeros(3)
# get random monomer_seed depending on separation presets
monomer_seed = rndset.random_sample(3)
monomer_seed = monomer_seed * sep_layer_ratio
monomer_seed = monomer_seed + sep_layer_trans
if mode == 'lattice':
if max_val is None:
raise ValueError(" need max_val argument.")
monomer_seed = np.array(monomer_seed*max_val, dtype=int)
else:
raise ValueError(" wrong mode argument.")
return monomer_seed
# check validity of monomer
# input: monomer is a given position in cell_list
# cell_list is the 3d array with information for empty or occupied by monomer_type
# output: true if no overlapping. False if already occupied
# Example: success = check_monomer(monomer,cell_list)
def check_monomer(monomer,cell_list):
#print("check_seed:")
# check overlapping of monomer
if cell_list[monomer[0]][monomer[1]][monomer[2]] != 0:
# already occupied the monomer position
return False
# no overlaps
return True
# update monomer in cell_list
# input: monomer is a given position in cell_list
# monomer_type is type of monomer (int.)
# cell_list is the 3d array with information for empty or occupied by monomer_type
# output: cell_list is a updated cell_list
# Example: cell_list = update_monomer(monomer,monomer_type,cell_list)
def update_monomer(monomer,monomer_type,cell_list):
cell_list[monomer[0]][monomer[1]][monomer[2]] = monomer_type
return cell_list
# grow a chain
# input: monomer_seed is icell array [int, int, int] of first monomer of the chain
# monomer_type is numbering of monomer type, int.
# chain_length is degree of polymerization of the chain
# cell_list is the 3d array with information for empty or occupied by monomer_type
# output: cell_list (updated)
# new_chain is the icell array for all monomers of the chain
# True if success. Otherwise, False
# Example: success, icell_new_chain, new_cell_list = insert_chain(monomer_seed, 1, mon_a, cell_list)
def insert_chain(monomer_seed,monomer_type,chain_length,cell_list):
#print("insert_chain:")
global rndset
new_chain = np.zeros((chain_length,3),dtype=int)
new_chain[0] = monomer_seed
#print(" monomer seed = {0}".format(monomer_seed))
ibead = 1
curr_monomer = monomer_seed
max_trials = 1000*chain_length
ntrials = 0
while (ibead < chain_length):
# check # trials
ntrials = ntrials + 1
if ntrials > max_trials:
break
# determine direction
rnd_direction = rndset.random_sample(1)*6
if rnd_direction < 1.0:
direc = np.array([1, 0, 0],dtype=int)
elif rnd_direction < 2.0:
direc = np.array([-1, 0, 0],dtype=int)
elif rnd_direction < 3.0:
direc = np.array([0, 1, 0],dtype=int)
elif rnd_direction < 4.0:
direc = np.array([0, -1, 0],dtype=int)
elif rnd_direction < 5.0:
direc = np.array([0, 0, 1],dtype=int)
elif rnd_direction < 6.0:
direc = np.array([0, 0, -1],dtype=int)
# check overlapping
try_index = curr_monomer + direc
try_index = np.mod(try_index,cell_list.shape) # periodic boundary condition
if check_monomer(try_index,cell_list):
#print(' before {0}'.format(cell_list[try_index[0]][try_index[1]][try_index[2]]))
#print(' next monomer = {0}'.format(try_index))
cell_list = update_monomer(try_index,monomer_type,cell_list)
#print(' after {0}'.format(cell_list[try_index[0]][try_index[1]][try_index[2]]))
new_chain[ibead] = try_index
curr_monomer = try_index
ibead = ibead + 1
else:
continue
if ntrials > max_trials:
#print(" fails to grow.")
return False, None, None
else:
# check overlapping within itself
for imonomer in range(new_chain.shape[0]):
for jmonomer in range(imonomer+1,new_chain.shape[0]):
if np.all(np.equal(new_chain[imonomer],new_chain[jmonomer])):
print(new_chain[imonomer])
print(new_chain[jmonomer])
raise RuntimeError(" algorithm problem due to overlapping!")
#print(" success!")
return True, new_chain, cell_list
```
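To illustrate the layer ratio/translation convention documented for `generate_seed_sep` above: with ratio [1, 1, 0.5] and translation [0, 0, 0.5], uniform samples in [0,1)^3 are squeezed into the upper half of the z range before being converted to cell indices. A small standalone sketch (not from the original module):
```python
import numpy as np

rng = np.random.RandomState(1985)
ratio = np.array([1.0, 1.0, 0.5])   # shrink the z range to half the box
trans = np.array([0.0, 0.0, 0.5])   # then shift that half to the upper end
cell_shape = np.array([10, 10, 10])

samples = rng.random_sample((1000, 3)) * ratio + trans   # z now uniform on [0.5, 1.0)
icells = (samples * cell_shape).astype(int)

# every seed lands in the upper half of the z cells, i.e. indices 5..9
assert icells[:, 2].min() >= 5 and icells[:, 2].max() <= 9
```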
#### File: python/simple/surfacetensor.py
```python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Calculate instantaneous surface tension from pressure tensor')
# args
parser.add_argument('-box', '--box', type=float, nargs=3,
help='x, y, z (nm) of box dimension in NVT')
parser.add_argument('-s', '--structure', nargs='?',
help='structure file with box dimension at the last')
parser.add_argument('-i', '--input', default='energy.xvg', nargs='?',
help='input file to read pressure tensors [bar], (time, Pxx, Pyy, Pzz)')
parser.add_argument('-o', '--output', default='surfacetension.xvg', nargs='?',
help='output file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
# read args
args = parser.parse_args()
# default for args
args.input = args.input if args.input is not None else 'energy.xvg'
args.output = args.output if args.output is not None else 'surfacetension.xvg'
# assign the longest box length
if args.structure is not None and args.box is None:
import subprocess
# byte string to string
lastline = subprocess.check_output(['tail','-1',args.structure]).decode()
x, y, z = lastline.split()
x = float(x)
y = float(y)
z = float(z)
elif args.structure is None and args.box is not None:
x = args.box[0]
y = args.box[1]
z = args.box[2]
else:
print("Box dimension is required!")
exit()
max_box_length = max(x, y, z)
# Check arguments for log
print("the longest box_length = ", max_box_length)
print("input filename = ", args.input)
print("output filename = ", args.output)
# Surface tension calculation using pressure tensor
tensor = []
try:
xvg = open(args.input, 'r')
except IOError:
print("Problem with opening ",args.input)
exit()
# surfacetension function
def surfacetension(z_length, Pz, Px, Py):
return 0.5*z_length*(float(Pz)-0.5*(float(Px)+float(Py)))/10.0
# calculation
for line in xvg:
line = line.strip()
if not line or line.startswith('#') or line.startswith('@'): # skip blank lines and xvg comment/xmgrace directive lines
continue
time, xx, yy, zz = line.split()
# which axis is the longest
# pressure tensor and box length units are bar and nm in gromacs
# convert [bar nm] to [mN / m]
if max_box_length == x:
tensor.append(surfacetension(max_box_length,xx,yy,zz))
elif max_box_length == y:
tensor.append(surfacetension(max_box_length,yy,xx,zz))
elif max_box_length == z:
tensor.append(surfacetension(max_box_length,zz,yy,xx))
else:
print("max_box_length does not match box dimension")
exit()
xvg.close()
# save data
with open(args.output, 'w') as output_file:
output_file.write("# instantaneous surface tension [mN/m]")
for item in tensor:
output_file.write("{}\n".format(item))
# result
print("number of values: ", len(tensor))
```
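The `surfacetension()` helper above evaluates gamma = 0.5 * L * (P_N - 0.5 * (P_T1 + P_T2)), where the leading 0.5 accounts for the two interfaces of a periodic slab and the division by 10 converts bar*nm to mN/m (1 bar*nm = 1e5 Pa * 1e-9 m = 1e-4 N/m = 0.1 mN/m). A worked check with made-up numbers:
```python
# made-up values: Lz = 5 nm box, Pzz = 100 bar normal, Pxx = Pyy = 50 bar tangential
Lz, Pxx, Pyy, Pzz = 5.0, 50.0, 50.0, 100.0
gamma = 0.5 * Lz * (Pzz - 0.5 * (Pxx + Pyy)) / 10.0
print(gamma)   # 0.5 * 5 * 50 / 10 = 12.5 mN/m
```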
{
"source": "jhtimmins/test-ipinfo",
"score": 3
}
#### File: ipinfo/cache/interface.py
```python
import abc
class CacheInterface(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __contains__(self, key):
pass
@abc.abstractmethod
def __setitem__(self, key, value):
pass
@abc.abstractmethod
def __getitem__(self, key):
pass
```
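A minimal dict-backed implementation of the `CacheInterface` above (illustrative sketch only; the import path is inferred from the file header, and this class is not part of the ipinfo package):
```python
from ipinfo.cache.interface import CacheInterface  # path inferred from the file layout above

class DictCache(CacheInterface):
    """Unbounded in-memory cache satisfying the abstract interface."""

    def __init__(self):
        self._data = {}

    def __contains__(self, key):
        return key in self._data

    def __setitem__(self, key, value):
        self._data[key] = value

    def __getitem__(self, key):
        return self._data[key]

# usage
cache = DictCache()
cache["8.8.8.8"] = {"country": "US"}
if "8.8.8.8" in cache:
    print(cache["8.8.8.8"])
```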
{
"source": "jhtitor/secp256k1prp-py",
"score": 2
}
#### File: jhtitor/secp256k1prp-py/setup.py
```python
import errno
import os
import os.path
import shutil
import subprocess
import tarfile
from distutils import log
from distutils.command.build_clib import build_clib as _build_clib
from distutils.command.build_ext import build_ext as _build_ext
from distutils.errors import DistutilsError
from io import BytesIO
import sys
from setuptools import Distribution as _Distribution, setup, find_packages, __version__ as setuptools_version
from setuptools.command.develop import develop as _develop
from setuptools.command.egg_info import egg_info as _egg_info
from setuptools.command.sdist import sdist as _sdist
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
except ImportError:
_bdist_wheel = None
pass
try:
from urllib2 import urlopen, URLError
except ImportError:
from urllib.request import urlopen
from urllib.error import URLError
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from setup_support import absolute, build_flags, has_system_lib
# Version of libsecp256k1 to download if none exists in the `libsecp256k1`
# directory
LIB_TARBALL_URL = "https://github.com/sipa/secp256k1-zkp/archive/35932bb24e83257b737a8ab4da0816972f4c252a.tar.gz"
# We require setuptools >= 3.3
if [int(i) for i in setuptools_version.split('.')] < [3, 3]:
raise SystemExit(
"Your setuptools version ({}) is too old to correctly install this "
"package. Please upgrade to a newer version (>= 3.3).".format(setuptools_version)
)
# Ensure pkg-config is available
#try:
# subprocess.check_call(['pkg-config', '--version'])
#except OSError:
# raise SystemExit(
# "'pkg-config' is required to install this package. "
# "Please see the README for details."
# )
def download_library(command):
if command.dry_run:
return
libdir = absolute("libsecp256k1")
if os.path.exists(os.path.join(libdir, "autogen.sh")):
# Library already downloaded
return
if not os.path.exists(libdir):
command.announce("downloading libsecp256k1 source code", level=log.INFO)
try:
r = urlopen(LIB_TARBALL_URL)
if r.getcode() == 200:
content = BytesIO(r.read())
content.seek(0)
with tarfile.open(fileobj=content) as tf:
dirname = tf.getnames()[0].partition('/')[0]
tf.extractall()
shutil.move(dirname, libdir)
else:
raise SystemExit(
"Unable to download secp256k1 library: HTTP-Status: %d"
% r.getcode()
)
except URLError as ex:
raise SystemExit("Unable to download secp256k1 library: %s",
ex.message)
class egg_info(_egg_info):
def run(self):
# Ensure library has been downloaded (sdist might have been skipped)
download_library(self)
_egg_info.run(self)
class sdist(_sdist):
def run(self):
download_library(self)
_sdist.run(self)
if _bdist_wheel:
class bdist_wheel(_bdist_wheel):
def run(self):
download_library(self)
_bdist_wheel.run(self)
else:
bdist_wheel = None
class Distribution(_Distribution):
def has_c_libraries(self):
return not has_system_lib()
class build_clib(_build_clib):
def initialize_options(self):
_build_clib.initialize_options(self)
self.build_flags = None
def finalize_options(self):
_build_clib.finalize_options(self)
if self.build_flags is None:
self.build_flags = {
'include_dirs': [],
'library_dirs': [],
'define': [],
}
self.build_flags["include_dirs"] = [
'libsecp256k1/include',
]
def get_source_files(self):
# Ensure library has been downloaded (sdist might have been skipped)
download_library(self)
return [
absolute(os.path.join(root, filename))
for root, _, filenames in os.walk(absolute("libsecp256k1"))
for filename in filenames
]
#def build_libraries(self, libraries):
# raise Exception("build_libraries")
def check_library_list(self, libraries):
raise Exception("check_library_list")
#def get_library_names(self):
# return build_flags('libsecp256k1', 'l', os.path.abspath(self.build_temp))
def run(self):
self.libraries = [
( 'secp256k1prp', {'sources': [
'libsecp256k1/src/secp256k1.c'
], 'include_dirs': [ 'libsecp256k1/src', 'libsecp256k1' ]} )
]
self.define = [
('USE_NUM_NONE', 1),
('USE_FIELD_10X26', 1),
# ('USE_FIELD_5X52', 1),
('USE_FIELD_INV_BUILTIN', 1),
# ('USE_FIELD_INV_NU', 1),
# ('USE_SCALAR_4X64', 1),
('USE_SCALAR_8X32', 1),
('USE_SCALAR_INV_BUILTIN', 1),
# ('USE_SCALAR_INV_NUM', )
('ENABLE_MODULE_ECDH', 1),
('ENABLE_MODULE_SCHNORR', 1),
('ENABLE_MODULE_RECOVERY', 1),
('ENABLE_MODULE_RANGEPROOF', 1),
]
# Note: this early return bypasses the UNIX-only autotools build path below, which is kept for reference but never executed
return _build_clib.run(self)
if has_system_lib():
log.info("Using system library")
return
build_temp = os.path.abspath(self.build_temp)
try:
os.makedirs(build_temp)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.exists(absolute("libsecp256k1/configure")):
# configure script hasn't been generated yet
autogen = absolute("libsecp256k1/autogen.sh")
os.chmod(absolute(autogen), 0o755)
subprocess.check_call(
[autogen],
cwd=absolute("libsecp256k1"),
)
for filename in [
"libsecp256k1/configure",
"libsecp256k1/build-aux/compile",
"libsecp256k1/build-aux/config.guess",
"libsecp256k1/build-aux/config.sub",
"libsecp256k1/build-aux/depcomp",
"libsecp256k1/build-aux/install-sh",
"libsecp256k1/build-aux/missing",
"libsecp256k1/build-aux/test-driver",
]:
try:
os.chmod(absolute(filename), 0o755)
except OSError as e:
# some of these files might not exist depending on autoconf version
if e.errno != errno.ENOENT:
# If the error isn't "No such file or directory" something
# else is wrong and we want to know about it
raise
cmd = [
absolute("libsecp256k1/configure"),
"--disable-shared",
"--enable-static",
"--disable-dependency-tracking",
"--with-pic",
"--enable-module-recovery",
"--prefix",
os.path.abspath(self.build_clib)
]
if os.environ.get('SECP_BUNDLED_WITH_BIGNUM'):
log.info("Building with bignum support (requires libgmp)")
cmd.extend(["--with-bignum=gmp"])
else:
cmd.extend(["--without-bignum"])
if os.environ.get('SECP_BUNDLED_EXPERIMENTAL') or True:
log.info("Building experimental")
cmd.extend([
"--enable-experimental",
"--enable-module-ecdh",
"--enable-module-schnorr",
"--enable-module-rangeproof",
])
log.debug("Running configure: {}".format(" ".join(cmd)))
subprocess.check_call(
cmd,
cwd=build_temp,
)
subprocess.check_call(["make"], cwd=build_temp)
subprocess.check_call(["make", "install"], cwd=build_temp)
self.build_flags['include_dirs'].extend(build_flags('libsecp256k1', 'I', build_temp))
self.build_flags['library_dirs'].extend(build_flags('libsecp256k1', 'L', build_temp))
if not has_system_lib():
self.build_flags['define'].append(('CFFI_ENABLE_RECOVERY', None))
else:
pass
class build_ext(_build_ext):
def run(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command("build_clib")
self.include_dirs.append(
os.path.join(build_clib.build_clib, "include"),
)
self.include_dirs.extend(build_clib.build_flags['include_dirs'])
self.library_dirs.append(
os.path.join(build_clib.build_clib, "lib"),
)
self.library_dirs.extend(build_clib.build_flags['library_dirs'])
self.define = build_clib.build_flags['define']
return _build_ext.run(self)
class develop(_develop):
def run(self):
if not has_system_lib():
raise DistutilsError(
"This library is not usable in 'develop' mode when using the "
"bundled libsecp256k1. See README for details.")
_develop.run(self)
setup(
name="secp256k1prp",
version="0.13.3",
description='FFI bindings to libsecp256k1-zkp',
url='https://github.com/jhtitor/secp256k1prp-py',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
setup_requires=['cffi>=1.3.0', 'pytest-runner==2.6.2'],
install_requires=['cffi>=1.3.0'],
tests_require=['pytest==2.8.7'],
packages=find_packages(exclude=('_cffi_build', '_cffi_build.*', 'libsecp256k1')),
ext_package="secp256k1prp",
cffi_modules=[
"_cffi_build/build.py:ffi"
],
cmdclass={
'build_clib': build_clib,
'build_ext': build_ext,
'develop': develop,
'egg_info': egg_info,
'sdist': sdist,
'bdist_wheel': bdist_wheel
},
distclass=Distribution,
zip_safe=False,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries",
"Topic :: Security :: Cryptography"
]
)
``` |
{
"source": "jhtschultz/acme",
"score": 2
} |
#### File: jax/impala/learning.py
```python
import time
from typing import Callable, Dict, Iterator, List, NamedTuple, Tuple, Optional, Sequence
import acme
from acme import specs
from acme.jax import losses
from acme.jax import networks
from acme.jax import utils
from acme.utils import counting
from acme.utils import loggers
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import reverb
_PMAP_AXIS_NAME = 'data'
class TrainingState(NamedTuple):
"""Training state consists of network parameters and optimiser state."""
params: hk.Params
opt_state: optax.OptState
class IMPALALearner(acme.Learner, acme.Saveable):
"""Learner for an importanced-weighted advantage actor-critic."""
def __init__(
self,
obs_spec: specs.Array,
unroll_fn: networks.PolicyValueRNN,
initial_state_fn: Callable[[], hk.LSTMState],
iterator: Iterator[reverb.ReplaySample],
optimizer: optax.GradientTransformation,
rng: hk.PRNGSequence,
discount: float = 0.99,
entropy_cost: float = 0.,
baseline_cost: float = 1.,
max_abs_reward: float = np.inf,
counter: counting.Counter = None,
logger: loggers.Logger = None,
devices: Optional[Sequence[jax.xla.Device]] = None,
prefetch_size: int = 2,
num_prefetch_threads: Optional[int] = None,
):
self._devices = devices or jax.local_devices()
# Transform into pure functions.
unroll_fn = hk.without_apply_rng(hk.transform(unroll_fn, apply_rng=True))
initial_state_fn = hk.without_apply_rng(
hk.transform(initial_state_fn, apply_rng=True))
loss_fn = losses.impala_loss(
unroll_fn,
discount=discount,
max_abs_reward=max_abs_reward,
baseline_cost=baseline_cost,
entropy_cost=entropy_cost)
@jax.jit
def sgd_step(
state: TrainingState, sample: reverb.ReplaySample
) -> Tuple[TrainingState, Dict[str, jnp.ndarray]]:
"""Computes an SGD step, returning new state and metrics for logging."""
# Compute gradients.
grad_fn = jax.value_and_grad(loss_fn)
loss_value, gradients = grad_fn(state.params, sample)
# Average gradients over pmap replicas before optimizer update.
gradients = jax.lax.pmean(gradients, _PMAP_AXIS_NAME)
# Apply updates.
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
metrics = {
'loss': loss_value,
}
new_state = TrainingState(params=new_params, opt_state=new_opt_state)
return new_state, metrics
def make_initial_state(key: jnp.ndarray) -> TrainingState:
"""Initialises the training state (parameters and optimiser state)."""
dummy_obs = utils.zeros_like(obs_spec)
dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.
initial_state = initial_state_fn.apply(None)
initial_params = unroll_fn.init(key, dummy_obs, initial_state)
initial_opt_state = optimizer.init(initial_params)
return TrainingState(params=initial_params, opt_state=initial_opt_state)
# Initialise training state (parameters and optimiser state).
state = make_initial_state(next(rng))
self._state = utils.replicate_in_all_devices(state, self._devices)
if num_prefetch_threads is None:
num_prefetch_threads = len(self._devices)
self._prefetched_iterator = utils.sharded_prefetch(
iterator,
buffer_size=prefetch_size,
devices=devices,
num_threads=num_prefetch_threads,
)
self._sgd_step = jax.pmap(
sgd_step, axis_name=_PMAP_AXIS_NAME, devices=self._devices)
# Set up logging/counting.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.make_default_logger('learner')
def step(self):
"""Does a step of SGD and logs the results."""
samples = next(self._prefetched_iterator)
# Do a batch of SGD.
start = time.time()
self._state, results = self._sgd_step(self._state, samples)
# Take results from first replica.
results = utils.first_replica(results)
# Update our counts and record it.
counts = self._counter.increment(steps=1, time_elapsed=time.time() - start)
# Snapshot and attempt to write logs.
self._logger.write({**results, **counts})
def get_variables(self, names: Sequence[str]) -> List[hk.Params]:
# Return first replica of parameters.
return [utils.first_replica(self._state.params)]
def save(self) -> TrainingState:
# Serialize only the first replica of parameters and optimizer state.
return jax.tree_map(utils.first_replica, self._state)
def restore(self, state: TrainingState):
self._state = utils.replicate_in_all_devices(state, self._devices)
```
#### File: acme/tf/variable_utils.py
```python
from concurrent import futures
from typing import Mapping, Optional, Sequence
from acme import core
import tensorflow as tf
import tree
class VariableClient(core.VariableClient):
"""A variable client for updating variables from a remote source."""
def __init__(self,
client: core.VariableSource,
variables: Mapping[str, Sequence[tf.Variable]],
update_period: int = 1):
self._keys = list(variables.keys())
self._variables = tree.flatten(list(variables.values()))
self._call_counter = 0
self._update_period = update_period
self._client = client
self._request = lambda: client.get_variables(self._keys)
# Create a single background thread to fetch variables without necessarily
# blocking the actor.
self._executor = futures.ThreadPoolExecutor(max_workers=1)
self._async_request = lambda: self._executor.submit(self._request)
# Initialize this client's future to None to indicate to the `update()`
# method that there is no pending/running request.
self._future: Optional[futures.Future] = None
def update(self, wait: bool = False):
"""Periodically updates the variables with the latest copy from the source.
This stateful update method keeps track of the number of calls to it and,
every `update_period` call, sends a request to its server to retrieve the
latest variables.
If wait is True, a blocking request is executed. Any active request will be
cancelled.
If wait is False, this method makes an asynchronous request for variables
and returns. Unless the request is immediately fulfilled, the variables are
only copied _within a subsequent call to_ `update()`, whenever the request
is fulfilled by the `VariableSource`. If there is an existing fulfilled
request when this method is called, the resulting variables are immediately
copied.
Args:
wait: if True, executes blocking update.
"""
# Track the number of calls (we only update periodically).
if self._call_counter < self._update_period:
self._call_counter += 1
period_reached: bool = self._call_counter >= self._update_period
if period_reached and wait:
# Cancel any active request.
self._future: Optional[futures.Future] = None
self.update_and_wait()
self._call_counter = 0
return
if period_reached and self._future is None:
# The update period has been reached and no request has been sent yet, so
# making an asynchronous request now.
self._future = self._async_request()
self._call_counter = 0
if self._future is not None and self._future.done():
# The active request is done so copy the result and remove the future.
self._copy(self._future.result())
self._future: Optional[futures.Future] = None
else:
# There is either a pending/running request or we're between update
# periods, so just carry on.
return
def update_and_wait(self):
"""Immediately update and block until we get the result."""
self._copy(self._request())
def _copy(self, new_variables: Sequence[Sequence[tf.Variable]]):
"""Copies the new variables to the old ones."""
new_variables = tree.flatten(new_variables)
if len(self._variables) != len(new_variables):
raise ValueError('Length mismatch between old variables and new.')
for new, old in zip(new_variables, self._variables):
old.assign(new)
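# Illustrative usage sketch (not part of the original module; the learner and
# policy_network names below are assumptions). An actor typically constructs
# the client around a VariableSource and calls update() once per act step;
# only every `update_period`-th call actually triggers a fetch, and the copy
# happens on a later call once the background request has completed:
#
#   variable_client = VariableClient(
#       client=learner,                        # any core.VariableSource
#       variables={'policy': policy_network.variables},
#       update_period=100)
#   variable_client.update_and_wait()          # blocking initial sync
#   for _ in range(num_steps):
#       action = policy_network(observation)
#       variable_client.update()               # cheap and non-blocking most steps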
``` |
{
"source": "jhtut/acidipy",
"score": 2
} |
#### File: acidipy/acidipy/session.py
```python
import json
import string
from pygics import Rest
from .static import *
class Session(Rest):
def __init__(self, ip, usr, pwd, **kargs):
Rest.__init__(self, 'https://' + ip, usr, pwd, **kargs)
self.ip = ip
def __login__(self, session):
try: resp = session.post(self.url + '/api/aaaLogin.json',
json={'aaaUser': {'attributes': {'name': self.usr, 'pwd': self.pwd}}},
verify=False, timeout=2.0)
except: raise ExceptAcidipySession(self)
if resp.status_code == 200:
self.cookie = resp.cookies['APIC-cookie']
if self.debug: print('APIC Session Connect to %s with %s' % (self.url, self.cookie))
return 'APIC-cookie=%s' % self.cookie
raise ExceptAcidipySession(self)
def __refresh__(self, session):
try: resp = session.get(self.url + '/api/aaaRefresh.json',
cookies=self.__cookie__(),
verify=False, timeout=2.0)
except: raise ExceptAcidipySession(self)
if resp.status_code == 200:
self.cookie = resp.cookies['APIC-cookie']
if self.debug: print('APIC Session Refresh to %s with %s' % (self.url, self.cookie))
return 'APIC-cookie=%s' % self.cookie
raise ExceptAcidipySession(self)
def __cookie__(self): return {'Set-Cookie' : self.token}
def get(self, url):
for _ in range(0, self.retry):
resp = Rest.get(self, url)
if resp.status_code == 200:
try: return resp.json()['imdata']
except Exception as e:
try: return json.loads(''.join(x for x in resp.text if x in string.printable))['imdata']
except: raise ExceptAcidipyResponse(self, resp.status_code, str(e))
elif resp.status_code == 403: self.refresh()
else:
try:
error = resp.json()['imdata'][0]['error']['attributes']
code = error['code']; text = error['text']
except: raise ExceptAcidipyResponse(self, resp.status_code, url)
else: raise ExceptAcidipyResponse(self, code, text)
raise ExceptAcidipySession(self)
def post(self, url, data):
for _ in range(0, self.retry):
resp = Rest.post(self, url, data)
if resp.status_code == 200: return True
elif resp.status_code == 403: self.refresh()
else:
try:
error = resp.json()['imdata'][0]['error']['attributes']
code = error['code']; text = error['text']
except: raise ExceptAcidipyResponse(self, resp.status_code, url)
else: raise ExceptAcidipyResponse(self, code, text)
raise ExceptAcidipySession(self)
def put(self, url, data):
for _ in range(0, self.retry):
resp = Rest.put(self, url, data)
if resp.status_code == 200: return True
elif resp.status_code == 403: self.refresh()
else:
try:
error = resp.json()['imdata'][0]['error']['attributes']
code = error['code']; text = error['text']
except: raise ExceptAcidipyResponse(self, resp.status_code, url)
else: raise ExceptAcidipyResponse(self, code, text)
raise ExceptAcidipySession(self)
def delete(self, url):
for _ in range(0, self.retry):
resp = Rest.delete(self, url)
if resp.status_code == 200: return True
elif resp.status_code == 403: self.refresh()
else:
try:
error = resp.json()['imdata'][0]['error']['attributes']
code = error['code']; text = error['text']
except: raise ExceptAcidipyResponse(self, resp.status_code, url)
else: raise ExceptAcidipyResponse(self, code, text)
raise ExceptAcidipySession(self)
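# Illustrative usage sketch (not part of the original module; the keyword
# options passed through to the pygics Rest base class are assumptions). A
# Session can query APIC managed objects by class name, returning the parsed
# 'imdata' list; the acidipy Controller normally wraps these calls:
#
#   session = Session('10.0.0.1', 'admin', 'password', debug=True)
#   tenants = session.get('/api/class/fvTenant.json')
#   for mo in tenants:
#       print(mo['fvTenant']['attributes']['name'])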
``` |
{
"source": "jhu-alistair/image_utilities",
"score": 3
} |
#### File: jhu-alistair/image_utilities/image_metadata_file_maker.py
```python
import os
from pathlib import Path
import imghdr
import sys
class ImageMetadataFileMaker:
def __init__(self, directory_name, params):
self.folder_path = Path.home() / directory_name
self.image_names = set() # empty set to hold unique image file names without extensions
# self.yaml_fields = ("Event", "People")
self.add_lines = params
try:
# only proceed if path is valid
assert Path.exists(self.folder_path)
# create a set of the unique image file base names
for img in os.listdir(self.folder_path):
img_path = os.path.join(self.folder_path, img)
if os.path.isfile(img_path):
if (imghdr.what(img_path) != None): # has a valid image extension
                        # build nested dictionary of image file names and attributes
img_name = os.path.splitext(img)[0]
# build a set of unique image names
self.image_names.add(img_name)
except AssertionError:
print ("***", self.__class__.__name__, "encountered an error ***")
print ("Invalid path for the image folder:", self.folder_path)
print()
# raise
# iterator for list of image file names with extensions
def image_file_names(self):
for img in sorted(self.image_names):
yield img
def make_md_file(self):
for fname in self.image_file_names():
full_name = fname + ".txt"
new_file_path = os.path.join(self.folder_path, full_name)
try:
with open(new_file_path,'x') as new_file:
new_file.write("ID: {0}\n".format(fname))
for add_fld in self.add_lines:
new_file.write("{0}\n".format(add_fld))
print("Writing {0}".format(full_name))
except FileExistsError:
print ("Class", self.__class__.__name__ , "Error message: file named", fname, "already exists. Skipping file creation.")
except:
print ("Error in class", self.__class__.__name__ , sys.exc_info()[0])
```
#### File: jhu-alistair/image_utilities/image_renamer.py
```python
import os
from pathlib import Path
import imghdr
import datetime
import re
class ImageRenamer:
def __init__(self, directory_name, new_prefix_code = None):
self.image_files_attrs = {} # nested dictionary of image file names and their attributes
self.image_names = set() # image file names without extensions
self.folder_path = Path.home() / directory_name
if new_prefix_code:
self.new_prefix = new_prefix_code + "_" + datetime.datetime.now().strftime("%Y-%m-%d")
else:
self.new_prefix = datetime.datetime.now().strftime("%Y-%m-%d")
self.name_map = {} # maps current image file names (without extension) to new names
try:
# only proceed if path is valid
assert Path.exists(self.folder_path)
# create a set of the unique image file base names
# and a set of the full image file names
for img in os.listdir(self.folder_path):
img_path = os.path.join(self.folder_path, img)
if os.path.isfile(img_path):
if (imghdr.what(img_path) != None): # has a valid image extension
                        # build nested dictionary of image file names and attributes
pref, suf = os.path.splitext(img)
typ = imghdr.what(img_path)
self.image_files_attrs[img] = {'prefix': pref, 'suffix': suf, 'img_type': typ}
# build a set of unique image names
self.image_names.add(pref)
except AssertionError:
print ("***", self.__class__.__name__, "encountered an error ***")
print ("Invalid path for the image folder:", self.folder_path)
print()
# raise
# iterator for list of image file names with extensions
def image_files(self):
for f in sorted(self.image_files_attrs.keys()):
yield f
# replace image file names with date stamp plus counter
def rename_image_files(self):
ctr = self.set_ctr()
for img in sorted(self.image_names):
if not self.is_a_datestamp(img):
ctr += 1
self.name_map[img] = self.new_prefix + "_" + f"{ctr:04}"
if self.name_map:
for old_file_name in self.image_files():
old_prefix = self.image_files_attrs[old_file_name]['prefix']
                # since we skipped over file names that are already date stamps, do a check
if self.name_map.get(old_prefix, False):
new_file_name = self.name_map[old_prefix] + self.image_files_attrs[old_file_name]['suffix']
print(old_file_name + " => " + new_file_name)
old_file_path = os.path.join(self.folder_path, old_file_name)
new_file_path = os.path.join(self.folder_path, new_file_name)
os.rename(old_file_path, new_file_path)
        else: print( self.__class__.__name__ , " -- There are no image files that do not already have names based on a date stamp in", self.folder_path)
# check that string contains a valid date in iso format
def is_a_datestamp(self, val):
match_obj = re.search(r"(\d{4}-\d{2}-\d{2})(_\d{4})", val)
if match_obj != None:
try:
match_string = match_obj.group(1)
test_date = datetime.datetime.strptime(match_string, "%Y-%m-%d")
return True
except ValueError:
return False
else: return False
# set the counter to the highest number that has already been used with the new file name pattern and today's date
def set_ctr(self):
ctr_list = []
for f in self.image_files():
match_obj = re.search(re.escape(self.new_prefix) + r"(_)(\d{4})", f)
if match_obj != None:
ctr_list.append(match_obj.group(2))
if ctr_list:
return int(max(ctr_list))
else: return 0
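# Illustrative example (not part of the original module; file names are
# hypothetical). With new_prefix_code='trip' run on 2021-06-01,
# rename_image_files() maps each unique base name to 'trip_2021-06-01_NNNN',
# keeping extensions and skipping files that is_a_datestamp() recognises:
#
#   DSC_0042.jpg => trip_2021-06-01_0001.jpg
#   DSC_0042.raw => trip_2021-06-01_0001.raw   # same base name, same new name
#   trip_2021-05-30_0003.jpg                   # left untouched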
```
#### File: jhu-alistair/image_utilities/local_tools.py
```python
import sys
def get_config(key):
import yaml
configs = []
with open('config.yaml', 'r') as stream:
try:
configs = yaml.safe_load(stream)
return configs[key]
except yaml.YAMLError as exc:
print(exc)
def confirm_config(key):
print('\n\nUsing ' + key +':')
print(get_config(key)+'\n')
path_ok = input('OK to proceed? Y/N?: ')
if path_ok.lower().strip() == 'y':
return True
else:
        return False
def get_template():
try:
with open('template.txt', 'r') as template:
return template.read().splitlines()
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
except: #handle other exceptions such as attribute errors
print("Unexpected error:", sys.exc_info()[0])
def confirm_template():
lines = []
lines = get_template()
print('\n\nUsing file template:\n')
for ln in lines:
print(ln)
template_ok = input('\nOK to proceed? Y/N: ')
if template_ok.lower().strip() == 'y':
return True
else:
return False
``` |
{
"source": "jhuang448/LyricsAlignment-MTL",
"score": 2
} |
#### File: jhuang448/LyricsAlignment-MTL/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
import warnings
from utils import notes_to_pc
# the following FFT parameters are designed for a 22.05 kHz sampling rate
sr = 22050
n_fft = 512
resolution = 256/22050*3
with warnings.catch_warnings():
warnings.simplefilter("ignore")
train_audio_transforms = nn.Sequential(
torchaudio.transforms.MelSpectrogram(sample_rate=sr, n_mels=128, n_fft=n_fft),
)
def data_processing(data):
spectrograms = []
phones = []
pcs = []
input_lengths = []
phone_lengths = []
for (waveform, _, _, phone, notes) in data:
waveform = torch.Tensor(waveform)
# convert to Mel
spec = train_audio_transforms(waveform).squeeze(0).transpose(0, 1) # time x n_mels
spectrograms.append(spec)
# get phoneme list (mapped to integers)
phone = torch.Tensor(phone)
phones.append(phone)
# get the pitch contour
        # the number 3 here and below is due to the maxpooling along the frequency axis
pc = notes_to_pc(notes, resolution, spec.shape[0] // 3)
pcs.append(pc)
input_lengths.append(spec.shape[0]//3)
phone_lengths.append(len(phone))
spectrograms = nn.utils.rnn.pad_sequence(spectrograms, batch_first=True).unsqueeze(1).transpose(2, 3)
phones = nn.utils.rnn.pad_sequence(phones, batch_first=True)
return spectrograms, phones, input_lengths, phone_lengths, torch.LongTensor(pcs)
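# Illustrative usage sketch (not part of the original module; the dataset name
# is an assumption). data_processing is meant to be used as a DataLoader
# collate_fn, turning a list of (waveform, _, _, phone, notes) samples into
# padded batch tensors:
#
#   loader = torch.utils.data.DataLoader(
#       some_lyrics_dataset, batch_size=8, shuffle=True,
#       collate_fn=data_processing)
#   spectrograms, phones, input_lengths, phone_lengths, pcs = next(iter(loader))
#   # spectrograms: (batch, 1, n_mels, padded_time); pcs: one pitch label per pooled frame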
class CNNLayerNorm(nn.Module):
    '''Layer normalization built for CNN inputs'''
def __init__(self, n_feats):
super(CNNLayerNorm, self).__init__()
self.layer_norm = nn.LayerNorm(n_feats)
def forward(self, x):
# x (batch, channel, feature, time)
x = x.transpose(2, 3).contiguous() # (batch, channel, time, feature)
x = self.layer_norm(x)
return x.transpose(2, 3).contiguous() # (batch, channel, feature, time)
class ResidualCNN(nn.Module):
'''Residual CNN inspired by https://arxiv.org/pdf/1603.05027.pdf
except with layer norm instead of batch norm
'''
def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
super(ResidualCNN, self).__init__()
self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=kernel // 2)
self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=kernel // 2)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.layer_norm1 = CNNLayerNorm(n_feats)
self.layer_norm2 = CNNLayerNorm(n_feats)
def forward(self, x):
residual = x # (batch, channel, feature, time)
x = self.layer_norm1(x)
x = F.gelu(x)
x = self.dropout1(x)
x = self.cnn1(x)
x = self.layer_norm2(x)
x = F.gelu(x)
x = self.dropout2(x)
x = self.cnn2(x)
x += residual
return x # (batch, channel, feature, time)
class BidirectionalLSTM(nn.Module):
def __init__(self, rnn_dim, hidden_size, dropout, batch_first):
super(BidirectionalLSTM, self).__init__()
self.BiLSTM = nn.LSTM(
input_size=rnn_dim, hidden_size=hidden_size,
num_layers=1, batch_first=batch_first, bidirectional=True)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x, _ = self.BiLSTM(x)
x = self.dropout(x)
return x
class AcousticModel(nn.Module):
'''
The acoustic model: baseline and MTL share the same class,
the only difference is the target dimension of the last fc layer
'''
def __init__(self, n_cnn_layers, rnn_dim, n_class, n_feats, stride=1, dropout=0.1):
super(AcousticModel, self).__init__()
self.n_class = n_class
if isinstance(n_class, int):
target_dim = n_class
else:
target_dim = n_class[0] * n_class[1]
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, n_feats, 3, stride=stride, padding=3 // 2),
nn.ReLU()
)
self.rescnn_layers = nn.Sequential(*[
ResidualCNN(n_feats, n_feats, kernel=3, stride=1, dropout=dropout, n_feats=128)
for _ in range(n_cnn_layers)
])
self.maxpooling = nn.MaxPool2d(kernel_size=(2, 3))
self.fully_connected = nn.Linear(n_feats * 64, rnn_dim)
self.bilstm = nn.Sequential(
BidirectionalLSTM(rnn_dim=rnn_dim, hidden_size=rnn_dim, dropout=dropout, batch_first=True),
BidirectionalLSTM(rnn_dim=rnn_dim * 2, hidden_size=rnn_dim, dropout=dropout, batch_first=False),
BidirectionalLSTM(rnn_dim=rnn_dim * 2, hidden_size=rnn_dim, dropout=dropout, batch_first=False)
)
self.classifier = nn.Sequential(
nn.Linear(rnn_dim * 2, target_dim)
)
def forward(self, x):
x = self.cnn_layers(x)
x = self.rescnn_layers(x)
x = self.maxpooling(x)
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # (batch, feature, time)
x = x.transpose(1, 2) # (batch, time, feature)
x = self.fully_connected(x)
x = self.bilstm(x)
x = self.classifier(x)
if isinstance(self.n_class, tuple):
x = x.view(sizes[0], sizes[3], self.n_class[0], self.n_class[1])
return x
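# Illustrative shape example (not part of the original module; the concrete
# n_class values and input sizes are assumptions). For the baseline model
# n_class is an int and the classifier emits per-frame phoneme logits; for the
# MTL model n_class is a (n_phones, n_pitches) tuple and the output is reshaped
# into a joint grid. With 128 mel bins and a time axis divisible by 3:
#
#   baseline = AcousticModel(n_cnn_layers=3, rnn_dim=256, n_class=41, n_feats=32)
#   mtl      = AcousticModel(n_cnn_layers=3, rnn_dim=256, n_class=(41, 47), n_feats=32)
#   x = torch.randn(2, 1, 128, 300)     # (batch, 1, n_mels, time)
#   baseline(x).shape                   # (2, 100, 41)      time pooled by 3
#   mtl(x).shape                        # (2, 100, 41, 47)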
class MultiTaskLossWrapper(nn.Module):
def __init__(self):
super(MultiTaskLossWrapper, self).__init__()
self.criterion_lyrics = nn.CTCLoss(blank=40, zero_infinity=True)
self.criterion_melody = nn.CrossEntropyLoss()
def forward(self, mat3d, lyrics_gt, melody_gt):
n_batch, n_frame, n_ch, n_p = mat3d.shape # (batch, time, phone, pitch)
y_lyrics = torch.sum(mat3d, dim=3) # (batch, time, n_ch)
y_melody = torch.sum(mat3d, dim=2) # (batch, time, n_p)
y_lyrics = F.log_softmax(y_lyrics, dim=2)
y_lyrics = y_lyrics.transpose(0, 1) # (time, batch, n_ch) reshape for CTC
labels, input_lengths, label_lengths = lyrics_gt
loss_lyrics = self.criterion_lyrics(y_lyrics, labels, input_lengths, label_lengths)
y_melody = y_melody.transpose(1, 2) # (batch, n_p, time)
loss_melody = self.criterion_melody(y_melody, melody_gt)
return loss_lyrics, loss_melody
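# Illustrative sketch (not part of the original module; variable names and the
# final loss weighting are assumptions). The wrapper treats the MTL output as a
# joint (phone, pitch) grid and marginalises it: summing over the pitch axis
# gives per-frame phoneme logits for the CTC loss, summing over the phone axis
# gives per-frame pitch logits for the cross-entropy loss:
#
#   mat3d = mtl(spectrograms)           # (batch, time, n_phones, n_pitches)
#   loss_lyrics, loss_melody = MultiTaskLossWrapper()(
#       mat3d, (phones, input_lengths, phone_lengths), pcs)
#   loss = loss_lyrics + loss_melody    # relative weighting is a training choice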
class BoundaryDetection(nn.Module):
def __init__(self, n_cnn_layers, rnn_dim, n_class, n_feats, stride=1, dropout=0.1):
super(BoundaryDetection, self).__init__()
self.n_class = n_class
# n residual cnn layers with filter size of 32
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, n_feats, 3, stride=stride, padding=3 // 2),
nn.ReLU()
)
self.rescnn_layers = nn.Sequential(*[
ResidualCNN(n_feats, n_feats, kernel=3, stride=1, dropout=dropout, n_feats=128)
for _ in range(n_cnn_layers)
])
self.maxpooling = nn.MaxPool2d(kernel_size=(2, 3))
self.fully_connected = nn.Linear(n_feats * 64, rnn_dim) # add a linear layer
self.bilstm_layers = nn.Sequential(
BidirectionalLSTM(rnn_dim=rnn_dim, hidden_size=rnn_dim, dropout=dropout, batch_first=True),
BidirectionalLSTM(rnn_dim=rnn_dim * 2, hidden_size=rnn_dim, dropout=dropout, batch_first=False),
BidirectionalLSTM(rnn_dim=rnn_dim * 2, hidden_size=rnn_dim, dropout=dropout, batch_first=False)
)
self.classifier = nn.Sequential(
nn.Linear(rnn_dim * 2, n_class) # birnn returns rnn_dim*2
)
def forward(self, x):
x = self.cnn_layers(x)
x = self.rescnn_layers(x)
x = self.maxpooling(x)
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # (batch, feature, time)
x = x.transpose(1, 2) # (batch, time, feature)
x = self.fully_connected(x)
x = self.bilstm_layers(x)
x = self.classifier(x)
x = x.view(sizes[0], sizes[3], self.n_class)
x = torch.sigmoid(x)
return x
``` |
{
"source": "jhuang97/starlink-constellation-animation",
"score": 3
} |
#### File: starlink-constellation-animation/src/get_starlink_launch_info.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from unicodedata import normalize
from dateutil.parser import parse
import re
def clean_normalize_whitespace(x):
""" Normalize unicode characters and strip trailing spaces
"""
if isinstance(x, str):
return normalize('NFKC', x).strip()
else:
return x
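# Illustrative example (not part of the original script): NFKC normalisation
# maps compatibility characters such as the non-breaking spaces found in
# Wikipedia tables to plain spaces, and strip() drops surrounding whitespace,
# e.g. clean_normalize_whitespace('Starlink\xa0v1.0 ') -> 'Starlink v1.0'.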
def de_2tuple(x):
if type(x) == tuple and len(x) == 2:
if x[0] == x[1]:
return x[0]
else:
return x[1]
else:
return x
def de_ref(x):
return re.sub(r'\s?\[\d+\]\s?', '', x)
table_sl = pd.read_html('https://en.wikipedia.org/wiki/Starlink', match='Starlink launches')
print(f'Total tables: {len(table_sl)}')
df = table_sl[-1]
df = df.applymap(clean_normalize_whitespace)
print(type(df.columns[0]) == tuple)
df.columns = df.columns.to_series().apply(clean_normalize_whitespace)
df.rename(columns=de_2tuple, inplace=True)
df.rename(columns=de_ref, inplace=True)
df.rename(columns={'Deorbited [87]': 'Deorbited'}, inplace=True)
df.replace(to_replace=r'\[\d+\]$', value='', regex=True, inplace=True)
df.replace(to_replace=r'\[\d+\]$', value='', regex=True, inplace=True)
df.replace(to_replace='Tintin[91]v0.1', value='Tintin v0.1', inplace=True)
df.replace(to_replace='°', value='', regex=True, inplace=True)
df = df.applymap(clean_normalize_whitespace)
print(df.head())
print(df.info())
df['match'] = df.Mission.eq(df.Mission.shift())
# print(df['match'])
pd.set_option("display.max_rows", None, "display.max_columns", None)
# print(df[df['match'] == False].iloc[:, [0, 1, 3]])
mask = ~df['match'] & df['Outcome'].str.contains('Success')
def parse_table_dt(dt):
d = dt.split(',')[0]
return parse(d).strftime('%Y-%m-%d')
df1 = df[mask].copy()
df1['Inclination'] = df1['Inclination'].apply(pd.to_numeric)
df1['Deployed'] = df1['Deployed'].apply(pd.to_numeric)
df1['Working'] = df1['Working'].apply(pd.to_numeric)
df1['launch_date'] = df1['Date and time, UTC'].apply(parse_table_dt)
df2 = df1.iloc[:, [0, 1, 7, 8, 9, 12]].copy()
print(df2)
df2.to_pickle('../sat_info/starlink_launches.pkl')
```
#### File: starlink-constellation-animation/src/load_filtered_tles.py
```python
from tle_util import *
from SatProp import Sgp4Prop
import os
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import date, datetime, timedelta
from my_util import *
from scipy.stats import linregress
from scipy.signal import savgol_filter
import math
from collections import defaultdict
GM = 3.9860e14 # m^3 s^-2
const = GM / 4 / math.pi / math.pi
# r_E = 6371 # km, mean radius
r_E = 6378.1 # km, equatorial radius
some_6P_card = '0.00000030. 480. 6P'
def perdelta(start, end, delta):
curr = start
while curr < end:
yield curr
curr += delta
def align_times(times_dt, t_start, n_day_divs, n_ticks):
start_dest_idx = round((times_dt[0] - t_start)/timedelta(days=1) * n_day_divs)
end_dest_idx = round((times_dt[-1] - t_start) / timedelta(days=1) * n_day_divs) + 1
time_within_range = True
if start_dest_idx < 0:
start_src_idx = -start_dest_idx
start_dest_idx = 0
elif start_dest_idx >= n_ticks:
print('TLE starts after end of time window: ', my_path)
start_src_idx = None
time_within_range = False
else:
start_src_idx = 0
if end_dest_idx > n_ticks:
end_src_idx = len(times_dt) - (end_dest_idx - n_ticks)
end_dest_idx = n_ticks
elif end_dest_idx < 0:
print('TLE ends before start of time window: ', my_path)
end_src_idx = None
time_within_range = False
else:
end_src_idx = len(times_dt)
return start_dest_idx, end_dest_idx, start_src_idx, end_src_idx, time_within_range
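# Illustrative example (not part of the original script). align_times maps a
# satellite's propagated time series onto the global tick grid starting at
# t_start with n_day_divs ticks per day. With n_day_divs=4 (one tick every
# 6 hours), a history whose first sample is 2.5 days after t_start gets
# start_dest_idx = round(2.5 * 4) = 10, and samples falling before t_start or
# after the last tick are trimmed away via start_src_idx/end_src_idx.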
if __name__ == '__main__':
launch_dates = []
launch_names_all = []
norad_ranges = []
with open('../sat_info/shell_1_ids_by_launch.txt', 'r') as f:
lines = f.read().splitlines()
for l in lines:
tokens = l.split('\t')
launch_dates.append(tokens[0])
launch_names_all.append(tokens[1])
norad_ranges.append(tokens[2])
mode = 1
if mode == 1:
n_day_divs = 4
elif mode == 2: # modes 2 and 3 used to adjust parameters of reference satellite
n_day_divs = 60
elif mode == 3:
n_day_divs = 120
elif mode == 4: # smoothing
n_day_divs = 24
# build arrays to hold on all satellite info
t_start = datetime(2019, 11, 14, 18, 0, 0)
# t_start = datetime(2020, 5, 8)
t_end = datetime(2021, 8, 27)
t_all = list(perdelta(t_start, t_end, timedelta(days=1) / n_day_divs))
n_ticks = len(t_all)
# print(t_all)
start_indices = {}
end_indices = {} # not inclusive
app = Sgp4Prop('reference_sat', '../input/reference_satellite_1.inp', n_day_divs=n_day_divs, end_dt=t_end)
times_dt_ref, mean_elems_ref, osc_elems_ref = app.run()
start_dest_idx, end_dest_idx, start_src_idx, end_src_idx, time_within_range = align_times(times_dt_ref, t_start,
n_day_divs, n_ticks)
assert (start_dest_idx == 0 and end_dest_idx == n_ticks)
mean_elems_ref = mean_elems_ref[start_src_idx:end_src_idx, :]
osc_elems_ref = osc_elems_ref[start_src_idx:end_src_idx, :]
for launch_idx, norad_id_range in enumerate(norad_ranges[26:28]):
tle_data = np.load('../filtered_tle_data/f_' + norad_id_range + '.npz')
print(list(tle_data.keys()))
tle_df = tle_npz_to_df(tle_data)
print(tle_df)
section_pos = tle_data['section_pos']
norad_ids = tle_data['norad']
print(section_pos)
if mode == 1:
calc_range = range(len(section_pos)-1)
elif mode == 2 or mode == 3:
calc_range = [1, 4, 5, 6, 8]
# calc_range = [1, 4]
elif mode == 4:
# calc_range = range(11, 21)
calc_range = range(len(section_pos)-1)
# calc_range = [20, 26, 28, 29, 30, 38, 41, 44]
mean_elems_all = np.zeros((n_ticks, 6, len(section_pos)))
osc_elems_all = np.zeros((n_ticks, 6, len(section_pos)))
tle_tidx_by_sat = []
tle_epochs_by_sat = []
for calc_idx, k in enumerate(calc_range):
i1 = section_pos[k]
i2 = section_pos[k + 1]
lines = recreate_tle_range(tle_df[i1:i2], norad_ids[k], tle_data['int_desig'][k])
my_path = '../input/test_' + tle_data['norad'][k] + '.inp'
with open(my_path, 'w') as fw:
fw.write(some_6P_card + '\n')
fw.write(lines)
app = Sgp4Prop('', my_path, n_day_divs=n_day_divs, backtrack=True, save_tle_epochs=True)
times_dt, mean_elems, osc_elems, tle_tidxs, tle_epochs = app.run()
tle_tidx_by_sat.append(tle_tidxs)
tle_epochs_by_sat.append(tle_epochs)
# if times_dt[0] < datetime(2020, 2, 17):
# print(norad_ids[k])
# print(tle_epochs_by_sat[k][:20])
# figure where in matrix to insert orbit history
start_dest_idx, end_dest_idx, start_src_idx, end_src_idx, time_within_range = align_times(times_dt, t_start,
n_day_divs,
n_ticks)
assert (end_dest_idx - start_dest_idx == end_src_idx - start_src_idx)
start_indices[k] = start_dest_idx
end_indices[k] = end_dest_idx
mean_elems_all[start_dest_idx:end_dest_idx, :, k] = mean_elems[start_src_idx:end_src_idx, :]
osc_elems_all[start_dest_idx:end_dest_idx, :, k] = osc_elems[start_src_idx:end_src_idx, :]
mean_elems_all[:start_dest_idx, :, k] = np.nan
osc_elems_all[:start_dest_idx, :, k] = np.nan
mean_elems_all[end_dest_idx:, :, k] = np.nan
osc_elems_all[end_dest_idx:, :, k] = np.nan
rel_node = np.remainder(mean_elems_all[start_dest_idx:end_dest_idx, 3, k] -
mean_elems_ref[start_dest_idx:end_dest_idx, 3], 360.)
alt = (const * (86400 / mean_elems_all[start_dest_idx:end_dest_idx, 0, k]) ** 2) ** (1 / 3) / 1000 - r_E
os.remove(my_path)
long_past_asc_node = np.remainder(osc_elems_all[:, 4, :] + osc_elems_all[:, 5, :], 360.)
start_new_orbit = datetime(2020, 11, 24, 19)
print('new ref orbit, time epoch 2020 ', days_since_yr(start_new_orbit, 2020))
if mode == 1 or mode == 4:
for k in calc_range:
t1 = start_indices[k]
t2 = end_indices[k]
# plt.plot(t_all[t1:t2], np.remainder(mean_elems_all[t1:t2, 3, k] - mean_elems_ref[t1:t2, 3], 360.))
# longitude past ascending node and smoothing
plt.figure(1)
rel_long = np.remainder(long_past_asc_node[t1:t2, k] - mean_elems_ref[t1:t2, 5], 360.)
plt.plot(t_all[t1:t2], rel_long)
# if mode == 4:
# rel_rad = rel_long / 180 * np.pi
# rel_rad_filtered = savgol_filter(np.vstack((np.cos(rel_rad), np.sin(rel_rad))), 11, 3)
# smooth_rel_long = np.remainder(np.arctan2(rel_rad_filtered[1], rel_rad_filtered[0]) / np.pi * 180, 360.)
# plt.plot(t_all[t1:t2], smooth_rel_long)
plt.figure(2)
rel_node = np.remainder(mean_elems_all[t1:t2, 3, k] - mean_elems_ref[t1:t2, 3], 360.)
plt.plot(t_all[t1:t2], rel_node, label=k)
# altitude and smoothing
plt.figure(3)
alt = (const * (86400 / mean_elems_all[t1:t2, 0, k]) ** 2) ** (1 / 3) / 1000 - r_E
plt.plot(t_all[t1:t2], alt, label=norad_ids[k])
if np.any(alt > 600):
print('bad', norad_ids[k])
# if mode == 4:
# alt_filtered = savgol_filter(alt, 31, 3)
# plt.plot(t_all[t1:t2], alt_filtered)
# plt.plot(t_all[t1:t2], np.remainder(long_past_asc_node[t1:t2, k] - mean_elems_ref[t1:t2, 5], 360.), label=k)
plt.legend()
ref_time_1 = (datetime(2020, 5, 1), datetime(2020, 12, 1))
ref_time_2 = (datetime(2021, 1, 9), datetime(2021, 5, 1))
ref_time_3 = (start_new_orbit - timedelta(hours=3), start_new_orbit + timedelta(hours=3))
if mode == 2 or mode == 3:
# count laps
angle_total = 0
interval_start = False
for tidx in range(start_indices[calc_range[0]] + 1, end_indices[calc_range[0]]):
t = t_all[tidx]
if ref_time_3[0] < t < ref_time_3[1]:
if not interval_start:
interval_start = True
start_time = t
start_idx = tidx
diff = long_past_asc_node[tidx, 1] - long_past_asc_node[tidx - 1, 1]
if long_past_asc_node[tidx, 1] < 180 < long_past_asc_node[tidx - 1, 1]:
diff -= 360
angle_total += diff
end_time = t
end_idx = tidx
n_revs = angle_total / 360
days_elapsed = (end_time - start_time) / timedelta(days=1)
print(n_revs, days_elapsed, n_revs / days_elapsed)
fig, ax = plt.subplots(1, 2)
rlong_slopes = []
rnode_slopes = []
t1 = start_idx
t2 = end_idx + 1
if mode == 3:
ax[0].plot(t_all[t1:t2], mean_elems_ref[t1:t2, 3], '.-', label='ref')
ax[1].plot(t_all[t1:t2], mean_elems_ref[t1:t2, 5], '.-', label='ref')
ax[0].grid()
ax[1].grid()
for k in calc_range:
rel_node = np.remainder(mean_elems_all[t1:t2, 3, k] - mean_elems_ref[t1:t2, 3], 360.)
rel_longitude = np.remainder(long_past_asc_node[t1:t2, k] - mean_elems_ref[t1:t2, 5], 360.)
time_days = [(t - t_all[t1]) / timedelta(days=1) for t in t_all[t1:t2]]
ax[0].plot(t_all[t1:t2], rel_node, label=k)
ax[1].plot(t_all[t1:t2], rel_longitude, label=k)
result = linregress(time_days, rel_longitude)
rlong_slopes.append(result.slope)
result = linregress(time_days, rel_node)
rnode_slopes.append(result.slope)
# plt.plot(t_all[t1:t2], mean_elems_all[t1:t2, 0, k], label=k)
# plt.plot(t_all[t1:t2], np.remainder(long_past_asc_node[t1:t2, k] - mean_elems_ref[t1:t2, 5], 360.), label=k)
ax[0].legend()
ax[1].legend()
rlong_slopes.sort()
rnode_slopes.sort()
print('relative longitude: ', ', '.join(['%.2E'] * len(rlong_slopes)) % tuple(rlong_slopes))
print('relative node: ', ', '.join(['%.2E'] * len(rnode_slopes)) % tuple(rnode_slopes))
# plt.plot(t_all, np.nanmedian(mean_elems_all[:, 0, :], axis=-1), '.-')
# print(earliest_time, days_since_yr(earliest_time, 2019))
# print(latest_time, days_since_yr(latest_time, 2019))
plt.show()
``` |
{
"source": "JHuang-CV/OD",
"score": 2
} |
#### File: models/detectors/base.py
```python
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch.nn as nn
from mmdet.core import auto_fp16, get_classes, tensor2imgs
from mmdet.utils import print_log
import cv2
import random
import glob
CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')
class BaseDetector(nn.Module, metaclass=ABCMeta):
"""Base class for detectors"""
def __init__(self):
super(BaseDetector, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_shared_head(self):
return hasattr(self, 'shared_head') and self.shared_head is not None
@property
def with_bbox(self):
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
return hasattr(self, 'mask_head') and self.mask_head is not None
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
for img in imgs:
yield self.extract_feat(img)
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""
Args:
img (list[Tensor]): list of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has:
                'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
**kwargs: specific to concrete implementation
"""
pass
async def async_simple_test(self, img, img_meta, **kwargs):
raise NotImplementedError
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
pass
def init_weights(self, pretrained=None):
if pretrained is not None:
print_log('load model from: {}'.format(pretrained), logger='root')
async def aforward_test(self, *, img, img_meta, **kwargs):
for var, name in [(img, 'img'), (img_meta, 'img_meta')]:
if not isinstance(var, list):
raise TypeError('{} must be a list, but got {}'.format(
name, type(var)))
num_augs = len(img)
if num_augs != len(img_meta):
raise ValueError(
'num of augmentations ({}) != num of image meta ({})'.format(
len(img), len(img_meta)))
# TODO: remove the restriction of imgs_per_gpu == 1 when prepared
imgs_per_gpu = img[0].size(0)
assert imgs_per_gpu == 1
if num_augs == 1:
return await self.async_simple_test(img[0], img_meta[0], **kwargs)
else:
raise NotImplementedError
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_meta (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError('{} must be a list, but got {}'.format(
name, type(var)))
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(
'num of augmentations ({}) != num of image meta ({})'.format(
len(imgs), len(img_metas)))
# TODO: remove the restriction of imgs_per_gpu == 1 when prepared
imgs_per_gpu = imgs[0].size(0)
assert imgs_per_gpu == 1
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_meta, return_loss=True, **kwargs):
"""
Calls either forward_train or forward_test depending on whether
return_loss=True. Note this setting will change the expected inputs.
When `return_loss=True`, img and img_meta are single-nested (i.e.
        Tensor and List[dict]), and when `return_loss=False`, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
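    # Illustrative sketch (not part of the original class; the detector and
    # ground-truth names are assumptions). The same detector object is driven
    # through both branches of forward(): trainers call it with
    # return_loss=True and single-nested inputs, while inference wrappers call
    # it with return_loss=False and the inputs wrapped in per-augmentation
    # lists:
    #
    #   losses = detector(img_tensor, img_meta_list, return_loss=True,
    #                     gt_bboxes=gt_bboxes, gt_labels=gt_labels)
    #   results = detector([img_tensor], [img_meta_list], return_loss=False)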
def show_result(self, data, result, dataset=None, score_thr=0.4):
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
img_tensor = data['img'][0]
img_metas = data['img_meta'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
if dataset is None:
class_names = self.CLASSES
elif isinstance(dataset, str):
class_names = get_classes(dataset)
elif isinstance(dataset, (list, tuple)):
class_names = dataset
else:
raise TypeError(
'dataset must be a valid dataset name or a sequence'
' of class names, not {}'.format(type(dataset)))
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
for bbox, label in zip(bboxes, labels):
x1 = int(bbox[0])
y1 = int(bbox[1])
x2 = int(bbox[2])
y2 = int(bbox[3])
img = cv2.rectangle(img, (x1, y1), (x2, y2), mmcv.color_val('green'), thickness=2)
img = cv2.putText(img, CLASSES[label], (x1, y1-6), cv2.FONT_HERSHEY_COMPLEX, 0.5,
mmcv.color_val('green'))
n = len(glob.glob('./detected2_images/*.jpg'))
cv2.imwrite(f'./detected2_images/{n}.jpg', img)
# mmcv.imshow_det_bboxes(
# img_show,
# bboxes,
# labels,
# class_names=class_names,
# score_thr=score_thr)
```
#### File: OD/MyDetector/CIRHead.py
```python
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.models.registry import HEADS
from mmdet.models.utils import ConvModule, bias_init_with_prob
from mmdet.models.anchor_heads.anchor_head import AnchorHead
from mmdet.models.builder import build_loss
from mmdet.core import (AnchorGenerator, delta2bbox, force_fp32,
multi_apply, multiclass_nms)
from mmdet.core.bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner
from .max_iou_assigner import MaxIoUAssigner
@HEADS.register_module
class CIRHead(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
octave_base_scale=4,
scales_per_octave=3,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
loss_IoUness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_softcls=dict(
type='MSELoss',
loss_weight=1.0)):
super(CIRHead, self).__init__()
self.stacked_convs = stacked_convs
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
anchor_scales = octave_scales * octave_base_scale
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(
anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.target_means = target_means
self.target_stds = target_stds
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes - 1
else:
self.cls_out_channels = num_classes
if self.cls_out_channels <= 0:
raise ValueError('num_classes={} is too small'.format(num_classes))
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_iou = build_loss(loss_IoUness)
self.loss_softcls = build_loss(loss_softcls)
self.fp16_enabled = False
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
AnchorGenerator(anchor_base, anchor_scales, anchor_ratios))
self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.CIR_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.CIR_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
self.CIR_IoUness = nn.Conv2d(self.feat_channels, self.num_anchors, 3, padding=1)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.CIR_cls, std=0.01, bias=bias_cls)
normal_init(self.CIR_reg, std=0.01)
normal_init(self.CIR_IoUness, std=0.01)
def forward_single(self, x):
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.CIR_cls(cls_feat)
bbox_pred = self.CIR_reg(reg_feat)
IoU_feat = cls_feat+reg_feat
IoUness_pred = self.CIR_IoUness(IoU_feat)
return cls_score, bbox_pred, IoUness_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
device (torch.device | str): device for returned tensors
Returns:
tuple: anchors of each image, valid flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = []
for i in range(num_levels):
anchors = self.anchor_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i], device=device)
multi_level_anchors.append(anchors)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = self.anchor_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w),
device=device)
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
def loss_single(self, cls_score, bbox_pred, iou_pred, labels, label_weights,
bbox_targets, bbox_weights, iou_targets, iou_weights, softcls,
softcls_weights,num_total_samples, cfg):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
iou_targets = iou_targets.reshape(-1)
iou_weights = iou_weights.reshape(-1)
iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1)
loss_iou = self.loss_iou(iou_pred, iou_targets, iou_weights, avg_factor=num_total_samples)
softcls = softcls.reshape(-1, self.cls_out_channels)
softcls_weights = softcls_weights.reshape(-1, self.cls_out_channels)
loss_softcls = self.loss_softcls(cls_score, softcls, softcls_weights, avg_factor = num_total_samples)
return loss_cls, loss_bbox, loss_iou, loss_softcls
def loss(self,
cls_scores,
bbox_preds,
iou_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generators)
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_iou_targets = self.anchor_target(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
self.target_means,
self.target_stds,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
if cls_reg_iou_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
IoU_targets_list, IoU_weights_list, num_total_pos, num_total_neg, softcls_list,
softcls_weights_list) = cls_reg_iou_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
losses_cls, losses_bbox, losses_iou, losses_softcls = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
iou_preds,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
IoU_targets_list,
IoU_weights_list,
softcls_list,
softcls_weights_list,
num_total_samples=num_total_samples,
cfg=cfg)
# return dict(loss_cls=losses_cls, loss_bbox=losses_bbox,
# loss_iou=losses_iou, loss_softcls=losses_softcls)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
def anchor_target(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
target_means,
target_stds,
cfg,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
sampling=True,
unmap_outputs=True):
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, all_IoU_targets, all_IoU_weights,
pos_inds_list, neg_inds_list, all_softcls, all_softcls_weights) = multi_apply(
self.anchor_target_single,
anchor_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
target_means=target_means,
target_stds=target_stds,
cfg=cfg,
label_channels=label_channels,
sampling=sampling,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights, num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors)
# new add
IoU_targets_list = images_to_levels(all_IoU_targets, num_level_anchors)
IoU_weights_list = images_to_levels(all_IoU_weights, num_level_anchors)
softcls_list = images_to_levels(all_softcls, num_level_anchors)
softcls_weights_list = images_to_levels(all_softcls_weights, num_level_anchors)
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, IoU_targets_list, IoU_weights_list,
num_total_pos, num_total_neg, softcls_list, softcls_weights_list)
def anchor_target_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
target_means,
target_stds,
cfg,
label_channels=1,
sampling=True,
unmap_outputs=True):
inside_flags = self.anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
cfg.allowed_border)
if not inside_flags.any():
            return (None,) * 10
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
if sampling:
assign_result, sampling_result = assign_and_sample(
anchors, gt_bboxes, gt_bboxes_ignore, None, cfg)
else:
bbox_assigner = MaxIoUAssigner()
assign_result = bbox_assigner.assign(anchors, gt_bboxes,
gt_bboxes_ignore, gt_labels)
bbox_sampler = PseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
# new add
IoU_targets = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
IoU_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
softcls_targets = anchors.new_zeros((num_valid_anchors, self.cls_out_channels), dtype=torch.float)
softcls_weights = anchors.new_zeros((num_valid_anchors, self.cls_out_channels), dtype=torch.float)
soft_gt_labels = sampling_result.soft_gt_labels
unassigned_bboxes_inds = sampling_result.unassigned_bboxes_inds
softcls_targets[unassigned_bboxes_inds, soft_gt_labels[unassigned_bboxes_inds, 0]-1] = \
soft_gt_labels[unassigned_bboxes_inds, 1]
softcls_weights[unassigned_bboxes_inds, :] = 1
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes,
sampling_result.pos_gt_bboxes,
target_means, target_stds)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
if cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = cfg.pos_weight
# newly added: IoU targets for positive anchors
IoU_targets[pos_inds] = sampling_result.pos_gt_IoU
IoU_weights[pos_inds] = 1.0
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(labels, num_total_anchors, inside_flags)
label_weights = unmap(label_weights, num_total_anchors, inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
# newly added: unmap the IoU and soft-label targets as well
IoU_targets = unmap(IoU_targets, num_total_anchors, inside_flags)
IoU_weights = unmap(IoU_weights, num_total_anchors, inside_flags)
softcls_targets = unmap(softcls_targets, num_total_anchors, inside_flags)
softcls_weights = unmap(softcls_weights, num_total_anchors, inside_flags)
return (labels, label_weights, bbox_targets, bbox_weights, IoU_targets, IoU_weights, pos_inds,
neg_inds, softcls_targets, softcls_weights)
def anchor_inside_flags(self, flat_anchors, valid_flags, img_shape,
allowed_border=0):
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border).type(torch.uint8) & \
(flat_anchors[:, 1] >= -allowed_border).type(torch.uint8) & \
(flat_anchors[:, 2] < img_w + allowed_border).type(torch.uint8) & \
(flat_anchors[:, 3] < img_h + allowed_border).type(torch.uint8)
else:
inside_flags = valid_flags
return inside_flags
def unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds, :] = data
return ret
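# A minimal usage sketch of `unmap` (added for illustration; this helper is not
# part of the original file and assumes `torch` is imported at module level, as
# elsewhere in this file): values computed for the kept "inside" anchors are
# scattered back over the full anchor set, with `fill` everywhere else.
def _unmap_example():
    inside_flags = torch.tensor([True, False, True, False])
    kept_values = torch.tensor([7.0, 9.0])
    # Expected result: tensor([7., 0., 9., 0.])
    return unmap(kept_values, inside_flags.numel(), inside_flags)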
def images_to_levels(target, num_level_anchors):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_level_anchors:
end = start + n
level_targets.append(target[:, start:end].squeeze(0))
start = end
return level_targets
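# A small sketch of `images_to_levels` (added for illustration, not original
# code; assumes `torch` is imported at module level as elsewhere in this file):
# per-image targets over all anchors are regrouped into one tensor per level.
def _images_to_levels_example():
    per_image = [torch.zeros(6), torch.ones(6)]  # two images, 6 anchors each
    num_level_anchors = [4, 2]                   # level 0 has 4 anchors, level 1 has 2
    levels = images_to_levels(per_image, num_level_anchors)
    # levels[0].shape == (2, 4) and levels[1].shape == (2, 2)
    return levels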
``` |
{
"source": "jhuapl-boss/boss",
"score": 2
} |
#### File: bosscore/views/views_resource.py
```python
import copy
from django.db import transaction
from django.db.models.deletion import ProtectedError
from django.http import HttpResponse
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from guardian.shortcuts import get_objects_for_user
from django.utils import timezone
from bosscore.error import BossError, BossHTTPError, BossPermissionError, BossResourceNotFoundError, ErrorCodes
from bosscore.lookup import LookUpKey
from bosscore.permissions import BossPermissionManager
from bosscore.privileges import check_role
from bosscore.serializers import CollectionSerializer, ExperimentSerializer, ChannelSerializer, \
CoordinateFrameSerializer, CoordinateFrameUpdateSerializer, ExperimentReadSerializer, ChannelReadSerializer, \
ExperimentUpdateSerializer, ChannelUpdateSerializer, CoordinateFrameDeleteSerializer
from bosscore.models import Collection, Experiment, Channel, CoordinateFrame
from bosscore.constants import ADMIN_GRP
from bossutils.configuration import BossConfig
from bossutils.logger import bossLogger
boss_config = BossConfig()
try:
DEFAULT_CUBOID_BUCKET_NAME = 'cuboids.' + boss_config['system']['fqdn'].split('.', 1)[1]
except Exception as ex:
DEFAULT_CUBOID_BUCKET_NAME = ''
bossLogger().error(f'Failed getting system.fqdn from boss.config: {ex}')
class CollectionDetail(APIView):
"""
View to access a collection object
"""
def get(self, request, collection):
"""
Get a single instance of a collection
Args:
request: DRF Request object
collection: Collection name specifying the collection you want
Returns:
Collection
"""
try:
collection_obj = Collection.objects.get(name=collection)
# Check for permissions
if collection_obj is None:
return BossResourceNotFoundError(collection)
if collection_obj.public or request.user.has_perm("read", collection_obj):
if collection_obj.to_be_deleted is not None:
return BossHTTPError("Invalid Request. This Resource has been marked for deletion",
ErrorCodes.RESOURCE_MARKED_FOR_DELETION)
serializer = CollectionSerializer(collection_obj)
data = serializer.data
data['experiments'] = serializer.get_experiments_permissions(collection_obj,request.user)
return Response(data, status=200)
else:
return BossPermissionError('read', collection)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
@transaction.atomic
@check_role("resource-manager")
def post(self, request, collection):
"""Create a new collection
View to create a new collection and an associated bosskey for that collection
Args:
request: DRF Request object
collection : Collection name
Returns:
Collection
"""
col_data = request.data.copy()
col_data['name'] = collection
# Save the object
serializer = CollectionSerializer(data=col_data)
if serializer.is_valid():
serializer.save(creator=self.request.user)
collection_obj = Collection.objects.get(name=col_data['name'])
# Assign permissions to the users primary group and admin group
BossPermissionManager.add_permissions_primary_group(self.request.user, collection_obj)
BossPermissionManager.add_permissions_admin_group(collection_obj)
lookup_key = str(collection_obj.pk)
boss_key = collection_obj.name
LookUpKey.add_lookup(lookup_key, boss_key, collection_obj.name)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
@transaction.atomic
def put(self, request, collection):
"""
Update a collection using django rest framework
Args:
request: DRF Request object
collection: Collection name
Returns:
Collection
"""
try:
# Check if the object exists
collection_obj = Collection.objects.get(name=collection)
# Check for permissions
if request.user.has_perm("update", collection_obj):
serializer = CollectionSerializer(collection_obj, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
# update the lookup key if you update the name
if 'name' in request.data and request.data['name'] != collection:
lookup_key = str(collection_obj.pk)
boss_key = request.data['name']
LookUpKey.update_lookup_collection(lookup_key, boss_key, request.data['name'])
return Response(serializer.data)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
else:
return BossPermissionError('update', collection)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except BossError as err:
return err.to_http()
@transaction.atomic
@check_role("resource-manager")
def delete(self, request, collection):
"""
Delete a collection
Args:
request: DRF Request object
collection: Name of collection to delete
Returns:
Http status
"""
try:
collection_obj = Collection.objects.get(name=collection)
if request.user.has_perm("delete", collection_obj):
# Are there experiments that reference it
serializer = CollectionSerializer(collection_obj)
if len(serializer.get_experiments(collection_obj)) > 0:
# This collection has experiments that reference it and cannot be deleted
return BossHTTPError("Collection {} has experiments that reference it and cannot be deleted."
"Please delete the experiments first.".format(collection),
ErrorCodes.INTEGRITY_ERROR)
collection_obj.to_be_deleted = timezone.now()
collection_obj.save()
return HttpResponse(status=204)
else:
return BossPermissionError('delete', collection)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except ProtectedError:
return BossHTTPError("Cannot delete {}. It has experiments that reference it.".format(collection),
ErrorCodes.INTEGRITY_ERROR)
class CoordinateFrameDetail(APIView):
"""
View to access a coordinate frame
"""
def get(self, request, coordframe):
"""
GET requests for a single instance of a coordinateframe
Args:
request: DRF Request object
coordframe: Coordinate frame name specifying the coordinate frame you want
Returns:
CoordinateFrame
"""
try:
coordframe_obj = CoordinateFrame.objects.get(name=coordframe)
if coordframe_obj.to_be_deleted is not None:
return BossHTTPError("Invalid Request. This Resource has been marked for deletion",
ErrorCodes.RESOURCE_MARKED_FOR_DELETION)
serializer = CoordinateFrameSerializer(coordframe_obj)
return Response(serializer.data)
except CoordinateFrame.DoesNotExist:
return BossResourceNotFoundError(coordframe)
@transaction.atomic
@check_role("resource-manager")
def post(self, request, coordframe):
"""Create a new coordinate frame
View to create a new coordinate frame
Args:
request: DRF Request object
coordframe : Coordinate frame name
Returns:
CoordinateFrame
"""
coordframe_data = request.data.copy()
coordframe_data['name'] = coordframe
serializer = CoordinateFrameSerializer(data=coordframe_data)
if serializer.is_valid():
serializer.save(creator=self.request.user)
coordframe_obj = CoordinateFrame.objects.get(name=coordframe_data['name'])
# Assign permissions to the users primary group and admin group
BossPermissionManager.add_permissions_primary_group(self.request.user, coordframe_obj)
BossPermissionManager.add_permissions_admin_group(coordframe_obj)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
@transaction.atomic
def put(self, request, coordframe):
"""
Update a coordinate frame using django rest framework
Args:
request: DRF Request object
coordframe: Coordinate frame name
Returns:
CoordinateFrame
"""
try:
# Check if the object exists
coordframe_obj = CoordinateFrame.objects.get(name=coordframe)
if request.user.has_perm("update", coordframe_obj):
serializer = CoordinateFrameUpdateSerializer(coordframe_obj, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
# return the object back to the user
coordframe = serializer.data['name']
coordframe_obj = CoordinateFrame.objects.get(name=coordframe)
serializer = CoordinateFrameSerializer(coordframe_obj)
return Response(serializer.data)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
else:
return BossPermissionError('update', coordframe)
except CoordinateFrame.DoesNotExist:
return BossResourceNotFoundError(coordframe)
@transaction.atomic
@check_role("resource-manager")
def delete(self, request, coordframe):
"""
Delete a coordinate frame
Args:
request: DRF Request object
coordframe: Name of coordinateframe to delete
Returns:
Http status
"""
try:
coordframe_obj = CoordinateFrame.objects.get(name=coordframe)
if request.user.has_perm("delete", coordframe_obj):
# Are there experiments that reference it
serializer = CoordinateFrameDeleteSerializer(coordframe_obj)
if len(serializer.get_valid_exps(coordframe_obj)) > 0:
# This collection has experiments that reference it and cannot be deleted
return BossHTTPError(" Coordinate frame {} has experiments that reference it and cannot be deleted."
"Please delete the experiments first.".format(coordframe),
ErrorCodes.INTEGRITY_ERROR)
coordframe_obj.to_be_deleted = timezone.now()
coordframe_obj.save()
return HttpResponse(status=204)
else:
return BossPermissionError('delete', coordframe)
except CoordinateFrame.DoesNotExist:
return BossResourceNotFoundError(coordframe)
except ProtectedError:
return BossHTTPError("Cannot delete {}. It has experiments that reference it.".format(coordframe),
ErrorCodes.INTEGRITY_ERROR)
class ExperimentDetail(APIView):
"""
View to access an experiment
"""
def get(self, request, collection, experiment):
"""
GET requests for a single instance of an experiment
Args:
request: DRF Request object
collection: Collection name specifying the collection you want
experiment: Experiment name specifying the experiment instance
Returns :
Experiment
"""
try:
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
# Check for permissions
if experiment_obj is None:
return BossResourceNotFoundError(experiment)
if experiment_obj.public or request.user.has_perm("read", experiment_obj):
if experiment_obj.to_be_deleted is not None:
return BossHTTPError("Invalid Request. This Resource has been marked for deletion",
ErrorCodes.RESOURCE_MARKED_FOR_DELETION)
serializer = ExperimentReadSerializer(experiment_obj)
data = serializer.data
import logging
logging.getLogger('boss').debug("request.user: " + str(type(request.user)))
data['channels'] = serializer.get_channels_permissions(collection_obj,experiment_obj,request.user)
return Response(data)
else:
return BossPermissionError('read', experiment)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
@transaction.atomic
@check_role("resource-manager")
def post(self, request, collection, experiment):
"""Create a new experiment
View to create a new experiment and an associated bosskey for that experiment
Args:
request: DRF Request object
collection : Collection name
experiment : Experiment name
Returns:
Experiment
"""
experiment_data = request.data.copy()
experiment_data['name'] = experiment
try:
# Get the collection information
collection_obj = Collection.objects.get(name=collection)
if request.user.has_perm("add", collection_obj):
experiment_data['collection'] = collection_obj.pk
# Update the coordinate frame
if 'coord_frame' not in experiment_data:
return BossHTTPError("This request requires a valid coordinate frame",
ErrorCodes.INVALID_POST_ARGUMENT)
coord_frame_obj = CoordinateFrame.objects.get(name=experiment_data['coord_frame'])
experiment_data['coord_frame'] = coord_frame_obj.pk
serializer = ExperimentSerializer(data=experiment_data)
if serializer.is_valid():
serializer.save(creator=self.request.user)
experiment_obj = Experiment.objects.get(name=experiment_data['name'], collection=collection_obj)
# Assign permissions to the users primary group and admin group
BossPermissionManager.add_permissions_primary_group(self.request.user, experiment_obj)
BossPermissionManager.add_permissions_admin_group(experiment_obj)
lookup_key = str(collection_obj.pk) + '&' + str(experiment_obj.pk)
boss_key = collection_obj.name + '&' + experiment_obj.name
LookUpKey.add_lookup(lookup_key, boss_key, collection_obj.name, experiment_obj.name)
serializer = ExperimentReadSerializer(experiment_obj)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
else:
return BossPermissionError('add', collection)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except CoordinateFrame.DoesNotExist:
return BossResourceNotFoundError(experiment_data['coord_frame'])
except ValueError:
return BossHTTPError("Value Error.Collection id {} in post data needs to be an integer"
.format(experiment_data['collection']), ErrorCodes.TYPE_ERROR)
@transaction.atomic
def put(self, request, collection, experiment):
"""
Update an experiment using django rest framework
Args:
request: DRF Request object
collection: Collection name
experiment : Experiment name for the new experiment
Returns:
Experiment
"""
try:
# Check if the object exists
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
if request.user.has_perm("update", experiment_obj):
serializer = ExperimentUpdateSerializer(experiment_obj, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
# update the lookup key if you update the name
if 'name' in request.data and request.data['name'] != experiment:
lookup_key = str(collection_obj.pk) + '&' + str(experiment_obj.pk)
boss_key = collection_obj.name + '&' + request.data['name']
LookUpKey.update_lookup_experiment(lookup_key, boss_key, collection_obj.name, request.data['name'])
# return the object back to the user
experiment = serializer.data['name']
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
serializer = ExperimentReadSerializer(experiment_obj)
return Response(serializer.data)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
else:
return BossPermissionError('update', experiment)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
except BossError as err:
return err.to_http()
@transaction.atomic
@check_role("resource-manager")
def delete(self, request, collection, experiment):
"""
Delete an experiment
Args:
request: DRF Request object
collection: Name of collection
experiment: Experiment name to delete
Returns:
Http status
"""
try:
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
if request.user.has_perm("delete", experiment_obj):
# Are there channels that reference it
serializer = ExperimentReadSerializer(experiment_obj)
if len(serializer.get_channels(experiment_obj)) > 0:
# This experiment has channels that reference it and cannot be deleted
return BossHTTPError(" Experiment {} has channels that reference it and cannot be deleted."
"Please delete the channels first.".format(experiment),
ErrorCodes.INTEGRITY_ERROR)
experiment_obj.to_be_deleted = timezone.now()
experiment_obj.save()
return HttpResponse(status=204)
else:
return BossPermissionError('delete', experiment)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
except ProtectedError:
return BossHTTPError("Cannot delete {}. It has channels that reference it."
.format(experiment), ErrorCodes.INTEGRITY_ERROR)
class ChannelDetail(APIView):
"""
View to access a channel
"""
@staticmethod
def validate_source_related_channels(experiment, source_channels, related_channels):
"""
Validate that the given source and related channel names refer to existing channels of the experiment and do not overlap
Args:
experiment:
source_channels:
related_channels:
Returns:
"""
common = set(source_channels) & set(related_channels)
if len(common) > 0:
raise BossError("Related channels have to be different from source channels",
ErrorCodes.INVALID_POST_ARGUMENT)
source_channel_obj = []
related_channel_obj = []
try:
for name in source_channels:
source_channel_obj.append(Channel.objects.get(name=name, experiment=experiment))
for name in related_channels:
related_channel_obj.append(Channel.objects.get(name=name, experiment=experiment))
return (source_channel_obj,related_channel_obj)
except Channel.DoesNotExist:
raise BossError("Invalid channel names {} in the list of source/related channels channels ".format(name),
ErrorCodes.INVALID_POST_ARGUMENT)
@staticmethod
def add_source_related_channels(channel, experiment, source_channels, related_channels):
"""
Add a list of source and related channels
Args:
related_channels:
source_channels:
experiment:
channel:
Returns:
Channel: the updated channel if the source/related lists are valid
"""
try:
for source_channel in source_channels:
channel.add_source(source_channel)
for related_channel in related_channels:
channel.related.add(related_channel.pk)
channel.save()
return channel
except Exception as err:
channel.delete()
raise BossError("Exception adding source/related channels.{}".format(err), ErrorCodes.INVALID_POST_ARGUMENT)
@staticmethod
def update_source_related_channels(channel, experiment, source_channels, related_channels):
"""
Update a list of source and related channels
Args:
related_channels: New list of related channels
source_channels: New list of source channels
experiment: Experiment for the current channel
channel: Current channel
Returns:
Updated Channel
"""
try:
# update the list of sources
# Get all the current sources
cur_sources = channel.sources.all()
# Get the list of sources to remove
rm_sources = [ch for ch in cur_sources if ch not in source_channels]
for source in rm_sources:
channel.remove_source(source)
# add new sources
add_sources = [ch for ch in source_channels if ch not in cur_sources]
for source_channel in add_sources:
channel.add_source(source_channel)
cur_related = channel.related.all()
rm_related = [ch for ch in cur_related if ch not in related_channels]
for related in rm_related:
channel.related.remove(related)
add_related = [ch for ch in related_channels if ch not in cur_related]
for related_channel in add_related:
channel.related.add(related_channel.pk)
channel.save()
return channel
except Exception as err:
channel.delete()
raise BossError("Exception adding source/related channels.{}".format(err), ErrorCodes.INVALID_POST_ARGUMENT)
def get(self, request, collection, experiment, channel):
"""
Retrieve information about a channel.
Args:
request: DRF Request object
collection: Collection name
experiment: Experiment name
channel: Channel name
Returns :
Channel
"""
try:
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
channel_obj = Channel.objects.get(name=channel, experiment=experiment_obj)
# Check for permissions
if channel_obj is None:
return BossResourceNotFoundError(channel)
if channel_obj.public or request.user.has_perm("read", channel_obj):
if channel_obj.to_be_deleted is not None:
return BossHTTPError("Invalid Request. This Resource has been marked for deletion",
ErrorCodes.RESOURCE_MARKED_FOR_DELETION)
serializer = ChannelReadSerializer(channel_obj)
return Response(serializer.data)
else:
return BossPermissionError('read', channel)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
except Channel.DoesNotExist:
return BossResourceNotFoundError(channel)
except ValueError:
return BossHTTPError("Value Error in post data", ErrorCodes.TYPE_ERROR)
@transaction.atomic
@check_role("resource-manager")
def post(self, request, collection, experiment, channel):
"""
Post a new Channel
Args:
request: DRF Request object
collection: Collection name
experiment: Experiment name
channel: Channel name
Returns :
Channel
"""
channel_data = request.data.copy()
channel_data['name'] = channel
try:
is_admin = BossPermissionManager.is_in_group(request.user, ADMIN_GRP)
if 'bucket' in channel_data and channel_data['bucket'] and not is_admin:
return BossHTTPError('Only admins can set bucket name', ErrorCodes.MISSING_PERMISSION)
if 'cv_path' in channel_data and channel_data['cv_path'] and not is_admin:
return BossHTTPError('Only admins can set cv_path', ErrorCodes.MISSING_PERMISSION)
# Get the collection and experiment
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
# Check for add permissions
if request.user.has_perm("add", experiment_obj):
channel_data['experiment'] = experiment_obj.pk
use_cloudvol = channel_data.get('storage_type', None) == Channel.StorageType.CLOUD_VOLUME
cv_path = channel_data.get('cv_path', None)
if use_cloudvol and (cv_path is None or cv_path == ''):
channel_data['cv_path'] = f'/{collection}/{experiment}/{channel}'
if use_cloudvol:
# DX NOTE: For now we assume that cloudvolume channels are downsampled. This means
# that the num_hierarchy_levels in the experiment should be limited to the available
# mip levels in the cloudvolume layer.
channel_data['downsample_status'] = 'DOWNSAMPLED'
# The source and related channels are names and need to be removed from the dict before serialization
source_channels = channel_data.pop('sources', [])
related_channels = channel_data.pop('related', [])
# TODO: Removed source channel requirement for annotation channels. Future update should allow source channel from
# different collections.
# Source channels have to be included for new annotation channels
# if 'type' in channel_data and channel_data['type'] == 'annotation' and len(source_channels) == 0:
# return BossHTTPError("Annotation channels require the source channel to be set. "
# "Specify a valid source channel in the post", ErrorCodes.INVALID_POST_ARGUMENT)
# Validate the source and related channels if they are included
channels = self.validate_source_related_channels(experiment_obj, source_channels, related_channels)
source_channels_objs = channels[0]
related_channels_objs = channels[1]
# Validate and create the channel
serializer = ChannelSerializer(data=channel_data)
if serializer.is_valid():
serializer.save(creator=self.request.user)
channel_obj = Channel.objects.get(name=channel_data['name'], experiment=experiment_obj)
# Save source and related channels if they are valid
channel_obj = self.add_source_related_channels(channel_obj, experiment_obj, source_channels_objs,
related_channels_objs)
# Assign permissions to the users primary group and admin group
BossPermissionManager.add_permissions_primary_group(self.request.user, channel_obj)
BossPermissionManager.add_permissions_admin_group(channel_obj)
# Add Lookup key
lookup_key = str(collection_obj.pk) + '&' + str(experiment_obj.pk) + '&' + str(channel_obj.pk)
boss_key = collection_obj.name + '&' + experiment_obj.name + '&' + channel_obj.name
LookUpKey.add_lookup(lookup_key, boss_key, collection_obj.name, experiment_obj.name,
channel_obj.name)
serializer = ChannelReadSerializer(channel_obj)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
else:
return BossPermissionError('add', experiment)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
except Channel.DoesNotExist:
return BossResourceNotFoundError(channel)
except BossError as err:
return err.to_http()
except ValueError:
return BossHTTPError("Value Error in post data", ErrorCodes.TYPE_ERROR)
@transaction.atomic
def put(self, request, collection, experiment, channel):
"""
Update new Channel
Args:
request: DRF Request object
collection: Collection name
experiment: Experiment name
channel: Channel name
Returns :
Channel
"""
if 'name' in request.data:
channel_name = request.data['name']
else:
channel_name = channel
try:
# Check if the object exists
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
channel_obj = Channel.objects.get(name=channel, experiment=experiment_obj)
if request.user.has_perm("update", channel_obj):
data = copy.deepcopy(request.data)
is_admin = BossPermissionManager.is_in_group(request.user, ADMIN_GRP)
if 'storage_type' in data and not is_admin:
return BossHTTPError('Only admins can change storage_type after creation',
ErrorCodes.MISSING_PERMISSION)
if 'bucket' in data and data['bucket'] and not is_admin:
return BossHTTPError('Only admins can set bucket name', ErrorCodes.MISSING_PERMISSION)
if 'cv_path' in data and data['cv_path'] and not is_admin:
return BossHTTPError('Only admins can set cv_path', ErrorCodes.MISSING_PERMISSION)
# The source and related channels are names and need to be removed from the dict before serialization
source_channels = data.pop('sources', [])
related_channels = data.pop('related', [])
# Validate the source and related channels if they are included
channels = self.validate_source_related_channels(experiment_obj, source_channels, related_channels)
source_channels_objs = channels[0]
related_channels_objs = channels[1]
serializer = ChannelUpdateSerializer(channel_obj, data=data, partial=True)
if serializer.is_valid():
serializer.save()
channel_obj = Channel.objects.get(name=channel_name, experiment=experiment_obj)
# Save source and related channels if they are valid
channel_obj = self.update_source_related_channels(channel_obj, experiment_obj, source_channels_objs,
related_channels_objs)
# update the lookup key if you update the name
if 'name' in data and data['name'] != channel:
lookup_key = str(collection_obj.pk) + '&' + str(experiment_obj.pk) + '&' \
+ str(channel_obj.pk)
boss_key = collection_obj.name + '&' + experiment_obj.name + '&' + data['name']
LookUpKey.update_lookup(lookup_key, boss_key, collection_obj.name, experiment_obj.name,
data['name'])
# return the object back to the user
channel = serializer.data['name']
channel_obj = Channel.objects.get(name=channel, experiment=experiment_obj)
serializer = ChannelReadSerializer(channel_obj)
return Response(serializer.data)
else:
return BossHTTPError("{}".format(serializer.errors), ErrorCodes.INVALID_POST_ARGUMENT)
else:
return BossPermissionError('update', channel)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
except Channel.DoesNotExist:
return BossResourceNotFoundError(channel)
@transaction.atomic
@check_role("resource-manager")
def delete(self, request, collection, experiment, channel):
"""
Delete a Channel
Args:
request: DRF Request object
collection: Collection name
experiment: Experiment name
channel: Channel name
Returns :
Http status
"""
try:
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
channel_obj = Channel.objects.get(name=channel, experiment=experiment_obj)
if request.user.has_perm("delete", channel_obj):
# The channel cannot be deleted if this is the source of any other channels
derived_channels = channel_obj.get_derived()
if len(derived_channels) > 0:
return BossHTTPError("Channel {} is the source channel of other channels and cannot be deleted"
.format(channel), ErrorCodes.INTEGRITY_ERROR)
channel_obj.to_be_deleted = timezone.now()
channel_obj.save()
return HttpResponse(status=204)
else:
return BossPermissionError('delete', channel)
except Collection.DoesNotExist:
return BossResourceNotFoundError(collection)
except Experiment.DoesNotExist:
return BossResourceNotFoundError(experiment)
except Channel.DoesNotExist:
return BossResourceNotFoundError(channel)
except ProtectedError:
return BossHTTPError("Cannot delete {}. It has channels that reference it.".format(channel),
ErrorCodes.INTEGRITY_ERROR)
class CollectionList(generics.ListAPIView):
"""
List all collections or create a new collection
"""
# Cache the public collections.
queryset = Collection.objects.filter(public=True)
serializer_class = CollectionSerializer
def list(self, request, *args, **kwargs):
"""
Display only objects that a user has access to
Args:
request: DRF request
*args:
**kwargs:
Returns: Collections that user has view permissions on
"""
collections = get_objects_for_user(request.user, 'read', klass=Collection).exclude(to_be_deleted__isnull=False)
all_colls = collections.union(self.get_queryset())
data = {"collections": [collection.name for collection in all_colls]}
return Response(data)
class ExperimentList(generics.ListAPIView):
"""
List all experiments
"""
queryset = Experiment.objects.all()
serializer_class = ExperimentSerializer
def list(self, request, collection, *args, **kwargs):
"""
return experiments for the collection that the user has permissions for
Args:
request: DRF request
collection : Collection name
*args:
**kwargs:
Returns: Experiments that user has view permissions on and are not marked for deletion
"""
collection_obj = Collection.objects.get(name=collection)
experiments = self.get_queryset().filter(collection=collection_obj).exclude(to_be_deleted__isnull=False)
data = {
"experiments": [
exp.name for exp in experiments
if exp.public or request.user.has_perm('read', exp)
]
}
return Response(data)
class ChannelList(generics.ListAPIView):
"""
List all channels
"""
queryset = Channel.objects.all()
serializer_class = ChannelSerializer
def list(self, request, collection, experiment, *args, **kwargs):
"""
Display only objects that a user has access to
Args:
request: DRF request
collection: Collection Name
experiment: Experiment Name
*args:
**kwargs:
Returns: Channel that user has view permissions on
"""
collection_obj = Collection.objects.get(name=collection)
experiment_obj = Experiment.objects.get(name=experiment, collection=collection_obj)
channels = self.get_queryset().filter(experiment=experiment_obj).exclude(to_be_deleted__isnull=False)
data = {
"channels": [
channel.name for channel in channels
if channel.public or request.user.has_perm('read', channel)
]
}
return Response(data)
class CoordinateFrameList(generics.ListCreateAPIView):
"""
List all coordinate frames
"""
queryset = CoordinateFrame.objects.all()
serializer_class = CoordinateFrameSerializer
def list(self, request, *args, **kwargs):
"""
Display only objects that a user has access to
Args:
request: DRF request
*args:
**kwargs:
Returns: Coordinate frames that user has view permissions on
"""
# Note: the line below returns all coordinate frames that the user has read permissions on
#coords = get_objects_for_user(request.user, 'read', klass=CoordinateFrame).exclude(to_be_deleted__isnull=False)
if 'owner' in request.query_params:
owner_flag = request.query_params.get('owner', "False")
else:
owner_flag = "False"
if str.capitalize(owner_flag) == "True":
coords = CoordinateFrame.objects.filter(creator=request.user).exclude(to_be_deleted__isnull=False)
else:
coords = CoordinateFrame.objects.all().exclude(to_be_deleted__isnull=False)
data = {"coords": [coord.name for coord in coords]}
return Response(data)
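# Illustrative sketch only (added; not part of the original module): one way to
# exercise a view such as CollectionDetail directly with DRF's APIRequestFactory,
# mirroring the style of the integration tests elsewhere in this repository.
# The URL path, collection name, and `user` argument are assumptions.
def _collection_detail_example(user):
    from rest_framework.test import APIRequestFactory, force_authenticate
    factory = APIRequestFactory()
    request = factory.get('/latest/collection/my_col_1/')
    force_authenticate(request, user=user)
    # Returns 200 with the serialized collection when it exists and the user has
    # read permission (or the collection is public); otherwise an error response.
    return CollectionDetail.as_view()(request, collection='my_col_1')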
```
#### File: bossingest/test/setup.py
```python
from pkg_resources import resource_filename
import json
from django.contrib.auth.models import User
from bossutils.aws import get_region
import boto3
from moto import mock_sqs
import time
from bossingest.models import IngestJob
class SetupTests(object):
""" Class to handle setting up tests, including support for mocking
"""
def __init__(self):
    self.mock = False
    self.mock_sqs = None
def start_mocking(self):
"""Method to start mocking"""
self.mock = True
self.mock_sqs = mock_sqs()
self.mock_sqs.start()
def stop_mocking(self):
"""Method to stop mocking"""
self.mock_sqs.stop()
# ***** Upload Task SQS Queue *****
def _create_upload_queue(self, queue_name):
"""Method to create a test sqs for uploading tiles for the ingest"""
client = boto3.client('sqs', region_name=get_region())
response = client.create_queue(QueueName=queue_name)
url = response['QueueUrl']
return url
def create_upload_queue(self, queue_name):
"""Method to create a test sqs for uploading tiles for the ingest"""
if self.mock:
    # moto's mock_sqs is already active (started in start_mocking), so the
    # queue can be created directly; wrapping the result in mock_sqs() was a bug.
    url = self._create_upload_queue(queue_name)
else:
    url = self._create_upload_queue(queue_name)
    # Real SQS queues need a moment before they become usable.
    time.sleep(30)
return url
def _delete_upload_queue(self, queue_url):
"""Method to delete a test sqs for uploading tiles for the ingest"""
client = boto3.client('sqs', region_name=get_region())
client.delete_queue(QueueUrl=queue_url)
def delete_upload_queue(self, queue_name):
"""Method to delete a test sqs for uploading tiles for the ingest"""
if self.mock:
# The mock is already active; call the helper directly.
self._delete_upload_queue(queue_name)
else:
self._delete_upload_queue(queue_name)
# ***** END Upload Task SQS Queue *****
def get_ingest_config_data_dict(self):
"""Method to get the config dictionary ingest job"""
data = {}
data['schema'] = {}
data['schema']['name'] = "boss-v0.1-schema"
data['schema']['validator'] = "BossValidatorV01"
data['client'] = {}
data['client']['backend'] = {}
data['client']['backend']['name'] = "boss"
data['client']['backend']['class'] = "BossBackend"
data['client']['backend']['host'] = "api.theboss.io"
data['client']['backend']['protocol'] = "https"
data['client']['path_processor'] = {}
data['client']['path_processor']['class'] = "ingest.plugins.multipage_tiff.SingleTimeTiffPathProcessor"
data['client']['path_processor']['params'] = {}
data['client']['tile_processor'] = {}
data['client']['tile_processor']['class'] = "ingest.plugins.multipage_tiff.SingleTimeTiffTileProcessor"
data['client']['tile_processor']['params'] = {}
data['database'] = {}
data['database']['collection'] = "my_col_1"
data['database']['experiment'] = "my_exp_1"
data['database']['channel'] = "my_ch_1"
data['ingest_job'] = {}
data['ingest_job']['resolution'] = 0
data['ingest_job']['extent'] = {}
data['ingest_job']['extent']['x'] = [0, 2048]
data['ingest_job']['extent']['y'] = [0, 2048]
data['ingest_job']['extent']['z'] = [0, 40]
data['ingest_job']['extent']['t'] = [0, 1]
data['ingest_job']['tile_size'] = {}
data['ingest_job']['tile_size']['x'] = 512
data['ingest_job']['tile_size']['y'] = 512
data['ingest_job']['tile_size']['z'] = 1
data['ingest_job']['tile_size']['t'] = 1
return data
def get_ingest_config_data_dict_volumetric(self):
"""Method to get the config dictionary ingest job"""
data = {}
data['schema'] = {}
data['schema']['name'] = "boss-v0.2-schema"
data['schema']['validator'] = "BossValidatorV02"
data['client'] = {}
data['client']['backend'] = {}
data['client']['backend']['name'] = "boss"
data['client']['backend']['class'] = "BossBackend"
data['client']['backend']['host'] = "api.theboss.io"
data['client']['backend']['protocol'] = "https"
data['client']['path_processor'] = {}
data['client']['path_processor']['class'] = "ingest.plugins.multipage_tiff.SingleTimeTiffPathProcessor"
data['client']['path_processor']['params'] = {}
# This isn't a proper chunk processor but fine for testing purposes.
data['client']['chunk_processor'] = {}
data['client']['chunk_processor']['class'] = "ingest.plugins.multipage_tiff.SingleTimeTiffTileProcessor"
data['client']['chunk_processor']['params'] = {}
data['database'] = {}
data['database']['collection'] = "my_col_1"
data['database']['experiment'] = "my_exp_1"
data['database']['channel'] = "my_ch_1"
data['ingest_job'] = {}
data['ingest_job']['resolution'] = 0
data['ingest_job']['extent'] = {}
data['ingest_job']['extent']['x'] = [0, 4096]
data['ingest_job']['extent']['y'] = [0, 4096]
data['ingest_job']['extent']['z'] = [0, 64]
data['ingest_job']['extent']['t'] = [0, 1]
data['ingest_job']['chunk_size'] = {}
data['ingest_job']['chunk_size']['x'] = 1024
data['ingest_job']['chunk_size']['y'] = 1024
data['ingest_job']['chunk_size']['z'] = 64
data['ingest_job']['ingest_type'] = 'volumetric'
return data
def create_ingest_job(self, creator = None):
config_data = self.get_ingest_config_data_dict()
# create the django model for the job
if creator is None:
user = User.objects.get(pk=1)
else:
user = creator
ingest_job_data = {
'creator': user,
'collection': config_data["database"]["collection"],
'experiment': config_data["database"]["experiment"],
'channel': config_data["database"]["channel"],
'resolution': 0,
'config_data': config_data,
'x_start': config_data["ingest_job"]["extent"]["x"][0],
'x_stop': config_data["ingest_job"]["extent"]["x"][1],
'y_start': config_data["ingest_job"]["extent"]["y"][0],
'y_stop': config_data["ingest_job"]["extent"]["y"][1],
'z_start': config_data["ingest_job"]["extent"]["z"][0],
'z_stop': config_data["ingest_job"]["extent"]["z"][1],
't_start': config_data["ingest_job"]["extent"]["t"][0],
't_stop': config_data["ingest_job"]["extent"]["t"][1],
'tile_size_x': config_data["ingest_job"]["tile_size"]["x"],
'tile_size_y': config_data["ingest_job"]["tile_size"]["y"],
'tile_size_z': config_data["ingest_job"]["tile_size"]["z"],
'tile_size_t': config_data["ingest_job"]["tile_size"]["t"],
}
job = IngestJob.objects.create(**ingest_job_data)
job.save()
return job
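# Minimal usage sketch (added for illustration, not part of the original file):
# a typical mocked flow with SetupTests. The queue name is an arbitrary example.
def _setup_tests_example():
    setup = SetupTests()
    setup.start_mocking()
    try:
        url = setup.create_upload_queue('example-upload-queue')
        # ... exercise code that reads/writes the queue at `url` ...
        setup.delete_upload_queue(url)
    finally:
        setup.stop_mocking()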
```
#### File: bossingest/test/test_ingest_manager.py
```python
import json
from unittest.mock import patch, MagicMock
from bossingest.ingest_manager import IngestManager
from bossingest.models import IngestJob
from bossingest.test.setup import SetupTests
from bosscore.test.setup_db import SetupTestDB
from bosscore.error import ErrorCodes
from bosscore.lookup import LookUpKey
import bossutils.aws
from django.contrib.auth.models import User
from ndingest.ndqueue.uploadqueue import UploadQueue
from rest_framework.test import APITestCase
class BossIngestManagerTest(APITestCase):
def setUp(self):
"""
Initialize the database
:return:
"""
dbsetup = SetupTestDB()
self.user = dbsetup.create_super_user(username='testuser', email='<EMAIL>', password='<PASSWORD>')
dbsetup.set_user(self.user)
self.client.force_login(self.user)
dbsetup.insert_ingest_test_data()
setup = SetupTests()
# Get the config_data for v1 schema
config_data = setup.get_ingest_config_data_dict()
self.example_config_data = config_data
self.volumetric_config_data = setup.get_ingest_config_data_dict_volumetric()
# Unit under test.
self.ingest_mgr = IngestManager()
def test_validate_ingest(self):
"""Method to test validation method"""
#Validate schema and config file
response = self.ingest_mgr.validate_config_file(self.example_config_data)
assert (response is True)
#Validate properties
response = self.ingest_mgr.validate_properties()
assert (response is True)
def test_validate_config_file(self):
"""Method to test validation of a config file"""
self.ingest_mgr.validate_config_file(self.example_config_data)
assert(self.ingest_mgr.config is not None)
assert (self.ingest_mgr.config.config_data is not None)
def test_validate_properties(self):
"""Methos to test validation of properties of the config data"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
assert (self.ingest_mgr.collection.name == 'my_col_1')
assert (self.ingest_mgr.experiment.name == 'my_exp_1')
assert (self.ingest_mgr.channel.name == 'my_ch_1')
def test_create_ingest_job(self):
"""Method to test creation of a ingest job from a config_data dict"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
assert (job.id is not None)
assert (job.ingest_type == IngestJob.TILE_INGEST)
assert (job.tile_size_x == 512)
assert (job.tile_size_y == 512)
assert (job.tile_size_z == 1)
assert (job.tile_size_t == 1)
def test_create_ingest_job_volumetric(self):
self.ingest_mgr.validate_config_file(self.volumetric_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
assert (job.id is not None)
assert (job.ingest_type == IngestJob.VOLUMETRIC_INGEST)
assert (job.tile_size_x == 1024)
assert (job.tile_size_y == 1024)
assert (job.tile_size_z == 64)
assert (job.tile_size_t == 1)
def test_generate_upload_queue_args_tile_job(self):
"""Ensure ingest_type set properly"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
actual = self.ingest_mgr._generate_upload_queue_args(job)
assert actual['ingest_type'] == IngestJob.TILE_INGEST
assert actual['z_chunk_size'] == 16
def test_generate_upload_queue_args_volumetric_job(self):
"""Ensure ingest_type set properly"""
self.ingest_mgr.validate_config_file(self.volumetric_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
actual = self.ingest_mgr._generate_upload_queue_args(job)
assert actual['ingest_type'] == IngestJob.VOLUMETRIC_INGEST
assert actual['z_chunk_size'] == 64
assert actual['ingest_queue'] is None
def test_tile_bucket_name(self):
""" Test get tile bucket name"""
tile_bucket_name = self.ingest_mgr.get_tile_bucket()
assert(tile_bucket_name is not None)
def test_get_resource_data(self):
"""Run the method and ensure keys set"""
self.ingest_mgr.validate_config_file(self.example_config_data)
self.ingest_mgr.validate_properties()
self.ingest_mgr.owner = self.user.pk
job = self.ingest_mgr.create_ingest_job()
actual = self.ingest_mgr.get_resource_data(job.id)
self.assertIn('boss_key', actual)
self.assertIn('lookup_key', actual)
self.assertIn('channel', actual)
self.assertIn('experiment', actual)
self.assertIn('coord_frame', actual)
```
#### File: django/bossingest/utils.py
```python
import boto3
def get_sqs_num_msgs(url, region):
"""
Get the approximate number of messages in the sqs queue.
Args:
url (str): The URL of the SQS queue.
region (str): AWS region the queue lives in.
Returns:
(int): Approximate number of messages in the queue.
"""
sqs = boto3.client('sqs', region)
resp = sqs.get_queue_attributes(QueueUrl=url, AttributeNames=['ApproximateNumberOfMessages'])
return int(resp['Attributes']['ApproximateNumberOfMessages'])
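# Illustrative usage sketch (added comment; the URL and region below are
# placeholders, not values from the project):
#
#   depth = get_sqs_num_msgs(
#       'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue',
#       'us-east-1')
#   print('messages waiting:', depth)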
```
#### File: bossspatialdb/test/int_test_to_black_view.py
```python
from django.conf import settings
from rest_framework.test import APITestCase, APIRequestFactory
from rest_framework.test import force_authenticate
from rest_framework import status
import numpy as np
from bossspatialdb.views import CutoutToBlack, Cutout
from bosscore.test.setup_db import DjangoSetupLayer
import redis
import blosc
version = settings.BOSS_VERSION
class CutoutToBlackView8BitTests(APITestCase):
layer = DjangoSetupLayer
def test_uint8_cutout_to_black(self):
"""Cutout_to_black integration test with uint8 data"""
test_mat = np.random.randint(1, 254, (16, 128, 128))
test_mat = test_mat.astype(np.uint8)
h = test_mat.tobytes()
bb = blosc.compress(h, typesize=8)
# Create request
factory = APIRequestFactory()
request = factory.post('/' + version + '/cutout/col1/exp1/channel1/0/0:128/0:128/0:16/', bb,
content_type='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel1',
resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
factory = APIRequestFactory()
request = factory.put('/' + version + '/cutout/to_black/col1/exp1/channel1/0/0:64/0:64/0:16/',
content_type='application/blosc-python')
# log in user
force_authenticate(request, user=self.user)
# Delete a portion of Data
response = CutoutToBlack.as_view()(request, collection='col1', experiment='exp1', channel='channel1',
resolution='0', x_range='0:64', y_range='0:64', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Create Request to get original posted data
request = factory.get('/' + version + '/cutout/col1/exp1/channel1/0/0:128/0:128/0:16/',
accepts='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel1',
resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Decompress
raw_data = blosc.decompress(response.content)
data_mat = np.frombuffer(raw_data, dtype=np.uint8)
data_mat = np.reshape(data_mat, (16, 128, 128), order='C')
# Test for data equality (what you put in is what you got back!)
test_mat[:,0:64, 0:64] = np.zeros((16,64,64), dtype=np.uint8)
np.testing.assert_array_equal(data_mat, test_mat)
def setUp(self):
""" Copy params from the Layer setUpClass
"""
# Setup config
self.kvio_config = self.layer.kvio_config
self.state_config = self.layer.state_config
self.object_store_config = self.layer.object_store_config
self.user = self.layer.user
# Log Django User in
self.client.force_login(self.user)
# Flush cache between tests
client = redis.StrictRedis(host=self.kvio_config['cache_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
client = redis.StrictRedis(host=self.state_config['cache_state_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
self.dbsetup = self.layer.django_setup_helper
def tearDown(self):
# Flush cache between tests
client = redis.StrictRedis(host=self.kvio_config['cache_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
client = redis.StrictRedis(host=self.state_config['cache_state_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
self.layer.clear_flush_queue()
class CutoutToBlackView16BitTests(APITestCase):
layer = DjangoSetupLayer
def test_uint16_cutout_to_black(self):
"""Cutout_to_black integration test with uint16 data"""
test_mat = np.random.randint(1, 254, (16, 128, 128))
test_mat = test_mat.astype(np.uint16)
h = test_mat.tobytes()
bb = blosc.compress(h, typesize=16)
# Create request
factory = APIRequestFactory()
request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/', bb,
content_type='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
factory = APIRequestFactory()
request = factory.put('/' + version + '/cutout/to_black/col1/exp1/channel2/0/0:64/0:64/0:16/',
content_type='application/blosc-python')
# log in user
force_authenticate(request, user=self.user)
# Delete a portion of Data
response = CutoutToBlack.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
resolution='0', x_range='0:64', y_range='0:64', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Create Request to get original posted data
request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/',
accepts='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Decompress
raw_data = blosc.decompress(response.content)
data_mat = np.frombuffer(raw_data, dtype=np.uint16)
data_mat = np.reshape(data_mat, (16, 128, 128), order='C')
# Test for data equality (what you put in is what you got back!)
test_mat[:,0:64, 0:64] = np.zeros((16,64,64), dtype=np.uint16)
np.testing.assert_array_equal(data_mat, test_mat)
def setUp(self):
""" Copy params from the Layer setUpClass
"""
# Setup config
self.kvio_config = self.layer.kvio_config
self.state_config = self.layer.state_config
self.object_store_config = self.layer.object_store_config
self.user = self.layer.user
# Log Django User in
self.client.force_login(self.user)
# Flush cache between tests
client = redis.StrictRedis(host=self.kvio_config['cache_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
client = redis.StrictRedis(host=self.state_config['cache_state_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
self.dbsetup = self.layer.django_setup_helper
def tearDown(self):
# Flush cache between tests
client = redis.StrictRedis(host=self.kvio_config['cache_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
client = redis.StrictRedis(host=self.state_config['cache_state_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
self.layer.clear_flush_queue()
class CutoutToBlackView64BitTests(APITestCase):
layer = DjangoSetupLayer
def test_uint64_cutout_to_black(self):
"""Cutout_to_black integration test with uint64 data"""
test_mat = np.random.randint(1, 254, (16, 128, 128))
test_mat = test_mat.astype(np.uint64)
h = test_mat.tobytes()
bb = blosc.compress(h, typesize=64)
# Create request
factory = APIRequestFactory()
request = factory.post('/' + version + '/cutout/col1/exp1/layer1/0/0:128/0:128/0:16/', bb,
content_type='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
factory = APIRequestFactory()
request = factory.put('/' + version + '/cutout/to_black/col1/exp1/layer1/0/0:64/0:64/0:16/',
content_type='application/blosc-python')
# log in user
force_authenticate(request, user=self.user)
# Delete a portion of Data
response = CutoutToBlack.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
resolution='0', x_range='0:64', y_range='0:64', z_range='0:16', t_range=None)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Create Request to get original posted data
request = factory.get('/' + version + '/cutout/col1/exp1/layer1/0/0:128/0:128/0:16/',
accepts='application/blosc')
# log in user
force_authenticate(request, user=self.user)
# Make request
response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Decompress
raw_data = blosc.decompress(response.content)
data_mat = np.frombuffer(raw_data, dtype=np.uint64)
data_mat = np.reshape(data_mat, (16, 128, 128), order='C')
# Test for data equality (what you put in is what you got back!)
test_mat[:,0:64, 0:64] = np.zeros((16,64,64), dtype=np.uint64)
np.testing.assert_array_equal(data_mat, test_mat)
def setUp(self):
""" Copy params from the Layer setUpClass
"""
# Setup config
self.kvio_config = self.layer.kvio_config
self.state_config = self.layer.state_config
self.object_store_config = self.layer.object_store_config
self.user = self.layer.user
# Log Django User in
self.client.force_login(self.user)
# Flush cache between tests
client = redis.StrictRedis(host=self.kvio_config['cache_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
client = redis.StrictRedis(host=self.state_config['cache_state_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
self.dbsetup = self.layer.django_setup_helper
def tearDown(self):
# Flush cache between tests
client = redis.StrictRedis(host=self.kvio_config['cache_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
client = redis.StrictRedis(host=self.state_config['cache_state_host'],
port=6379, db=1, decode_responses=False)
client.flushdb()
self.layer.clear_flush_queue()
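# Optional refactor sketch (added for illustration, not part of the original
# tests): the three test classes above repeat the same cache-flushing logic in
# setUp/tearDown, so it could live in one shared helper like this.
def _flush_test_caches(kvio_config, state_config):
    for host in (kvio_config['cache_host'], state_config['cache_state_host']):
        redis.StrictRedis(host=host, port=6379, db=1,
                          decode_responses=False).flushdb()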
```
#### File: django/boss/views.py
```python
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import permissions
from django.contrib.auth.mixins import LoginRequiredMixin
from bosscore.error import BossHTTPError, ErrorCodes
from django.conf import settings
from bossutils.logger import bossLogger
import socket
version = settings.BOSS_VERSION
class Ping(APIView):
"""
View to provide a basic health/connectivity check
No Auth Required
"""
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
renderer_classes = (JSONRenderer, )
def get(self, request):
"""
Return the server IP
:param request: DRF Request object
:type request: rest_framework.request.Request
:return:
"""
content = {'ip': socket.gethostbyname(socket.gethostname())}
return Response(content)
class Unsupported(APIView):
"""
View to handle unsupported API versions
No Auth Required
"""
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
renderer_classes = (JSONRenderer, )
def get(self, request):
"""
Return the unsupported error
:param request: DRF Request object
:type request: rest_framework.request.Request
:return:
"""
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
def post(self, request):
"""
Return the unsupported error
:param request: DRF Request object
:type request: rest_framework.request.Request
:return:
"""
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
def delete(self, request):
"""
Return the unsupported error
:param request: DRF Request object
:type request: rest_framework.request.Request
:return:
"""
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
def put(self, request):
"""
Return the unsupported error
:param request: DRF Request object
:type request: rest_framework.request.Request
:return:
"""
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View
# imported under an alias to deconflict with our Token class
from rest_framework.authtoken.models import Token as TokenModel
class Token(LoginRequiredMixin, View):
def get(self, request):
action = request.GET.get('action', None)
try:
token = TokenModel.objects.get(user = request.user)
if action == "Revoke":
token.delete()
token = None
except TokenModel.DoesNotExist:
if action == "Generate":
token = TokenModel.objects.create(user = request.user)
else:
token = None
if token is None:
content = ""
button = "Generate"
else:
content = "<textarea>{}</textarea>".format(token)
button = "Revoke"
html = """
<html>
<head><title>BOSS Token Management</title></head>
<body>
{1}
<a href="{0}?action={2}">{2}</a>
</body>
</html>
""".format(request.path_info, content, button)
return HttpResponse(html)
from boss.throttling import MetricDatabase
from bosscore.constants import ADMIN_USER
from django.contrib.auth.models import User
from bosscore.models import ThrottleMetric, ThrottleThreshold, ThrottleUsage
class Metric(LoginRequiredMixin, APIView):
"""
View to handle Metric API requests
Auth is Required
"""
renderer_classes = (JSONRenderer, )
def __init__(self):
"""
Initialize the view with RedisMetrics object
"""
self.blog = bossLogger()
self.metricdb = MetricDatabase()
def _get_admin_user(self):
"""
Lookup the admin user
Returns: the User object for the Admin user
"""
return User.objects.get(username=ADMIN_USER)
def put(self, request):
"""
Handle PUT requests
"""
user = request.GET.get('user',str(request.user))
if not request.user == self._get_admin_user():
user = request.GET.get('user',str(request.user))
return BossHTTPError(" User {} is not authorized ".format(user),
ErrorCodes.ACCESS_DENIED_UNKNOWN)
paths = request.path_info.split("/")
if paths[-1] == 'metrics':
self.metricdb.updateMetrics(request.data)
if paths[-1] == 'thresholds':
self.metricdb.updateThresholds(request.data)
return HttpResponse(status=201)
def get(self, request):
"""
Handles the get request for metrics
"""
paths = request.path_info.split("/")
metric = request.GET.get('metric')
user = request.GET.get('user',str(request.user))
userIsAdmin = request.user == self._get_admin_user()
# determine response
if paths[-1] == 'thresholds':
if userIsAdmin:
return Response(self.metricdb.getThresholdsAsJson())
if paths[-1] == 'metrics':
if userIsAdmin:
return Response(self.metricdb.getMetricsAsJson())
if paths[-1] == 'usage':
if userIsAdmin:
return Response(self.metricdb.getUsageAsJson())
# show specific metric values
if not metric:
metric = self.metricdb.encodeMetric(MetricDatabase.USER_LEVEL_METRIC, user)
level,name = self.metricdb.decodeMetric(metric)
usersUsage = name == user
# make sure only admin user can see other metrics
if usersUsage or userIsAdmin:
return Response(self.metricdb.getUsageAsJson(metric))
return BossHTTPError(" User {} is not authorized ".format(user),
ErrorCodes.ACCESS_DENIED_UNKNOWN)
``` |
{
"source": "jhuapl-boss/boss-manage",
"score": 2
} |
#### File: boss-manage/bin/boss_rds.py
```python
import argparse
import os
import logging
import alter_path
from lib import boss_rds
from lib import configuration
COMMANDS = {
"sql-tables":boss_rds.sql_tables,
"sql-list": boss_rds.sql_list,
"sql-resource-lookup": boss_rds.sql_resource_lookup_key,
"sql-coord-frame-lookup": boss_rds.sql_coordinate_frame_lookup_key,
"sql-job-ids-lookup": boss_rds.sql_channel_job_ids,
"sql-get-names-from-lookup-keys": boss_rds.sql_get_names_from_lookup_keys,
}
HELP = {
"sql-tables",
"sql-list",
"sql-resource-lookup <coll/exp/chan> | <coll/exp> | <coll>",
"sql-coord-frame-lookup <coordinate_frame>",
"sql-job-ids-lookup <coll/exp/channel>",
'sql-get-names-from-lookup-keys "col1&exp1&chan1" . . . "coln&expn&chann"',
}
if __name__ == '__main__':
os.chdir(os.path.abspath(os.path.dirname(__file__)))
def create_help(header, options):
"""Create formated help."""
return "\n" + header + "\n" + \
"\n".join(map(lambda x: " " + x, options)) + "\n"
commands = list(COMMANDS.keys())
instructions = list(HELP)
commands_help = create_help("command supports the following:", instructions)
parser = configuration.BossParser(description = "Script for manipulating the endpoint's RDS instance",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=commands_help)
parser.add_argument("--quiet", "-q",
action='store_true',
default=False,
help='Run the script quietly, no print statements will be displayed.')
parser.add_bosslet()
parser.add_argument("command",
choices = commands,
metavar = "command",
help = "Command to execute")
parser.add_argument("arguments",
nargs = "*",
help = "Arguments to pass to the command")
args = parser.parse_args()
# Configure logging if verbose
if not args.quiet:
logging.basicConfig(level=logging.INFO)
if args.command in COMMANDS:
if args.command == "sql-get-names-from-lookup-keys":
# This command needs its args as a list.
COMMANDS[args.command](args.bosslet_config, args.arguments)
else:
COMMANDS[args.command](args.bosslet_config, *args.arguments)
else:
parser.print_usage()
parser.exit(1)
```
#### File: boss-manage/bin/iam_utils.py
```python
import argparse
import sys
import os
import pprint
import datetime
import json
from boto3 import Session
from botocore.exceptions import ClientError
import alter_path
from lib import aws
from lib import utils
from lib import constants as const
from lib.boto_wrapper import IamWrapper
from lib import configuration
from lib import console
REGION_STANDIN = '==region=='
ACCOUNT_STANDIN = '==account=='
IAM_CONFIG_DIR = const.repo_path("config", "iam")
DEFAULT_POLICY_FILE = os.path.join(IAM_CONFIG_DIR, "policies.json")
DEFAULT_GROUP_FILE = os.path.join(IAM_CONFIG_DIR, "groups.json")
DEFAULT_ROLES_FILE = os.path.join(IAM_CONFIG_DIR, "roles.json")
DEFAULT_NAMES_FILE = os.path.join(IAM_CONFIG_DIR, "names.json")
DEFAULT_REMOVED_FILE = os.path.join(IAM_CONFIG_DIR, "removed.json")
class AllResources(object):
"""Class that always returns True for any `obj in AllResources()`"""
def __contains__(self, value):
return True
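# Editor's sketch: AllResources can stand in for a whitelist so that membership
# checks always pass (the name below is hypothetical):
#   >>> 'any-policy-name' in AllResources()
#   True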
def json_dumps(obj):
"""A wrapper to `json.dumps` that provides the common arguments used
throughout the code
Args:
obj (object): Object to convert to JSON
Returns:
str: String containing formatted JSON
"""
def sort(o):
if isinstance(o, list):
if len(o) > 0 and isinstance(o[0], str):
o.sort()
else:
for i in o:
sort(i)
elif isinstance(o, dict):
for k,v in o.items():
sort(v)
return o
return json.dumps(sort(obj), indent=2, sort_keys=True)
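# Editor's sketch of the normalization above (values are illustrative):
#   json_dumps({'Action': ['s3:Put*', 's3:Get*'], 'Effect': 'Allow'})
# returns JSON with keys sorted and string lists sorted in place, so two
# documents that differ only in ordering compare equal as strings.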
def pformat_truncate(o, width=50):
"""Convert the object to printable representation and truncate it if too long
Note: If the object is a multiline string or pformat returns a multiline
string, only the first line of the string will be returned
Args:
o (object): Object to be displayed
width (int): Maximum length for the resulting string
Returns:
str: String containing the formatted object
"""
s = pprint.pformat(o)
split = '\n' in s
if split:
s = s.splitlines()[0]
if len(s) > width:
s = s[:width-3] + '...'
elif split:
# If the line wasn't truncated but was the first line
# in a multi-line string then add a marker so we know
# that the result was modified
s = s + '...'
return s
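# Editor's note: a value whose formatted representation is longer than `width`
# is cut to width-3 characters plus '...'; a multi-line representation keeps only
# its first line and is also marked with '...'. Useful for one-line debug logging
# of large policy documents.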
class DryRunWrapper(object):
"""Wrapper around a Boto3 IAM client object that will print calls to specific
functions instead of executing them. If the requested function is not on the
blacklist then it will be allowed to execute
"""
def __init__(self, to_wrap):
"""Args:
to_wrap (Client): Boto3 IAM Client to wrap
"""
self.to_wrap = to_wrap
self.prefix = 'boto3.client("iam").'
self.functions = [
# Groups
'create_group',
'put_group_policy',
'attach_group_policy',
'delete_group_policy',
'detach_group_policy',
'add_role_to_instance_profile',
'create_instance_profile',
'remove_role_from_instance_profile',
'delete_instance_profile',
'delete_group',
# Roles
'create_role',
'update_assume_role_policy',
'put_role_policy',
'attach_role_policy',
'delete_role_policy',
'detach_role_policy',
'delete_role',
# Policies
'create_policy',
'delete_policy_version',
'create_policy_version',
'detach_user_policy',
'delete_policy',
]
# ???: Redirect any function starting with create/put/attach/delete/detach/add/remove/update
def __getattr__(self, function):
if function not in self.functions:
return getattr(self.to_wrap, function)
def call(*args, **kwargs):
"""Standin for the requested function that will print the function and arguments"""
args_kwargs = ", ".join([*[pformat_truncate(arg) for arg in args],
*["{} = {}".format(k, pformat_truncate(v))
for k, v in kwargs.items()]])
console.debug("{}{}({})".format(self.prefix, function, args_kwargs))
return call
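# Editor's sketch of intended use (assumes a configured boto3 session named `session`):
#   client = DryRunWrapper(session.client('iam'))
#   client.create_group(GroupName='example', Path='/')  # mutating call: printed via console.debug, not executed
#   client.list_groups()                                 # read-only call: passed through to the real client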
class IamUtils(object):
"""Object for exporting or importing IAM groups, roles, and policies"""
def __init__(self, bosslet_config, dry_run=False):
"""
Args:
bosslet_config (BossConfiguration): Bosslet to export from or import to
"""
self.bosslet_config = bosslet_config
self.session = bosslet_config.session
self.resource = self.session.resource('iam')
self.client = self.session.client('iam')
if dry_run:
self.client = DryRunWrapper(self.client)
self.iw = IamWrapper(self.client)
self._policy_lookup = None
######################################################
## Generic functions that are resource type agnostic ##
######################################################
def to_generic(self, data):
"""Replace region and account references with standin values"""
# str(ACCOUNT_ID) used as the value could be an integer
data = data.replace(self.bosslet_config.REGION, REGION_STANDIN)
data = data.replace(str(self.bosslet_config.ACCOUNT_ID), ACCOUNT_STANDIN)
return data
def from_generic(self, data):
"""Replace standin values with the bosslet's region and account reference"""
# str(ACCOUNT_ID) used as the value could be an integer
data = data.replace(REGION_STANDIN, self.bosslet_config.REGION)
data = data.replace(ACCOUNT_STANDIN, str(self.bosslet_config.ACCOUNT_ID))
return data
def export(self, resource_type, filename, filters=[]):
"""Export the requested groups/roles/policies from IAM and save to the given file
Args:
resource_type (str): One of - groups, roles, policies
filename (str): Name of the file to save the results to
filters (list[str]): List of group/role/policy names that should be exported
"""
objects = self.load_from_aws(resource_type, filters)
self.save(filename, objects)
def save(self, filename, objects):
"""Save the given IAM objects to disk
Note: serialized objects will have bosslet specific values removed
Args:
filename (str): Name of the file to save the results to
objects (object): Objects to be serialized, converted, and saved
"""
with open(filename, 'w') as fh:
data = json_dumps(objects)
data = self.to_generic(data) # Replace stack specific values
fh.write(data)
def load(self, resource_type, filename, filters=[]):
"""Load previously exported data from disk
Note: Loaded data is compared against the filter list and any filtered
out items will produce a warning message
Args:
resource_type (str): One of - groups, roles, policies
filename (str): Name of the file with data to load
filters (list[str]): List of group/role/policy names that should be loaded
Returns:
list[objects]: List of filtered objects
"""
with open(filename, 'r') as fh:
data = fh.read()
data = self.from_generic(data) # Replace generic values
data = json.loads(data)
key = {
'groups': 'GroupName',
'roles': 'RoleName',
'policies': 'PolicyName',
}[resource_type]
# Verify that the loaded data is valid
objects = []
for item in data:
if item[key] not in filters:
fmt = "{} {} not in whitelist, not importing"
console.warning(fmt.format(resource_type, item[key]))
else:
objects.append(item)
return objects
def load_from_aws(self, resource_type, names=[]):
"""Load the current IAM resources
The IAM results are converted to the internal representation used and
filtered to only include the requested resources
Args:
resource_type (str): One of - groups, roles, policies
names (list[str]): List of group/role/policy names that should be loaded
Returns:
list[objects]: List of converted and filtered objects
"""
if resource_type == 'groups':
filter = ['Group']
list_key = 'GroupDetailList'
name_key = 'GroupName'
elif resource_type == 'roles':
filter = ['Role']
list_key = 'RoleDetailList'
name_key = 'RoleName'
elif resource_type == 'policies':
filter = ['LocalManagedPolicy']
list_key = 'Policies'
name_key = 'PolicyName'
else:
raise ValueError("resource_type '{}' is not supported".format(resource_type))
resources = []
kwargs = { 'MaxItems': 1000, 'Filter': filter }
resp = {'IsTruncated': True}
while resp['IsTruncated']:
resp = self.client.get_account_authorization_details(**kwargs)
kwargs['Marker'] = resp.get('Marker')
resources.extend([self.extract(resource_type, item)
for item in resp[list_key]
if item[name_key] in names])
# remove any resources that were excluded for other reasons
resources = [r for r in resources if r is not None]
return resources
def extract(self, resource_type, resource):
"""Convert the IAM object into the internal representation used
Args:
resource_type (str): One of - groups, roles, policies
resource (object): IAM object
Returns:
object: Converted IAM object
"""
if resource_type == 'groups':
group = {
'GroupName': resource['GroupName'],
'Path': resource['Path'],
'AttachedManagedPolicies': [policy['PolicyArn']
for policy in resource['AttachedManagedPolicies']],
'GroupPolicyList': [{'PolicyDocument': policy['PolicyDocument'],
'PolicyName': policy['PolicyName']}
for policy in resource['GroupPolicyList']],
}
return group
elif resource_type == 'roles':
if resource['Path'].startswith('/aws-service-role/'):
return None
role = {
'RoleName': resource['RoleName'],
'Path': resource['Path'],
'AssumeRolePolicyDocument': resource['AssumeRolePolicyDocument'],
'AttachedManagedPolicies': [policy['PolicyArn']
for policy in resource['AttachedManagedPolicies']],
'RolePolicyList': resource['RolePolicyList'],
'InstanceProfileList': [{'InstanceProfileName': profile['InstanceProfileName'],
'Path': profile['Path']}
for profile in resource['InstanceProfileList']],
}
return role
elif resource_type == 'policies':
for version in resource['PolicyVersionList']:
if version['IsDefaultVersion']:
policy = {
'PolicyName': resource['PolicyName'],
'Path': resource['Path'],
'PolicyDocument': version['Document'],
}
if 'Description' in resource:
policy['Description'] = resource['Description']
return policy
def import_(self, resource_type, filename, filters=[]):
"""Load the given groups/roles/policies and import them into IAM
Args:
resource_type (str): One of - groups, roles, policies
filename (str): Name of the file containing exported data to load
filters (list[str]): List of group/role/policy names that should be imported
"""
current = self.load_from_aws(resource_type, filters)
desired = self.load(resource_type, filename, filters)
self.update_aws(resource_type, current, desired)
def update_aws(self, resource_type, current, desired):
"""Compare loaded data against the current data in IAM and create or
update IAM to reflect the loaded data
Args:
resource_type (str): One of - groups, roles, policies
current (list[object]): List of objects loaded from IAM
desired (list[object]): List of objects loaded from disk
"""
key = {
'groups': 'GroupName',
'roles': 'RoleName',
'policies': 'PolicyName',
}[resource_type]
lookup = { resource[key]: resource
for resource in current }
for resource in desired:
resource_ = lookup.get(resource[key])
if resource_ is None: # Doesn't exist currently, create
console.info("Creating {} {}".format(key[:-4], resource[key]))
try:
if resource_type == 'groups':
self.group_create(resource)
elif resource_type == 'roles':
self.role_create(resource)
elif resource_type == 'policies':
self.policy_create(resource)
except ClientError as ex:
if ex.response['Error']['Code'] == 'EntityAlreadyExists':
console.error("{} {} already exists cannot load again.".format(key, resource[key]))
else:
console.error("Problem creating {}: {}".format(resource_type, resource[key]))
console.error("\tDetails: {}".format(str(ex)))
else: # Currently exists, compare and update
console.info("Updating {} {}".format(key[:-4], resource[key]))
if resource['Path'] != resource_['Path']:
console.warning("Paths differ for {} {}: '{}' != '{}'".format(key,
resource[key],
resource['Path'],
resource_['Path']))
console.info("You will need to manually delete the old resource for the Path to be changed")
continue
if resource_type == 'groups':
self.group_update(resource, resource_)
elif resource_type == 'roles':
self.role_update(resource, resource_)
elif resource_type == 'policies':
self.policy_update(resource, resource_)
def remove(self, resource_type, resources):
"""Remove the given groups/roles/policies from IAM
Args:
resource_type (str): One of - groups, roles, policies
resources (list[str]): List of group/role/policy names that should be removed
"""
for resource in resources:
if resource_type == 'groups':
self.group_remove(resource)
elif resource_type == 'roles':
self.role_remove(resource)
elif resource_type == 'policies':
self.policy_remove(resource)
######################################################
## Resource type specific create / update functions ##
######################################################
##########
# Groups
def group_create(self, resource):
"""Create a new IAM Group
Args:
resource (object): IAM Group definition to create
"""
self.iw.create_group(resource["GroupName"], resource["Path"])
for policy in resource["GroupPolicyList"]:
self.iw.put_group_policy(resource["GroupName"],
policy["PolicyName"],
policy["PolicyDocument"])
for policy in resource["AttachedManagedPolicies"]:
self.iw.attach_group_policy(resource["GroupName"], policy)
def group_update(self, resource, resource_):
"""Compare and potentially update the referenced IAM Group
Args:
resource (object): Desired IAM Group definition
resource_ (object): Current IAM Group definition
"""
lookup = { policy['PolicyName'] : policy['PolicyDocument']
for policy in resource_['GroupPolicyList'] }
for policy in resource["GroupPolicyList"]:
policy_ = lookup.get(policy['PolicyName'])
if policy_ is None:
self.iw.put_group_policy(resource["GroupName"],
policy["PolicyName"],
policy["PolicyDocument"])
else:
del lookup[policy['PolicyName']]
document = json_dumps(policy['PolicyDocument'])
document_ = json_dumps(policy_)
if document != document_:
self.iw.put_group_policy(resource["GroupName"],
policy["PolicyName"],
policy["PolicyDocument"])
for policy in lookup.keys():
# AWS has a policy that is not in the desired version, it should be deleted.
self.iw.delete_group_policy(resource['GroupName'], policy)
for arn in resource["AttachedManagedPolicies"]:
if arn not in resource_['AttachedManagedPolicies']:
self.iw.attach_group_policy(resource["GroupName"], arn)
for arn in resource_['AttachedManagedPolicies']:
if arn not in resource['AttachedManagedPolicies']:
# AWS has a managed policy that is not in the desired version, it should be deleted.
self.iw.detach_group_policy(resource["GroupName"], arn)
def group_remove(self, resource):
"""Remove the referenced IAM Group
Args:
resource (str): Name of the IAM Group to remove
"""
group = self.resource.Group(resource)
try:
group.load()
except self.client.exceptions.NoSuchEntityException:
console.info("Group {} doesn't exist".format(resource))
return
# Attached resources
for policy in group.attached_policies.all():
self.client.detach_group_policy(GroupName = resource,
PolicyArn = policy.arn)
for policy in group.policies.all():
self.client.delete_group_policy(GroupName = resource,
PolicyName = policy.name)
# The role itself
self.client.delete_group(GroupName = resource)
#########
# Roles
def role_create(self, resource):
"""Create a new IAM Role
Args:
resource (object): IAM Role definition to create
"""
self.iw.create_role(resource['RoleName'],
resource['Path'],
json_dumps(resource['AssumeRolePolicyDocument']))
for policy in resource['RolePolicyList']:
self.iw.put_role_policy(resource['RoleName'],
policy['PolicyName'],
policy['PolicyDocument'])
for policy in resource['AttachedManagedPolicies']:
self.iw.attach_role_policy(resource['RoleName'],
policy)
for profile in resource['InstanceProfileList']:
self.iw.create_instance_profile(profile['InstanceProfileName'],
profile['Path'])
self.iw.add_role_to_instance_profile(resource['RoleName'],
profile['InstanceProfileName'])
def role_update(self, resource, resource_):
"""Compare and potentially update the referenced IAM Role
Args:
resource (object): Desired IAM Role definition
resource_ (object): Current IAM Role definition
"""
policy = json_dumps(resource['AssumeRolePolicyDocument'])
policy_ = json_dumps(resource_['AssumeRolePolicyDocument'])
if policy != policy_:
console.warning('Role policy document differs')
self.iw.update_assume_role_policy(resource['RoleName'],
resource['AssumeRolePolicyDocument'])
lookup = { policy['PolicyName']: policy['PolicyDocument']
for policy in resource_['RolePolicyList'] }
for policy in resource['RolePolicyList']:
policy_ = lookup.get(policy['PolicyName'])
if policy_ is None:
self.iw.put_role_policy(resource['RoleName'],
policy['PolicyName'],
policy['PolicyDocument'])
else:
del lookup[policy['PolicyName']]
document = json_dumps(policy['PolicyDocument'])
document_ = json_dumps(policy_)
if document != document_:
self.iw.put_role_policy(resource['RoleName'],
policy['PolicyName'],
policy['PolicyDocument'])
for policy in lookup.keys():
# AWS has a policy that is not in the desired version, it should be deleted
self.iw.delete_role_policy(resource['RoleName'], policy)
for arn in resource['AttachedManagedPolicies']:
if arn not in resource_['AttachedManagedPolicies']:
self.iw.attach_role_policy(resource["RoleName"], arn)
for arn in resource_['AttachedManagedPolicies']:
if arn not in resource['AttachedManagedPolicies']:
# AWS has a managed policy that is not in the desired version, it should be deleted.
self.iw.detach_role_policy(resource["RoleName"], arn)
lookup = { profile['InstanceProfileName']: profile
for profile in resource_['InstanceProfileList'] }
for profile in resource['InstanceProfileList']:
profile_ = lookup.get(profile['InstanceProfileName'])
if profile_ is None:
self.iw.create_instance_profile(profile['InstanceProfileName'],
profile['Path'])
self.iw.add_role_to_instance_profile(resource['RoleName'],
profile['InstanceProfileName'])
else:
del lookup[profile['InstanceProfileName']]
if profile['Path'] != profile_['Path']:
console.warning("Paths differ for {} Instance Profile {}: '{}' != '{}'".format(resource['RoleName'],
profile['InstanceProfileName'],
profile['Path'],
profile_['Path']))
console.info('You will need to manually delete the old instance profile for the Path to be changed')
for profile in lookup.keys():
# AWS has an instance profile that is not in the desired version, it should be deleted
self.iw.remove_role_from_instance_profile(resource['RoleName'], profile)
self.iw.delete_instance_profile(profile)
def role_remove(self, resource):
"""Remove the referenced IAM Role
Args:
resource (str): Name of the IAM Role to remove
"""
role = self.resource.Role(resource)
try:
role.load()
except self.client.exceptions.NoSuchEntityException:
console.info("Role {} doesn't exist".format(resource))
return
# Attached resources
for policy in role.attached_policies.all():
self.client.detach_role_policy(RoleName = resource,
PolicyArn = policy.arn)
for profile in role.instance_profiles.all():
self.client.remove_role_from_instance_profile(InstanceProfileName = profile.name,
RoleName = resource)
self.client.delete_instance_profile(InstanceProfileName = profile.name)
# TODO ??? Inline policies?
# The role itself
self.client.delete_role(RoleName = resource)
############
# Policies
def policy_create(self, resource):
"""Create a new IAM Policy
Args:
resource (object): IAM Policy definition to create
"""
resource['PolicyDocument'] = json_dumps(resource['PolicyDocument'])
self.client.create_policy(**resource)
def policy_arn(self, resource):
"""Build the Policies ARN from its definition"""
return "arn:aws:iam::{}:policy{}{}".format(self.bosslet_config.ACCOUNT_ID,
resource['Path'],
resource['PolicyName'])
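# Editor's sketch (hypothetical account id and policy name):
#   policy_arn({'Path': '/', 'PolicyName': 'example-policy'})
#   -> 'arn:aws:iam::123456789012:policy/example-policy'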
def delete_oldest_policy_version(self, policy_name, arn):
"""Query for the current policy versions and delete the oldest one if
there are 5 versions (the maximum number allowed)
"""
resp = self.client.list_policy_versions(PolicyArn = arn)
if len(resp['Versions']) == 5:
versions = [int(version['VersionId'][1:]) for version in resp['Versions']]
versions.sort()
resp = self.client.delete_policy_version(PolicyArn = arn,
VersionId = 'v' + str(versions[0]))
def policy_update(self, resource, resource_):
"""Compare and potentially update the referenced IAM Policy
Args:
resource (object): Desired IAM Policy definition
resource_ (object): Current IAM Policy definition
"""
policy = json_dumps(resource['PolicyDocument'])
policy_ = json_dumps(resource_['PolicyDocument'])
if policy != policy_:
console.warning("Default policy differs")
arn = self.policy_arn(resource)
self.delete_oldest_policy_version(resource['PolicyName'], arn)
self.client.create_policy_version(PolicyArn = arn,
PolicyDocument = resource['PolicyDocument'],
SetAsDefault = True)
def policy_lookup(self, resource):
"""Lookup the Policy ARN based on the Policy name"""
# Memoize the mapping of policy name to ARN, as all policy removals will use it
if self._policy_lookup is None:
self._policy_lookup = {}
kwargs = {}
while kwargs.get('Marker', '') is not None:
resp = self.client.list_policies(**kwargs)
for policy in resp['Policies']:
self._policy_lookup[policy['PolicyName']] = policy['Arn']
if resp['IsTruncated']:
kwargs['Marker'] = resp['Marker']
else:
kwargs['Marker'] = None
return self._policy_lookup.get(resource)
def policy_remove(self, resource):
"""Remove the referenced IAM Policy
Args:
resource (str): Name of the IAM Policy to remove
"""
arn = self.policy_lookup(resource)
if arn is None:
console.info("Policy {} doesn't exist".format(resource))
return
policy = self.resource.Policy(arn)
# Attached resources
for group in policy.attached_groups.all():
self.client.detach_group_policy(GroupName = group.group_id,
PolicyArn = arn)
for role in policy.attached_roles.all():
self.client.detach_role_policy(RoleName = role.name,
PolicyArn = arn)
for user in policy.attached_users.all():
self.client.detach_user_policy(UserName = user.name,
PolicyArn = arn)
# Non-default versions
resp = self.client.list_policy_versions(PolicyArn = arn)
for version in resp['Versions']:
if not version['IsDefaultVersion']:
self.client.delete_policy_version(PolicyArn = arn,
VersionId = version['VersionId'])
# The policy itself
self.client.delete_policy(PolicyArn = arn)
if __name__ == '__main__':
parser = configuration.BossParser(description="Load Policies, Roles and Groups into and out of AWS",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='Exports and Imports IAM Information')
parser.add_argument('--names', '-n',
default=DEFAULT_NAMES_FILE,
help='JSON document containing the whitelist of names that should be exported/imported')
parser.add_argument('--groups', '-g',
default=DEFAULT_GROUP_FILE,
help='JSON document where exported data is saved to or data to import is read from')
parser.add_argument('--roles', '-r',
default=DEFAULT_ROLES_FILE,
help='JSON document where exported data is saved to or data to import is read from')
parser.add_argument('--policies', '-p',
default=DEFAULT_POLICY_FILE,
help='JSON document where exported data is saved to or data to import is read from')
parser.add_argument('--removed',
default=DEFAULT_REMOVED_FILE,
help='JSON document containing the list of resource names that should be deleted')
parser.add_argument('--dry-run', '-d',
action='store_true',
help='If the import should be dry runned')
parser.add_bosslet()
parser.add_argument("command",
choices = ['export', 'import', 'remove'])
parser.add_argument("resource_type",
choices = ['groups', 'roles', 'policies'],
nargs='+')
args = parser.parse_args()
with open(args.names, 'r') as fh:
filters = json.load(fh)
iam = IamUtils(args.bosslet_config, args.dry_run)
if args.command == 'import':
for resource_type in args.resource_type:
iam.import_(resource_type, getattr(args, resource_type), filters[resource_type])
elif args.command == 'export':
for resource_type in args.resource_type:
iam.export(resource_type, getattr(args, resource_type), filters[resource_type])
else: # remove
with open(args.removed, 'r') as fh:
filters = json.load(fh)
for resource_type in args.resource_type:
iam.remove(resource_type, filters[resource_type])
```
#### File: boss-manage/bin/manage_keypair.py
```python
import argparse
import sys
import os
import alter_path
from lib import aws
from lib import configuration
from pathlib import Path
if __name__ == '__main__':
def create_help(header, options):
"""Create formated help."""
return "\n" + header + "\n" + \
"\n".join(map(lambda x: " " + x, options)) + "\n"
actions = ["create", "delete"]
actions_help = create_help("action supports the following:", actions)
parser = configuration.BossParser(description = "Script the creation and deletion of keypairs.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=actions_help)
parser.add_bosslet()
parser.add_argument("action",
choices = actions,
metavar = "action",
help = "Action to execute")
parser.add_argument("keypairName",
metavar = "keypairName",
help = "Name of keypair to manage")
args = parser.parse_args()
client = args.bosslet_config.session.client('ec2')
#Define key pair path
key_file_path = Path.home() / '.ssh' / (args.keypairName + '.pem')
if args.action == 'create':
try:
response = aws.create_keypair(args.bosslet_config.session, args.keypairName)
print('Protect this keypair and make sure you have access to it.')
except Exception as e:
print('Failed to create keypair due to: {}'.format(e))
response = False
if response:
try:
key_file_path.touch()
with key_file_path.open('w') as fh:
fh.write(response['KeyMaterial'])
key_file_path.chmod(0o400)
print('KeyPair saved in ~/.ssh/')
except FileExistsError:
print('Directory {} already existed'.format(key_file_path.parent))
pass
elif args.action == 'delete':
response = aws.delete_keypair(args.bosslet_config.session, args.keypairName)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
try:
key_file_path.unlink()
print('KeyPair deleted successfully')
except NameError:
print('The keypair was deleted from aws but it was not in your .ssh/ directory')
pass
except FileNotFoundError:
print('Could not find the PEM key to delete under {}'.format(key_file_path))
pass
else:
print(response['ResponseMetadata'])
```
#### File: cloud_formation/configs/api.py
```python
DEPENDENCIES = ['core', 'redis'] # also depends on activities for step functions
# but this forms a circular dependency
from lib.cloudformation import CloudFormationConfiguration, Arg, Ref, Arn
from lib.userdata import UserData
from lib.keycloak import KeyCloakClient
from lib.exceptions import BossManageCanceled, MissingResourceError
from lib import aws
from lib import console
from lib import utils
from lib import constants as const
import json
import uuid
from urllib.request import Request, urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
def create_config(bosslet_config, db_config={}):
names = bosslet_config.names
session = bosslet_config.session
# Lookup IAM Role and SNS Topic ARNs for use later in the config
endpoint_role_arn = aws.role_arn_lookup(session, "endpoint")
cachemanager_role_arn = aws.role_arn_lookup(session, 'cachemanager')
dns_arn = aws.sns_topic_lookup(session, names.dns.sns)
if dns_arn is None:
raise MissingResourceError('SNS topic', names.dns.sns)
mailing_list_arn = aws.sns_topic_lookup(session, bosslet_config.ALERT_TOPIC)
if mailing_list_arn is None:
raise MissingResourceError('SNS topic', bosslet_config.ALERT_TOPIC)
# Configure Vault and create the user data config that the endpoint will
# use for connecting to Vault and the DB instance
user_data = UserData()
user_data["system"]["fqdn"] = names.endpoint.dns
user_data["system"]["type"] = "endpoint"
user_data["aws"]["db"] = names.endpoint_db.rds
user_data["aws"]["cache"] = names.cache.redis
user_data["aws"]["cache-state"] = names.cache_state.redis
if const.REDIS_SESSION_TYPE is not None:
user_data["aws"]["cache-session"] = names.cache_session.redis
else:
# Don't create a Redis server for dev stacks.
user_data["aws"]["cache-session"] = ''
## cache-db and cache-state-db need to be in user_data for lambda to access them.
user_data["aws"]["cache-db"] = "0"
user_data["aws"]["cache-state-db"] = "0"
user_data["aws"]["cache-throttle-db"] = "0"
user_data["aws"]["cache-session-db"] = "0"
user_data["aws"]["meta-db"] = names.meta.ddb
# Use CloudFormation's Ref function so that queues' URLs are placed into
# the Boss config file.
user_data["aws"]["s3-flush-queue"] = str(Ref(names.s3flush.sqs)) # str(Ref("S3FlushQueue")) DP XXX
user_data["aws"]["s3-flush-deadletter-queue"] = str(Ref(names.deadletter.sqs)) #str(Ref("DeadLetterQueue")) DP XXX
user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket.s3
user_data["aws"]["tile_bucket"] = names.tile_bucket.s3
user_data["aws"]["ingest_bucket"] = names.ingest_bucket.s3
user_data["aws"]["s3-index-table"] = names.s3_index.ddb
user_data["aws"]["tile-index-table"] = names.tile_index.ddb
user_data["aws"]["id-index-table"] = names.id_index.ddb
user_data["aws"]["id-count-table"] = names.id_count_index.ddb
user_data["aws"]["prod_mailing_list"] = mailing_list_arn
user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)
user_data["aws"]["id-index-new-chunk-threshold"] = str(const.DYNAMO_ID_INDEX_NEW_CHUNK_THRESHOLD)
user_data["aws"]["index-deadletter-queue"] = str(Ref(names.index_deadletter.sqs))
user_data["aws"]["index-cuboids-keys-queue"] = str(Ref(names.index_cuboids_keys.sqs))
user_data["aws"]["downsample-queue"] = str(Ref(names.downsample_queue.sqs))
user_data["auth"]["OIDC_VERIFY_SSL"] = str(bosslet_config.VERIFY_SSL)
user_data["lambda"]["flush_function"] = names.multi_lambda.lambda_
user_data["lambda"]["page_in_function"] = names.multi_lambda.lambda_
user_data["lambda"]["ingest_function"] = names.tile_ingest.lambda_
user_data["lambda"]["downsample_volume"] = names.downsample_volume.lambda_
user_data["lambda"]["tile_uploaded_function"] = names.tile_uploaded.lambda_
user_data['sfn']['populate_upload_queue'] = names.ingest_queue_populate.sfn
user_data['sfn']['upload_sfn'] = names.ingest_queue_upload.sfn
user_data['sfn']['volumetric_upload_sfn'] = names.volumetric_ingest_queue_upload.sfn
user_data['sfn']['downsample_sfn'] = names.resolution_hierarchy.sfn
user_data['sfn']['index_cuboid_supervisor_sfn'] = names.index_cuboid_supervisor.sfn
user_data['sfn']['complete_ingest_sfn'] = names.complete_ingest.sfn
# Prepare user data for parsing by CloudFormation.
parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
config = CloudFormationConfiguration('api', bosslet_config, version="3")
keypair = bosslet_config.SSH_KEY
vpc_id = config.find_vpc()
internal_subnets, external_subnets = config.find_all_subnets()
az_subnets_asg, external_subnets_asg = config.find_all_subnets(compatibility='asg')
sgs = aws.sg_lookup_all(session, vpc_id)
# DP XXX: hack until we can get production updated correctly
config.add_security_group('AllHttpHttpsSecurityGroup', names.https.sg, [
('tcp', '443', '443', bosslet_config.HTTPS_INBOUND),
('tcp', '80', '80', bosslet_config.HTTPS_INBOUND)
])
sgs[names.https.sg] = Ref('AllHttpHttpsSecurityGroup')
# Create SQS queues and apply access control policies.
# Deadletter queue for indexing operations. This one is populated
# manually by states in the indexing step functions.
config.add_sqs_queue(names.index_deadletter.sqs, names.index_deadletter.sqs, 30, 20160)
# Queue that holds S3 object keys of cuboids to be indexed.
config.add_sqs_queue(names.index_cuboids_keys.sqs, names.index_cuboids_keys.sqs, 120, 20160)
config.add_sqs_queue(names.deadletter.sqs, names.deadletter.sqs, 30, 20160)
# ToDo: determine if a dlq needed for this queue.
# Downsample jobs.
config.add_sqs_queue(names.downsample_queue.sqs, names.downsample_queue.sqs, 300, 20160)
max_receives = 3
config.add_sqs_queue(names.s3flush.sqs,
names.s3flush.sqs,
30,
dead=(Arn(names.deadletter.sqs), max_receives))
config.add_sqs_policy("sqsEndpointPolicy", 'sqsEndpointPolicy', # DP XXX
[Ref(names.deadletter.sqs), Ref(names.s3flush.sqs)],
endpoint_role_arn)
config.add_sqs_policy("sqsCachemgrPolicy", 'sqsCachemgrPolicy', # DP XXX
[Ref(names.deadletter.sqs), Ref(names.s3flush.sqs)],
cachemanager_role_arn)
# Create the endpoint ASG, ELB, and RDS instance
cert = aws.cert_arn_lookup(session, names.public_dns("api"))
target_group_keys = config.add_app_loadbalancer("EndpointAppLoadBalancer",
names.endpoint_elb.dns,
[("443", "443", "HTTPS", cert)],
vpc_id=vpc_id,
subnets=external_subnets_asg,
security_groups=[sgs[names.internal.sg], sgs[names.https.sg]],
public=True)
target_group_arns = [Ref(key) for key in target_group_keys]
config.add_public_dns('EndpointAppLoadBalancer', names.public_dns('api'))
config.add_autoscale_group("Endpoint",
names.endpoint.dns,
aws.ami_lookup(bosslet_config, names.endpoint.ami),
keypair,
subnets=az_subnets_asg,
type_=const.ENDPOINT_TYPE,
security_groups=[sgs[names.internal.sg]],
user_data=parsed_user_data,
min=const.ENDPOINT_CLUSTER_MIN,
max=const.ENDPOINT_CLUSTER_MAX,
notifications=dns_arn,
role=aws.instance_profile_arn_lookup(session, 'endpoint'),
health_check_grace_period=90,
detailed_monitoring=True,
target_group_arns=target_group_arns,
depends_on=["EndpointDB"])
# Endpoint servers are not CPU bound typically, so react quickly to load
config.add_autoscale_policy("EndpointScaleUp",
Ref("Endpoint"),
adjustments=[
(0.0, 10, 1), # 12% - 22% Utilization add 1 instance
(10, None, 2) # Above 22% Utilization add 2 instances
],
alarms=[
("CPUUtilization", "Maximum", "GreaterThanThreshold", "12")
],
period=1)
config.add_autoscale_policy("EndpointScaleDown",
Ref("Endpoint"),
adjustments=[
(None, 0.0, -1), # Under 1.5% Utilization remove 1 instance
],
alarms=[
("CPUUtilization", "Average", "LessThanThreshold", "1.5")
],
period=50)
config.add_rds_db("EndpointDB",
names.endpoint_db.dns,
db_config.get("port"),
db_config.get("name"),
db_config.get("user"),
db_config.get("password"),
internal_subnets,
type_ = const.RDS_TYPE,
security_groups=[sgs[names.internal.sg]])
# Create the Meta, s3Index, tileIndex, annotation Dynamo tables
with open(const.DYNAMO_METADATA_SCHEMA, 'r') as fh:
dynamo_cfg = json.load(fh)
config.add_dynamo_table_from_json("EndpointMetaDB", names.meta.ddb, **dynamo_cfg)
with open(const.DYNAMO_S3_INDEX_SCHEMA, 'r') as s3fh:
dynamo_s3_cfg = json.load(s3fh)
config.add_dynamo_table_from_json('s3Index', names.s3_index.ddb, **dynamo_s3_cfg) # DP XXX
with open(const.DYNAMO_TILE_INDEX_SCHEMA, 'r') as tilefh:
dynamo_tile_cfg = json.load(tilefh)
config.add_dynamo_table_from_json('tileIndex', names.tile_index.ddb, **dynamo_tile_cfg) # DP XXX
with open(const.DYNAMO_ID_INDEX_SCHEMA, 'r') as id_ind_fh:
dynamo_id_ind__cfg = json.load(id_ind_fh)
config.add_dynamo_table_from_json('idIndIndex', names.id_index.ddb, **dynamo_id_ind__cfg) # DP XXX
with open(const.DYNAMO_ID_COUNT_SCHEMA, 'r') as id_count_fh:
dynamo_id_count_cfg = json.load(id_count_fh)
config.add_dynamo_table_from_json('idCountIndex', names.id_count_index.ddb, **dynamo_id_count_cfg) # DP XXX
return config
def generate(bosslet_config):
"""Create the configuration and save it to disk"""
try:
with bosslet_config.call.vault() as vault:
db_config = vault.read(const.VAULT_ENDPOINT_DB)
if db_config is None:
raise Exception()
except:
db_config = const.ENDPOINT_DB_CONFIG.copy()
config = create_config(bosslet_config, db_config)
config.generate()
def pre_init(bosslet_config):
# NOTE: In version 2 the public DNS records are managed by CloudFormation
# If the DNS record currently exists in Route53 the creation of the
# CloudFormation template will fail, so check to see if it exists
# due to previous launches of a Boss stack
session = bosslet_config.session
ext_domain = bosslet_config.EXTERNAL_DOMAIN
ext_cname = bosslet_config.names.public_dns('api')
target = aws.get_dns_resource_for_domain_name(session, ext_cname, ext_domain)
if target is not None:
console.warning("Removing existing Api public DNS entry, so CloudFormation can manage the DNS record")
aws.route53_delete_records(session, ext_domain, ext_cname)
def create(bosslet_config):
"""Configure Vault, create the configuration, and launch it"""
db_config = const.ENDPOINT_DB_CONFIG.copy()
db_config['password'] = <PASSWORD>()
with bosslet_config.call.vault() as vault:
vault.write(const.VAULT_ENDPOINT, secret_key = str(uuid.uuid4()))
vault.write(const.VAULT_ENDPOINT_DB, **db_config)
dns = bosslet_config.names.public_dns("api")
uri = "https://{}".format(dns)
vault.update(const.VAULT_ENDPOINT_AUTH, public_uri = uri)
config = create_config(bosslet_config, db_config)
pre_init(bosslet_config)
try:
config.create()
except:
print("Error detected, revoking secrets")
try:
with bosslet_config.call.vault() as vault:
vault.delete(const.VAULT_ENDPOINT)
vault.delete(const.VAULT_ENDPOINT_DB)
#vault.delete(const.VAULT_ENDPOINT_AUTH) # Deleting this will bork the whole stack
except:
print("Error revoking Django credentials")
raise
# Outside the try/except so it can be run again if there is an error
post_init(bosslet_config)
def post_init(bosslet_config):
call = bosslet_config.call
names = bosslet_config.names
# Write data into Vault
# DP TODO: Move into the pre-launch Vault writes, so it is available when the
# machines initially start
with call.vault() as vault:
dns = names.public_dns("api")
uri = "https://{}".format(dns)
#vault.update(const.VAULT_ENDPOINT_AUTH, public_uri = uri)
creds = vault.read("secret/auth")
bossadmin = vault.read("secret/auth/realm")
auth_uri = vault.read("secret/endpoint/auth")['url']
# Verify Keycloak is accessible
print("Checking for Keycloak availability")
call.check_keycloak(const.TIMEOUT_KEYCLOAK)
# Add the API servers to the list of OIDC valid redirects
with call.tunnel(names.auth.dns, 8080) as auth_port:
print("Update KeyCloak Client Info")
auth_url = "http://localhost:{}".format(auth_port)
with KeyCloakClient(auth_url, **creds) as kc:
# DP TODO: make add_redirect_uri able to work multiple times without issue
kc.add_redirect_uri("BOSS","endpoint", uri + "/*")
# Get the boss admin's bearer token
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
params = {
'grant_type': 'password',
'client_id': bossadmin['client_id'],
'username': bossadmin['username'],
'password': <PASSWORD>['password'],
}
auth_uri += '/protocol/openid-connect/token'
req = Request(auth_uri,
headers = headers,
data = urlencode(params).encode('utf-8'))
resp = json.loads(urlopen(req).read().decode('utf-8'))
# Make an API call that will log the boss admin into the endpoint
# and create the Large Ingest Group
call.check_url(uri + '/ping', 60)
headers = {
'Authorization': 'Bearer {}'.format(resp['access_token']),
}
# NOTE: group name must match value at boss.git/django/bosscore/constants.py:INGEST_GRP
api_uri = uri + '/latest/groups/bossingest'
req = Request(api_uri, headers = headers, method='POST')
try:
resp = urlopen(req)
print("Boss Ingest Group: {}".format(resp))
except HTTPError as ex:
if ex.code == 404:
print("Boss Ingest Group already exists")
else:
raise
def update(bosslet_config):
with bosslet_config.call.vault() as vault:
db_config = vault.read(const.VAULT_ENDPOINT_DB)
config = create_config(bosslet_config, db_config)
config.update()
def delete(bosslet_config):
session = bosslet_config.session
domain = bosslet_config.INTERNAL_DOMAIN
names = bosslet_config.names
if not console.confirm("All data will be lost. Are you sure you want to proceed?"):
raise BossManageCanceled()
aws.route53_delete_records(session, domain, names.endpoint.dns)
# Other configs may define SQS queues and we shouldn't delete them
aws.sqs_delete_all(session, domain) # !!! TODO FIX this so it doesn't bork the stack
aws.policy_delete_all(session, domain, '/ingest/')
config = CloudFormationConfiguration('api', bosslet_config)
config.delete()
```
#### File: cloud_formation/configs/copycuboid.py
```python
DEPENDENCIES = []
import json
from lib.cloudformation import CloudFormationConfiguration, Ref, Arn, Arg
from lib.userdata import UserData
from lib import aws
from lib import utils
from lib import console
from lib import constants as const
from lib import stepfunctions as sfn
from lib.lambdas import load_lambdas_on_s3, update_lambda_code
"""
This CloudFormation config file creates resources for copying cuboids from
one channel to another.
"""
def create_config(bosslet_config):
"""Create the CloudFormationConfiguration object."""
config = CloudFormationConfiguration('copycuboid', bosslet_config)
names = bosslet_config.names
session = bosslet_config.session
role = aws.role_arn_lookup(session, "lambda_cache_execution")
config.add_arg(Arg.String(
"LambdaCacheExecutionRole", role,
"IAM role for multilambda." + bosslet_config.INTERNAL_DOMAIN))
config.add_sqs_queue(names.copy_cuboid_dlq.sqs, names.copy_cuboid_dlq.sqs, 30, 20160)
config.add_lambda("CopyCuboidLambda",
names.copy_cuboid_lambda.lambda_,
Ref("LambdaCacheExecutionRole"),
handler="copy_cuboid_lambda.handler",
timeout=60,
memory=128,
dlq=Arn(names.copy_cuboid_dlq.sqs))
return config
def generate(bosslet_config):
"""Create the configuration and save it to disk."""
config = create_config(bosslet_config)
config.generate()
def create(bosslet_config):
"""Create the configuration and launch."""
if console.confirm("Rebuild multilambda", default = True):
pre_init(bosslet_config)
config = create_config(bosslet_config)
config.create()
def pre_init(bosslet_config):
"""Build multilambda zip file and put in S3."""
load_lambdas_on_s3(bosslet_config)
def update(bosslet_config):
if console.confirm("Rebuild multilambda", default = True):
pre_init(bosslet_config)
update_lambda_code(bosslet_config)
config = create_config(bosslet_config)
config.update()
def delete(bosslet_config):
config = CloudFormationConfiguration('copycuboid', bosslet_config)
config.delete()
```
#### File: cloud_formation/configs/idindexing.py
```python
import json
from lib.cloudformation import CloudFormationConfiguration, Ref, Arn, Arg
from lib.userdata import UserData
from lib import aws
from lib import utils
from lib import console
from lib import constants as const
from lib import stepfunctions as sfn
from lib.lambdas import load_lambdas_on_s3, update_lambda_code
"""
This CloudFormation config file creates the step functions and lambdas used
for annotation (object) id indexing. When building from scratch, it should
be run after the CloudFormation cachedb config.
"""
DEPENDENCIES = ['activities', 'cachedb']
def STEP_FUNCTIONS(bosslet_config):
names = bosslet_config.names
return [
(names.index_supervisor.sfn, 'index_supervisor.hsd'),
(names.index_cuboid_supervisor.sfn, 'index_cuboid_supervisor.hsd'),
(names.index_id_writer.sfn, 'index_id_writer.hsd'),
(names.index_find_cuboids.sfn, 'index_find_cuboids.hsd'),
(names.index_enqueue_cuboids.sfn, 'index_enqueue_cuboids.hsd'),
(names.index_fanout_enqueue_cuboids.sfn, 'index_fanout_enqueue_cuboids.hsd'),
(names.index_dequeue_cuboids.sfn, 'index_dequeue_cuboids.hsd'),
(names.index_fanout_id_writers.sfn, 'index_fanout_id_writers.hsd'),
]
def create_config(bosslet_config):
"""Create the CloudFormationConfiguration object."""
config = CloudFormationConfiguration('idindexing', bosslet_config)
session = bosslet_config.session
names = bosslet_config.names
#topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")
role = aws.role_arn_lookup(session, "lambda_cache_execution")
config.add_arg(Arg.String(
"LambdaCacheExecutionRole", role,
"IAM role for " + names.multi_lambda.lambda_))
def add_lambda(key, name, handler, timeout, memory):
"""A method for defining the common arguments for adding a lambda"""
config.add_lambda(key,
name,
Ref('LambdaCacheExecutionRole'),
handler=handler,
timeout=timeout,
memory=memory)
add_lambda("indexS3WriterLambda",
names.index_s3_writer.lambda_,
"write_s3_index_lambda.handler",
timeout=120, memory=1024)
add_lambda("indexFanoutIdWriterLambda",
names.index_fanout_id_writer.lambda_,
"fanout_write_id_index_lambda.handler",
timeout=120, memory=256)
add_lambda("indexWriteIdLambda",
names.index_write_id.lambda_,
"write_id_index_lambda.handler",
timeout=120, memory=512)
add_lambda("indexWriteFailedLambda",
names.index_write_failed.lambda_,
"write_index_failed_lambda.handler",
timeout=60, memory=128)
add_lambda("indexFindCuboidsLambda",
names.index_find_cuboids.lambda_,
"index_find_cuboids_lambda.handler",
timeout=120, memory=256)
add_lambda("indexFanoutEnqueueCuboidsKeysLambda",
names.index_fanout_enqueue_cuboid_keys.lambda_,
"fanout_enqueue_cuboid_keys_lambda.handler",
timeout=120, memory=256)
add_lambda("indexBatchEnqueueCuboidsLambda",
names.index_batch_enqueue_cuboids.lambda_,
"batch_enqueue_cuboids_lambda.handler",
timeout=60, memory=128)
add_lambda("indexFanoutDequeueCuboidKeysLambda",
names.index_fanout_dequeue_cuboid_keys.lambda_,
"fanout_dequeue_cuboid_keys_lambda.handler",
timeout=60, memory=128)
add_lambda("indexDequeueCuboidKeysLambda",
names.index_dequeue_cuboid_keys.lambda_,
"dequeue_cuboid_keys_lambda.handler",
timeout=60, memory=128)
add_lambda("indexGetNumCuboidKeysMsgsLambda",
names.index_get_num_cuboid_keys_msgs.lambda_,
"get_num_msgs_cuboid_keys_queue_lambda.handler",
timeout=60, memory=128)
add_lambda("indexCheckForThrottlingLambda",
names.index_check_for_throttling.lambda_,
"check_for_index_throttling_lambda.handler",
timeout=60, memory=128)
add_lambda("indexInvokeIndexSupervisorLambda",
names.index_invoke_index_supervisor.lambda_,
"invoke_index_supervisor_lambda.handler",
timeout=60, memory=128)
add_lambda("indexSplitCuboidsLambda",
names.index_split_cuboids.lambda_,
"split_cuboids_lambda.handler",
timeout=120, memory=128)
add_lambda("indexLoadIdsFromS3Lambda",
names.index_load_ids_from_s3.lambda_,
"load_ids_from_s3_lambda.handler",
timeout=120, memory=128)
return config
def generate(bosslet_config):
"""Create the configuration and save it to disk."""
config = create_config(bosslet_config)
config.generate()
def create(bosslet_config):
"""Create the configuration and launch."""
if console.confirm("Rebuild multilambda", default = True):
pre_init(bosslet_config)
config = create_config(bosslet_config)
config.create()
post_init(bosslet_config)
def pre_init(bosslet_config):
"""Build multilambda zip file and put in S3."""
load_lambdas_on_s3(bosslet_config)
def post_init(bosslet_config):
"""Create step functions."""
role = 'StatesExecutionRole-us-east-1 '
for name, path in STEP_FUNCTIONS(bosslet_config):
sfn.create(bosslet_config, name, path, role)
def post_update(bosslet_config):
"""Create step functions."""
for name, path in STEP_FUNCTIONS(bosslet_config):
sfn.update(bosslet_config, name, path)
def update(bosslet_config):
if console.confirm("Rebuild multilambda", default = True):
pre_init(bosslet_config)
update_lambda_code(bosslet_config)
config = create_config(bosslet_config)
config.update()
post_update(bosslet_config)
def delete(bosslet_config):
CloudFormationConfiguration('idindexing', bosslet_config).delete()
delete_sfns(bosslet_config)
def delete_sfns(bosslet_config):
for name, _ in STEP_FUNCTIONS(bosslet_config):
sfn.delete(bosslet_config, name)
```
#### File: boss-manage/lib/aws.py
```python
import os
import time
import json
import re
import sys
from . import hosts
from .utils import deprecated
from .exceptions import BossManageError
def get_all(to_wrap, key):
"""Utility helper method for requesting all results from AWS
Usage:
items = get_all(session.client('ec2').describe_instances, 'Reservations') \
(Filters=[...])
items # => List of Reservations returned by describe_instances
Args:
to_wrap (method): AWS client method to execute to get results
key (str): The dictionary key in the `to_wrap` response where results
are stored
Returns:
function: Function that takes arguments for `to_wrap` and will continue to call
`to_wrap` until there is not a valid 'NextToken' in the response. The
result is a list of values that were stored under `key` in the original
response from AWS
"""
def wrapper(*args, **kwargs):
rtn = []
while True:
resp = to_wrap(*args, **kwargs)
rtn.extend(resp[key])
if 'NextToken' in resp and resp['NextToken'] is not None:
kwargs['NextToken'] = resp['NextToken']
else:
return rtn
return wrapper
def machine_lookup_all(session, hostname, public_ip = True):
"""Lookup all of the IP addresses for a given AWS instance name.
Multiple instances with the same name is a result of instances belonging to
an auto scale group. Useful when an action needs to happen to all machines
in an auto scale group.
Args:
session (Session) : Active Boto3 session
hostname (string) : Hostname of the EC2 instances
public_ip (bool) : Whether or not to return public IPs or private IPs
Returns:
(list) : List of IP addresses
"""
client = session.client('ec2')
items = get_all(client.describe_instances, 'Reservations') \
(Filters=[{"Name":"tag:Name", "Values":[hostname]},
{"Name":"instance-state-name", "Values":["running"]}])
addresses = []
if len(items) > 0:
for i in items:
item = i['Instances'][0]
if 'PublicIpAddress' in item and public_ip:
addresses.append(item['PublicIpAddress'])
elif 'PrivateIpAddress' in item and not public_ip:
addresses.append(item['PrivateIpAddress'])
return addresses
def machine_lookup(session, hostname, public_ip = True):
"""Lookup the IP addresses for a given AWS instance name.
Note: If no address could be located, an error message is printed
If there are multiple machines with the same hostname, to select a specific
one, prepend the hostname with "#." where '#' is the zero based index.
Example: 0.auth.integration.boss
Retrieved instances are sorted by InstanceId.
Args:
session (Session) : Active Boto3 session
hostname (string) : Hostname of the EC2 instance
public_ip (bool) : Whether or not to return the public IP or private IP
Returns:
(string|None) : IP address or None if one could not be located.
"""
try:
idx, target = hostname.split('.', 1)
idx = int(idx) # if it is not a valid number, then it is a hostname
hostname = target
except:
idx = 0
client = session.client('ec2')
item = get_all(client.describe_instances, 'Reservations') \
(Filters=[{"Name":"tag:Name", "Values":[hostname]},
{"Name":"instance-state-name", "Values":["running"]}])
if len(item) == 0:
print("Could not find IP address for '{}'".format(hostname))
return None
else:
item.sort(key = lambda i: i['Instances'][0]["InstanceId"])
if len(item) <= idx:
print("Could not find IP address for '{}' index '{}'".format(hostname, idx))
return None
else:
item = item[idx]['Instances'][0]
if 'PublicIpAddress' in item and public_ip:
return item['PublicIpAddress']
elif 'PrivateIpAddress' in item and not public_ip:
return item['PrivateIpAddress']
else:
print("Could not find IP address for '{}'".format(hostname))
return None
def rds_lookup(session, hostname):
"""Lookup the public DNS for a given AWS RDS instance name.
Note: If no address could be located, an error message is printed
Args:
session (Session) : Active Boto3 session
hostname (string) : Instance name of the RDS instance
Returns:
(string|None) : Public DNS or None if one could not be located.
"""
client = session.client('rds')
response = client.describe_db_instances(DBInstanceIdentifier=hostname)
item = response['DBInstances']
if len(item) == 0:
print("Could not find DNS for '{}'".format(hostname))
return None
else:
return item[0]['Endpoint']['Address']
def _find(xs, predicate):
"""Locate an item in a list based on a predicate function.
Args:
xs (list) : List of data
predicate (function) : Function taking a data item and returning bool
Returns:
(object|None) : The first list item that predicate returns True for or None
"""
for x in xs:
if predicate(x):
return x
return None
def asg_restart(session, hostname, timeout, callback=None):
"""Terminate all of the instances for an ASG, with the given timeout between
each termination.
"""
client = session.client('ec2')
resource = session.resource('ec2')
response = client.describe_instances(Filters=[{"Name":"tag:Name", "Values":[hostname]},
{"Name":"instance-state-name", "Values":["running"]}])
for reservation in response['Reservations']:
for instance in reservation['Instances']:
id = instance['InstanceId']
print("Terminating {} instance {}".format(hostname, id))
resource.Instance(id).terminate()
print("Sleeping for {} minutes".format(timeout/60.0))
time.sleep(timeout)
if callback is not None:
callback()
def asg_name_lookup(session, hostname):
"""Lookup the Group name for the ASG creating the EC2 instances with the given hostname
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
hostname (string) : Hostname of the EC2 instances created by the ASG
Returns:
(string|None) : ASG Group name or None of the ASG could not be located
"""
if session is None:
return None
client = session.client('autoscaling')
response = client.describe_auto_scaling_groups()
if len(response['AutoScalingGroups']) == 0:
return None
else:
# DP NOTE: Unfortunately describe_auto_scaling_groups() doesn't allow filtering results
for g in response['AutoScalingGroups']:
t = _find(g['Tags'], lambda x: x['Key'] == 'Name')
if t and t['Value'] == hostname:
return g['AutoScalingGroupName']
return None
def vpc_id_lookup(session, vpc_domain):
"""Lookup the Id for the VPC with the given domain name.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
vpc_domain (string) : Name of VPC to lookup
Returns:
(string|None) : VPC ID or None if the VPC could not be located
"""
if session is None:
return None
client = session.client('ec2')
response = client.describe_vpcs(Filters=[{"Name": "tag:Name", "Values": [vpc_domain]}])
if len(response['Vpcs']) == 0:
return None
else:
return response['Vpcs'][0]['VpcId']
def subnet_id_lookup(session, subnet_domain):
"""Lookup the Id for the Subnet with the given domain name.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
subnet_domain (string) : Name of Subnet to lookup
Returns:
(string|None) : Subnet ID or None if the Subnet could not be located
"""
if session is None:
return None
client = session.client('ec2')
response = client.describe_subnets(Filters=[{"Name": "tag:Name", "Values": [subnet_domain]}])
if len(response['Subnets']) == 0:
return None
else:
return response['Subnets'][0]['SubnetId']
def azs_lookup(bosslet_config, compatibility=None):
"""Lookup all of the Availablity Zones for the connected region.
Args:
bosslet_config (BossConfiguration) : Bosslet configuration
compatibility (str|None) : AVAILABILITY_ZONE_USAGE key to apply
Returns:
(list) : List of tuples (availability zone, zone letter)
"""
client = bosslet_config.session.client('ec2')
response = client.describe_availability_zones()
rtn = [(z["ZoneName"], z["ZoneName"][-1]) for z in response["AvailabilityZones"]]
if compatibility:
try:
limits = bosslet_config.AVAILABILITY_ZONE_USAGE[compatibility]
        except KeyError:
            pass # No usage limits defined for this compatibility key
else:
for az in rtn.copy():
if az[1] not in limits:
rtn.remove(az)
return rtn
def ami_lookup(bosslet_config, ami_name, version = None):
"""Lookup the Id for the AMI with the given name.
If ami_name ends with '.boss', the AMI_VERSION environmental variable is used
to either search for the latest commit hash tagged AMI ('.boss-h<hash>') or
for the AMI with the specific tag ('.boss-<AMI_VERSION>').
Args:
bosslet_config (BossConfiguration) : Bosslet configuration containing the Boto3
                                     session used to lookup information in AWS
ami_name (string) : Name of AMI to lookup
version (string|None) : Overrides the AMI_VERSION environment variable
used to specify a specific version of an AMI
Returns:
(tuple|None) : Tuple of strings (AMI ID, Commit hash of AMI build) or None
if AMI could not be located
"""
specific = False
if ami_name.endswith(bosslet_config.AMI_SUFFIX):
ami_version = bosslet_config.ami_version if version is None else version
if ami_version == "latest":
# limit latest searching to only versions tagged with hash information
ami_search = ami_name + "-h*"
else:
ami_search = ami_name + "-" + ami_version
specific = True
else:
ami_search = ami_name
client = bosslet_config.session.client('ec2')
response = client.describe_images(Filters=[{"Name": "name", "Values": [ami_search]}])
if len(response['Images']) == 0:
if specific:
print("Could not locate AMI '{}', trying to find the latest '{}' AMI".format(ami_search, ami_name))
return ami_lookup(bosslet_config, ami_name, version = "latest")
else:
raise BossManageError("Could not locate AMI '{}'".format(ami_name))
else:
response['Images'].sort(key=lambda x: x["CreationDate"], reverse=True)
image = response['Images'][0]
ami = image['ImageId']
tag = _find(image.get('Tags', []), lambda x: x["Key"] == "Commit")
commit = None if tag is None else tag["Value"]
return (ami, commit)
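# Usage sketch (names are illustrative): with AMI_VERSION == 'latest' the newest
# '<name>-h<hash>' image wins by CreationDate, while a missing specific version
# falls back to 'latest' with a warning, e.g.
#   ami_id, commit = ami_lookup(bosslet_config, 'endpoint' + bosslet_config.AMI_SUFFIX)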
class NoneDict(dict):
"""Custom Dictionary that returns none if the key doesn't exist.
Normal behavior it to throw an exception.
"""
def __getitem__(self, key):
if key not in self:
return None
else:
return super().__getitem__(key)
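# Example: sg_lookup_all() below returns a NoneDict, so callers can index a missing
# security group name and get None instead of a KeyError, e.g.
#   sgs = sg_lookup_all(session, vpc_id)
#   sgs['internal.example.boss']   # group id, or None if absent (name is illustrative)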
def sg_lookup_all(session, vpc_id):
"""Lookup the Ids for all of the VPC Security Groups.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
vpc_id (string) : VPC ID of the VPC to search in
Returns:
(dict|None) : Dictionary of Security Group Name and ID
Dictionary will be empty if session is None or no security groups
could be located
"""
if session is None:
return NoneDict()
client = session.client('ec2')
response = client.describe_security_groups(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
if len(response['SecurityGroups']) == 0:
return NoneDict()
else:
sgs = NoneDict()
for sg in response['SecurityGroups']:
key = _find(sg.get('Tags', []), lambda x: x["Key"] == "Name")
if key:
key = key['Value']
sgs[key] = sg['GroupId']
return sgs
def sg_lookup(session, vpc_id, group_name):
"""Lookup the Id for the VPC Security Group with the given name.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
vpc_id (string) : VPC ID of the VPC to search in
group_name (string) : Name of the Security Group to lookup
Returns:
(string|None) : Security Group ID or None if the Security Group could not be located
"""
if session is None:
return None
client = session.client('ec2')
response = client.describe_security_groups(Filters=[{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "tag:Name", "Values": [group_name]}])
if len(response['SecurityGroups']) == 0:
return None
else:
return response['SecurityGroups'][0]['GroupId']
def rt_lookup(session, vpc_id, rt_name):
"""Lookup the Id for the VPC Route Table with the given name.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
vpc_id (string) : VPC ID of the VPC to search in
rt_name (string) : Name of the Route Table to lookup
Returns:
(string|None) : Route Table ID or None if the Route Table could not be located
"""
if session is None:
return None
client = session.client('ec2')
response = client.describe_route_tables(Filters=[{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "tag:Name", "Values": [rt_name]}])
if len(response['RouteTables']) == 0:
return None
else:
return response['RouteTables'][0]['RouteTableId']
def rt_name_default(session, vpc_id, new_rt_name):
"""Name the default Route Table that is created for a new VPC.
Find the default VPC Route Table and give it a name so that it can be referenced later.
Needed because by default the Route Table does not have a name and rt_lookup() will not find it.
The default VPC Route Table is determined as the first Route Table without a
name.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
vpc_id (string) : VPC ID of the VPC to search in
new_rt_name (string) : Name to give the VPC's default Route Table
Returns:
None
"""
client = session.client('ec2')
response = client.describe_route_tables(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
rt_id = None
for rt in response['RouteTables']:
nt = _find(rt['Tags'], lambda x: x['Key'] == 'Name')
if nt is None or nt['Value'] == '':
rt_id = rt['RouteTableId']
if rt_id is None:
print("Could not locate unnamed default route table")
return
resource = session.resource('ec2')
rt = resource.RouteTable(rt_id)
response = rt.create_tags(Tags=[{"Key": "Name", "Value": new_rt_name}])
def peering_lookup(session, from_id, to_id, owner_id=None):
"""Lookup the Id for the Peering Connection between the two VPCs.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
from_id (string) : VPC ID of the VPC from which the Peering Connection is
made (Requester)
to_id (string) : VPC ID of the VPC to which the Peering Connection is made
(Accepter)
owner_id (string) : Account ID that owns both of the VPCs that are connected.
If None is provided the Account ID will be looked up from
the session.
Returns:
(string|None) : Peering Connection ID or None if the Peering Connection
could not be located
"""
if session is None:
return None
if owner_id is None:
owner_id = get_account_id_from_session(session)
client = session.client('ec2')
response = client.describe_vpc_peering_connections(Filters=[{"Name": "requester-vpc-info.vpc-id",
"Values": [from_id]},
{"Name": "requester-vpc-info.owner-id",
"Values": [owner_id]},
{"Name": "accepter-vpc-info.vpc-id",
"Values": [to_id]},
{"Name": "accepter-vpc-info.owner-id",
"Values": [owner_id]},
{"Name": "status-code", "Values": ["active"]},
])
if len(response['VpcPeeringConnections']) == 0:
return None
else:
return response['VpcPeeringConnections'][0]['VpcPeeringConnectionId']
def instanceid_lookup(session, hostname):
"""Look up instance id by hostname (instance name).
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
hostname (string) : Name of the Instance to lookup
Returns:
(string|None) : Instance ID or None if the Instance could not be located
"""
if session is None:
return None
client = session.client('ec2')
response = client.describe_instances(
Filters=[{"Name": "tag:Name", "Values": [hostname]}])
item = response['Reservations']
if len(item) == 0:
return None
else:
item = item[0]['Instances']
if len(item) == 0:
return None
else:
item = item[0]
if 'InstanceId' in item:
return item['InstanceId']
return None
def cert_arn_lookup(session, domain_name):
"""Looks up the ARN for a SSL Certificate
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
domain_name (string) : Domain Name that the Certificate was issued for
Returns:
(string|None) : Certificate ARN or None if the Certificate could not be located
"""
if session is None:
return None
client = session.client('acm')
response = client.list_certificates()
for certs in response['CertificateSummaryList']:
if certs['DomainName'] == domain_name:
return certs['CertificateArn']
if certs['DomainName'].startswith('*'): # if it is a wildcard domain like "*.thebossdev.io"
cert_name = certs['DomainName'][1:] + '$'
if re.search(cert_name, domain_name) != None:
return certs['CertificateArn']
return None
def instance_public_lookup(session, hostname):
"""Lookup the Public DNS name for a EC2 instance
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
hostname (string) : Name of the Instance to lookup
Returns:
(string|None) : Public DNS name or None if the Instance could not be
located / has no Public DNS name
"""
if session is None:
return None
client = session.client('ec2')
response = client.describe_instances(
Filters=[{"Name": "tag:Name", "Values": [hostname]},
{"Name": "instance-state-name", "Values": ["running"]}])
item = response['Reservations']
if len(item) == 0:
return None
else:
item = item[0]['Instances']
if len(item) == 0:
return None
else:
item = item[0]
if 'PublicDnsName' in item:
return item['PublicDnsName']
return None
def cloudfront_public_lookup(session, hostname):
"""
Lookup cloudfront public domain name which has hostname as the origin.
Args:
session(Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
hostname: name of api domain or auth domain. Ex: api.integration.theboss.io
Returns:
(string|None) : Public DNS name of cloud front or None if it could not be located
"""
if session is None:
return None
client = session.client('cloudfront')
response = client.list_distributions(
MaxItems='100'
)
items = response["DistributionList"]["Items"]
for item in items:
cloud_front_domain_name = item["DomainName"]
if item["Aliases"]["Quantity"] > 0:
if hostname in item["Aliases"]["Items"]:
return cloud_front_domain_name
return None
def elb_public_lookup(session, hostname):
"""Lookup the Public DNS name for a ELB. Now searches for both classic and application ELBs
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
hostname (string) : Name of the ELB to lookup
Returns:
(string|None) : Public DNS name or None if the ELB could not be located
"""
if session is None:
return None
hostname_ = hostname.replace(".", "-")
client = session.client('elb')
responses = client.describe_load_balancers()
for response in responses["LoadBalancerDescriptions"]:
if response["LoadBalancerName"].startswith(hostname_):
return response["DNSName"]
client = session.client('elbv2')
responses_v2 = client.describe_load_balancers()
for response in responses_v2["LoadBalancers"]:
if response["LoadBalancerName"].startswith(hostname_):
return response["DNSName"]
return None
# Should be something more like elb_check / elb_name_check, because
# _lookup is normally used to return the ID of something
def lb_lookup(session, lb_name):
"""Look up ELB Id by name
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
lb_name (string) : Name of the ELB to lookup
Returns:
(bool) : If the lb_name is a valid ELB name
"""
if session is None:
return None
lb_name = lb_name.replace('.', '-')
###
# ELB
client = session.client('elb')
response = client.describe_load_balancers()
for i in range(len(response['LoadBalancerDescriptions'])):
if (response['LoadBalancerDescriptions'][i]['LoadBalancerName']) == lb_name:
return True
###
# ELB v2
client = session.client('elbv2')
response = client.describe_load_balancers()
for i in range(len(response['LoadBalancers'])):
if (response['LoadBalancers'][i]['LoadBalancerName']) == lb_name:
return True
return False
def sns_topic_lookup(session, topic_name):
"""Lookup up SNS topic ARN given a topic name
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
topic_name (string) : Name of the topic to lookup
Returns:
(string|None) : ARN for the topic or None if the topic could not be located
"""
if session is None:
return None
client = session.client('sns')
response = client.list_topics()
topics_list = response['Topics']
for topic in topics_list:
arn_topic_name = topic["TopicArn"].split(':').pop()
if arn_topic_name == topic_name:
return topic["TopicArn"]
return None
def sqs_delete_all(session, domain):
"""Delete all of the SQS Queues that start with the given domain name
Args:
session (Session) : Boto3 session used to lookup information in AWS
domain (string) : Domain name prefix of queues to delete
Raises:
(boto3.ClientError): If queue not found.
"""
client = session.client('sqs')
resp = client.list_queues(QueueNamePrefix=domain.replace('.','-'))
for url in resp.get('QueueUrls', []):
client.delete_queue(QueueUrl=url)
def sqs_lookup_url(session, queue_name):
"""Lookup up SQS url given a name.
Args:
session (Session) : Boto3 session used to lookup information in AWS.
queue_name (string) : Name of the queue to lookup.
Returns:
(string) : URL for the queue.
Raises:
(boto3.ClientError): If queue not found.
"""
client = session.client('sqs')
resp = client.get_queue_url(QueueName=queue_name)
return resp['QueueUrl']
def request_cert(session, domain_name, validation_domain):
"""Requests a certificate in the AWS Certificate Manager for the domain name
Args:
session (Session|None) : Boto3 session used to communicate with AWS CertManager
If session is None no action is performed
domain_name (string) : domain name the certificate is being requested for
validation_domain (string) : domain suffix that request validation email
will be sent to.
Returns:
(dict|None) : Dictionary with the "CertificateArn" key containing the new
certificate's ARN or None if the session is None
"""
if session is None:
return None
client = session.client('acm')
validation_options = [
{
'DomainName': domain_name,
'ValidationDomain': validation_domain
},
]
response = client.request_certificate(DomainName=domain_name,
DomainValidationOptions=validation_options)
return response
def get_hosted_zone_id(session, hosted_zone):
"""Look up Hosted Zone ID by DNS Name
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
hosted_zone (string) : DNS Name of the Hosted Zone to lookup
Returns:
(string|None) : Hosted Zone ID or None if the Hosted Zone could not be located
"""
if session is None:
return None
client = session.client('route53')
response = client.list_hosted_zones_by_name(
DNSName=hosted_zone,
MaxItems='1'
)
if len(response['HostedZones']) >= 1:
full_id = response['HostedZones'][0]['Id']
id_parts = full_id.split('/')
return id_parts.pop()
else:
return None
def set_domain_to_dns_name(session, domain_name, dns_resource, hosted_zone):
"""Updates or Creates a domain name with FQDN resource.
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
domain_name (string) : FQDN of the public record to create / update
dns_resource (string) : Public FQDN of the AWS resource to map domain_name to
hosted_zone (string) : DNS Name of the Hosted Zone that contains domain_name
Returns:
(dict|None) : Dictionary with the "ChangeInfo" key containing a dict of
information about the requested change or None if the session
is None
"""
if session is None:
return None
client = session.client('route53')
hosted_zone_id = get_hosted_zone_id(session, hosted_zone)
if hosted_zone_id is None:
print("Error: Unable to find Route 53 Hosted Zone, " + hosted_zone + ", Cannot set resource record for: " +
dns_resource)
return None
response = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch={
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': domain_name,
'Type': 'CNAME',
'ResourceRecords': [
{
'Value': dns_resource
},
],
'TTL': 300,
}
},
]
}
)
return response
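# Usage sketch (illustrative names): UPSERT a public CNAME pointing at an ELB.
#   set_domain_to_dns_name(session, 'api.example.io',
#                          elb_public_lookup(session, 'elb.example.io'),
#                          'example.io')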
def get_dns_resource_for_domain_name(session, domain_name, hosted_zone):
"""gets to resource name attached to a domain name
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no lookup is performed
domain_name (string) : FQDN of the public record to create / update
hosted_zone (string) : DNS Name of the Hosted Zone that contains domain_name
Returns:
str: Public FQDN of the AWS resource mapped to the domain name
"""
if session is None:
return None
client = session.client('route53')
hosted_zone_id = get_hosted_zone_id(session, hosted_zone)
domain_name += '.' # DNS record format
if hosted_zone_id is None:
print("Error: Unable to find Route 53 Hosted Zone, " + hosted_zone + ", Cannot set resource record for: " +
dns_resource)
return None
response = client.list_resource_record_sets(
HostedZoneId=hosted_zone_id,
StartRecordName=domain_name,
StartRecordType='CNAME'
)
for record in response['ResourceRecordSets']:
if record['Name'] == domain_name:
return record['ResourceRecords'][0]['Value']
return None
def route53_delete_records(session, hosted_zone, cname):
"""Delete all of the matching CNAME records from a DNS Zone
Args:
session (Session|None) : Boto3 session used to lookup information in AWS
If session is None no delete is performed
hosted_zone (string) : Name of the hosted zone
cname (string) : The DNS records to delete
"""
if session is None:
return None
client = session.client('route53')
hosted_zone_id = get_hosted_zone_id(session, hosted_zone)
if hosted_zone_id is None:
print("Could not locate Route53 Hosted Zone '{}'".format(hosted_zone))
return None
response = client.list_resource_record_sets(
HostedZoneId=hosted_zone_id,
StartRecordName=cname,
StartRecordType='CNAME'
)
changes = []
for record in response['ResourceRecordSets']:
if not record['Name'].startswith(cname):
continue
changes.append({
'Action': 'DELETE',
'ResourceRecordSet': record
})
if len(changes) == 0:
print("No {} records to remove".format(cname))
return None
response = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch={'Changes': changes}
)
return response
def sns_unsubscribe_all(bosslet_config, topic):
"""Unsubscribe all subscriptions for the given SNS topic
Args:
    bosslet_config (BossConfiguration) : Bosslet configuration providing the
                                         Boto3 session, region, and account ID
    topic (string) : Name of the SNS topic
"""
session = bosslet_config.session
region = bosslet_config.REGION
account = bosslet_config.ACCOUNT_ID
topic = "arn:aws:sns:{}:{}:{}".format(region, account, topic.replace(".", "-"))
client = session.client('sns')
response = client.list_subscriptions()
for res in response['Subscriptions']:
if res['TopicArn'] == topic:
client.unsubscribe(SubscriptionArn=res['SubscriptionArn'])
return None
def sns_create_topic(session, topic):
"""
Creates a new SNS Topic
Args:
    session (Session|None) : Boto3 session used to create the topic
                             If session is None no action is performed
    topic (string) : Name of the topic to create
Returns:
    (string|None) : TopicArn of the created topic or None
"""
if session is None:
return None
client = session.client("sns")
response = client.create_topic(Name=topic)
print(response)
if response is None:
return None
else:
return response['TopicArn']
def policy_delete_all(session, domain, path="/"):
"""Delete all of the IAM policies that start with the given domain name
Args:
session (Session) : Boto3 session used to lookup information in AWS
domain (string) : Domain name prefix of policies to delete
path (string) : IAM path of the policy, if one was used
Raises:
(boto3.ClientError): If a policy could not be detached or deleted.
"""
client = session.client('iam')
resp = client.list_policies(Scope='Local', PathPrefix=path)
prefix = domain.replace('.', '-')
for policy in resp.get('Policies', []):
if policy['PolicyName'].startswith(prefix):
ARN = policy['Arn']
if policy['AttachmentCount'] > 0:
# cannot delete a policy if it is still in use
attached = client.list_entities_for_policy(PolicyArn=ARN)
for group in attached.get('PolicyGroups', []):
client.detach_group_policy(GroupName=group['GroupName'], PolicyArn=ARN)
for user in attached.get('PolicyUsers', []):
client.detach_user_policy(UserName=user['UserName'], PolicyArn=ARN)
for role in attached.get('PolicyRoles', []):
client.detach_role_policy(RoleName=role['RoleName'], PolicyArn=ARN)
client.delete_policy(PolicyArn=ARN)
def role_arn_lookup(session, role_name):
"""
Returns the ARN associated with the given IAM role name.
Using this method avoids hardcoding the AWS account into the ARN.
Args:
    session (Session|None) : Boto3 session used to lookup information in AWS
    role_name (string) : Name of the IAM role
Returns:
    (string|None) : Role ARN or None
"""
if session is None:
return None
client = session.client('iam')
response = client.get_role(RoleName=role_name)
if response is None:
return None
else:
return response['Role']['Arn']
def instance_profile_arn_lookup(session, instance_profile_name):
"""
Returns the ARN associated with the given instance profile name.
Using this method avoids hardcoding the AWS account into the ARN.
Args:
    session (Session|None) : Boto3 session used to lookup information in AWS
    instance_profile_name (string) : Name of the IAM instance profile
Returns:
    (string|None) : Instance Profile ARN or None
"""
if session is None:
return None
client = session.client('iam')
response = client.get_instance_profile(InstanceProfileName=instance_profile_name)
if response is None:
return None
else:
return response['InstanceProfile']['Arn']
def s3_bucket_exists(session, name):
"""Test for existence of an S3 bucket.
Note that this method can only test for the existence of buckets owned by
the user.
Args:
session (Session): Boto3 session used to lookup information in AWS.
name (string): Name of S3 bucket.
Returns:
(bool): True if bucket exists.
"""
client = session.client('s3')
resp = client.list_buckets()
for bucket in resp['Buckets']:
if bucket['Name'] == name:
return True
return False
def s3_bucket_delete(session, name, empty=False):
"""Delete the given S3 bucket
Args:
session (Session): Boto3 session used to lookup information in AWS.
name (string): Name of S3 bucket.
empty (bool): If True, delete all objects in the bucket before deleting it.
Returns:
(None)
"""
s3 = session.resource('s3')
bucket = s3.Bucket(name)
if empty:
bucket.objects.all().delete()
bucket.delete()
def lambda_arn_lookup(session, lambda_name):
"""
Returns the arn for a lambda given a lambda function name.
Args:
session (Session): boto3.session.Session object
lambda_name (str): name of the lambda function
Returns:
(str|None): ARN of the lambda function or None
"""
if session is None:
return None
client = session.client("lambda")
response = client.get_function(FunctionName=lambda_name)
if response is None:
return None
else:
return response['Configuration']['FunctionArn']
def dynamo_scan(session, table_name):
if session is None:
return None
client = session.client("dynamodb")
    response = client.scan(TableName=table_name)
if response is None:
return None
else:
return response
def dynamodb_delete_table(session, table_name, wait=True):
"""Deletes the given DynamoDB table, optionally waiting until it has been deleted
Args:
session (Session): boto3.session.Session object
table_name (str): name of the DynamoDB Table
wait (optional[bool]): If the function should poll AWS until the table is removed
"""
client = session.client("dynamodb")
tables = client.list_tables()['TableNames']
if table_name not in tables:
return
client.delete_table(TableName = table_name)
if wait:
print("Deleting {} .".format(table_name), end='', flush=True)
while True:
print(".", end='', flush=True)
time.sleep(10)
tables = client.list_tables()['TableNames']
if table_name not in tables:
print(". done")
break
def get_data_pipeline_id(session, name):
client = session.client('datapipeline')
marker = ''
while True:
resp = client.list_pipelines(marker = marker)
for obj in resp['pipelineIdList']:
if obj['name'] == name:
return obj['id']
if not resp['hasMoreResults']:
break
marker = resp['marker']
return None
def create_data_pipeline(session, name, pipeline):
client = session.client('datapipeline')
resp = client.create_pipeline(name = name,
uniqueId = name)
id = resp['pipelineId']
resp = client.put_pipeline_definition(pipelineId = id,
pipelineObjects = pipeline.objects)
for warning in resp['validationWarnings']:
for msg in warning['warnings']:
print("{:20}: {}".format(warning['id'], msg))
for error in resp['validationErrors']:
for msg in error['errors']:
print("{:20}: {}".format(error['id'], msg))
if resp['errored']:
print("Errors in the pipeline, deleting...")
delete_data_pipeline(session, id)
return None
return id
def delete_data_pipeline(session, id):
client = session.client('datapipeline')
client.delete_pipeline(pipelineId = id)
def activate_data_pipeline(session, id):
client = session.client('datapipeline')
from datetime import datetime
client.activate_pipeline(pipelineId = id,
startTimestamp = datetime.utcnow())
def get_existing_stacks(bosslet_config):
client = bosslet_config.session.client('cloudformation')
suffix = "".join([x.capitalize() for x in bosslet_config.INTERNAL_DOMAIN.split('.')])
invalid = ("DELETE_COMPLETE", )
existing = {
stack['StackName'][:-len(suffix)].lower(): stack
for stack in get_all(client.list_stacks, 'StackSummaries')()
if stack['StackName'].endswith(suffix) and stack['StackStatus'] not in invalid
}
return existing
def create_keypair(session, KeyName, DryRun=False):
"""
Create a new EC2 Key Pair.
Args:
    session (Session): boto3.session.Session object
    KeyName (str): Desired name of the keypair
    DryRun (bool): If True, only check whether the request would succeed
Returns:
    (dict|None): Response containing the SHA-1 digest of the DER encoded private key,
                 an unencrypted PEM encoded RSA private key, and the name of the
                 key pair. None if session is None.
"""
if session is None:
return None
client = session.client('ec2')
response = client.create_key_pair(
KeyName = KeyName,
DryRun = DryRun
)
return response
def delete_keypair(session, KeyName, DryRun=False):
"""
Delete an existing EC2 Key Pair.
Args:
    session (Session): boto3.session.Session object
    KeyName (str): Name of the keypair to delete
    DryRun (bool): If True, only check whether the request would succeed
Returns:
    (dict|None): Response from the delete call or None if session is None
"""
if session is None:
return None
client = session.client('ec2')
response = client.delete_key_pair(
KeyName = KeyName,
DryRun = DryRun
)
return response
```
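The lookup helpers in this file share one pattern: given a Boto3 `Session` (or a bosslet configuration) they translate a tagged name into an AWS identifier and return `None` when nothing matches. A minimal usage sketch is below; the bosslet name, VPC domain, and instance names are illustrative placeholders, not values from this repository, and it assumes the script runs from the boss-manage repository root.
```python
# Hypothetical wiring of the lookup helpers above (all names are placeholders).
from lib import aws
from lib.configuration import BossConfiguration

bosslet_config = BossConfiguration('example.boss')   # assumes config/example_boss.py exists
session = bosslet_config.session

vpc_id = aws.vpc_id_lookup(session, 'example.boss')                    # None if no such VPC
internal_sg = aws.sg_lookup(session, vpc_id, 'internal.example.boss')  # None if no such group
endpoint_ip = aws.machine_lookup(session, 'endpoint.example.boss', public_ip=False)
ami_id, commit = aws.ami_lookup(bosslet_config, 'endpoint' + bosslet_config.AMI_SUFFIX)

print(vpc_id, internal_sg, endpoint_ip, ami_id, commit)
```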
#### File: boss-manage/lib/boss_rds.py
```python
import logging
LOGGER = logging.getLogger(__name__)
def sql_tables(bosslet_config):
"""
List all tables in the MySQL database.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
Returns:
tables(list): Lookup key.
"""
query = "show tables"
with bosslet_config.call.connect_rds() as cursor:
cursor.execute(query)
tables = cursor.fetchall()
for i in tables:
        LOGGER.info(i)
return tables
def sql_list(bosslet_config, db_table):
"""
List all the available members of a given sql table.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
db_table: Identifies which table members to list.
Returns:
ans(list): list of all members of sql table.
"""
query = "SELECT * FROM {}".format(db_table)
with bosslet_config.call.connect_rds() as cursor:
cursor.execute(query)
ans = cursor.fetchall()
if len(ans) == 0:
raise Exception(
"Can't find table name: {}".format(db_table))
else:
for i in ans:
LOGGER.info(i)
return ans
def sql_resource_lookup_key(bosslet_config, resource_params):
"""
Get the lookup key that identifies the resource from the database.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
resource_params (str): Identifies collection, experiment or channel.
Returns:
cuboid_str(str): Cuboid lookup key.
"""
collection, experiment, channel = None, None, None
resource = resource_params.split("/")
    if len(resource) == 0 or resource[0] == '':
        raise Exception("Incorrect number of arguments(Make sure the resource provided has at least a collection to lookup)")
    elif len(resource) > 3:
        raise Exception("Only provide /coll/exp/chan")
    else:
        collection = resource[0]
        if len(resource) > 1:
            experiment = resource[1]
        if len(resource) > 2:
            channel = resource[2]
coll_query = "SELECT id FROM collection WHERE name = %s"
exp_query = "SELECT id FROM experiment WHERE name = %s"
chan_query = "SELECT id FROM channel WHERE name = %s"
with bosslet_config.call.connect_rds() as cursor:
if collection is not None:
cursor.execute(coll_query, (collection,))
coll_set = cursor.fetchall()
if len(coll_set) != 1: # TODO: Alert the user when there are more than one results
raise Exception(
"Can't find collection: {}".format(collection))
else:
cuboid_str = "{}&".format(coll_set[0][0])
LOGGER.info("{} collection id: {}".format(collection, coll_set[0][0]))
if experiment is not None:
cursor.execute(exp_query, (experiment,))
exp_set = cursor.fetchall()
if len(exp_set) != 1: # TODO: Alert the user when there are more than one results
raise Exception(
"Can't find experiment: {}".format(experiment))
else:
cuboid_str = cuboid_str + "{}&".format(exp_set[0][0])
LOGGER.info("{} experiment id: {}".format(experiment, exp_set[0][0]))
if channel is not None:
cursor.execute(chan_query, (channel,))
chan_set = cursor.fetchall()
if len(chan_set) != 1: # TODO: Alert the user when there are more than one results
raise Exception(
"Can't find channel: {}".format(channel))
else:
cuboid_str = cuboid_str + "{}&".format(chan_set[0][0])
LOGGER.info("{} channel id: {}".format(channel, chan_set[0][0]))
LOGGER.info("Cuboid key: {} \n".format(cuboid_str))
return cuboid_str
def sql_coordinate_frame_lookup_key(bosslet_config, coordinate_frame):
"""
Get the lookup key that identifies the coordinate frame specified.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
coordinate_frame: Identifies coordinate frame.
Returns:
coordinate_set(str): Coordinate Frame lookup key.
"""
query = "SELECT id FROM coordinate_frame WHERE name = %s"
with bosslet_config.call.connect_rds() as cursor:
cursor.execute(query, (coordinate_frame,))
coordinate_set = cursor.fetchall()
if len(coordinate_set) != 1:
raise Exception(
"Can't find coordinate frame: {}".format(coordinate_frame))
else:
LOGGER.info("{} coordinate frame id: {}".format(coordinate_frame, coordinate_set[0][0]))
return coordinate_set[0][0]
def sql_channel_job_ids(bosslet_config, resource):
"""
Get a list of channel job ids related to a given channel
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
resource(str): resource
Returns:
job_ids(list): job_ids and start dates and x,y and z range associated with channel
format: (id, start_date, x_start,y_start,z_start,x_stop, y_stop, z_stop)
ex: (2933, datetime.datetime(2019, 3, 16, 21, 33, 37, 831357), 32000, 45824, 14880, 213760, 169728, 14912)
"""
coll = resource.split("/")[0]
exp = resource.split("/")[1]
chan = resource.split("/")[2]
query = "SELECT id,start_date,x_start,y_start,z_start,x_stop,y_stop,z_stop FROM ingest_job WHERE collection = '{}' AND experiment = '{}' AND channel = '{}'".format(coll,exp,chan)
with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query, (coll, exp, chan))
job_ids = cursor.fetchall()
if len(job_ids) == 0:
raise Exception(
"Can't find resource name: {}/{}/{}".format(coll,exp,chan))
else:
LOGGER.info("\n Job-Ids corresponding to {}/{}/{}".format(coll,exp,chan))
LOGGER.info("< id, start_date, x_start,y_start,z_start,x_stop, y_stop, z_stop>")
for i in job_ids:
LOGGER.info(i)
return job_ids
def sql_get_names_from_lookup_keys(bosslet_config, lookup_keys):
"""
Gets collection/experiment/channel names from lookup keys.
Args:
bosslet_config (BossConfiguration): Bosslet configuration object
lookup_keys (list[str]): List of lookup keys to get col/exp/chan names for.
Expected format f'{col_id}&{exp_id}&{chan_id}'
Returns:
(list[tuple(str, str, str)]): List of tuples of collection/exp/chan names.
If a look up key is not found, empty strings
will be returned for that key's corresponding tuple.
"""
names = []
if len(lookup_keys) < 1:
LOGGER.error('No lookup keys provided, aborting.')
return names
query = 'SELECT collection_name, experiment_name, channel_name FROM lookup WHERE lookup_key = %(key)s'
with bosslet_config.call.connect_rds() as cursor:
for key in lookup_keys:
cursor.execute(query, { 'key': key })
rows = cursor.fetchall()
if(len(rows) > 0):
this_row = (rows[0][0], rows[0][1], rows[0][2])
else:
this_row = ('', '', '')
names.append(this_row)
LOGGER.info('key: {}, coll: {}, exp: {}, chan: {}'.format(key, this_row[0], this_row[1], this_row[2]))
return names
```
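The RDS helpers above all go through `bosslet_config.call.connect_rds()`, which opens a tunnelled cursor to the bosslet's MySQL instance. A short sketch of chaining them is below; the bosslet name and the `collection/experiment/channel` path are placeholders, and stripping the trailing `&` before the reverse lookup is an assumption about the lookup table's key format.
```python
# Hypothetical usage of the boss_rds helpers (names are placeholders).
from lib import boss_rds
from lib.configuration import BossConfiguration

bosslet_config = BossConfiguration('example.boss')

# '<coll_id>&<exp_id>&<chan_id>&' style key for a resource path
key = boss_rds.sql_resource_lookup_key(bosslet_config, 'my_coll/my_exp/my_chan')

# Ingest jobs for the same channel
jobs = boss_rds.sql_channel_job_ids(bosslet_config, 'my_coll/my_exp/my_chan')

# Reverse lookup: names for a list of lookup keys (trailing '&' stripped -- an assumption)
names = boss_rds.sql_get_names_from_lookup_keys(bosslet_config, [key.rstrip('&')])
print(key, jobs, names)
```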
#### File: boss-manage/lib/configuration.py
```python
import os
import sys
import json
import importlib
import warnings
import glob
import itertools
from argparse import ArgumentParser
from pprint import pformat
from boto3.session import Session
from . import exceptions
from . import constants as const
from . import console
from .external import ExternalCalls
from .ssh import SSHTarget
from .aws import machine_lookup
from .utils import keypair_to_file, parse_hostname
from .names import AWSNames
CONFIGS_GLOBS = [const.repo_path('config', '*.py'),
const.repo_path('config', 'custom', '*.py')]
CONFIGS_FMTS = [const.repo_path('config', '{}.py'),
const.repo_path('config', 'custom', '{}.py')]
def valid_bosslet(bosslet_name):
return bosslet_name in list_bosslets()
def list_bosslets():
return [os.path.basename(f)[:-3].replace('_','.')
for f in itertools.chain(*[glob.glob(g) for g in CONFIGS_GLOBS])]
class BossConfiguration(object):
__EXPECTED_KEYS = [
'EXTERNAL_DOMAIN',
'EXTERNAL_FORMAT', # Optional
'INTERNAL_DOMAIN',
'NETWORK', # Optional
'SUBNET_CIDR', # Optional
'AMI_SUFFIX',
'AMI_VERSION', # Optional
'SCENARIO', # Optional, no default
'VERIFY_SSL', # Optional
'AUTH_RDS',
'LAMBDA_BUCKET',
'LAMBDA_SERVER',
'LAMBDA_SERVER_KEY',
'REGION',
'AVAILABILITY_ZONE_USAGE', # Optional
'ACCOUNT_ID',
'PROFILE', # Optional
'OUTBOUND_BASTION',
'OUTBOUND_IP', # Conditional
'OUTBOUND_PORT', # Conditional
'OUTBOUND_USER', # Conditional
'OUTBOUND_KEY', # Conditional
'HTTPS_INBOUND', # Optional
'SSH_INBOUND',
'SSH_KEY',
'BILLING_TOPIC', # Optional
'BILLING_THRESHOLDS', # Conditional, required if setting up account
'BILLING_CURRENCY', # Optional
'ALERT_TOPIC', # Optional
'SLACK_WEBHOOK_HOST', # Optional
'SLACK_WEBHOOK_PATH_DYNAMODB_AUTOSCALE', # Conditional, to use Slack integration
'DYNAMODB_AUTOSCALE_PROVISIONER', # Optional
]
__DEFAULTS = {
"EXTERNAL_FORMAT": "{machine}",
"NETWORK": "10.0.0.0/16",
"SUBNET_CIDR": 24,
"AMI_VERSION": "latest",
"VERIFY_SSL": True,
"AVAILABILITY_ZONE_USAGE": {},
"OUTBOUND_BASTION": False,
"HTTPS_INBOUND": "0.0.0.0/0",
"BILLING_TOPIC": "BossBillingList",
"BILLING_CURRENCY": "USD",
"ALERT_TOPIC": "BossMailingList",
'SLACK_WEBHOOK_HOST': 'hooks.slack.com',
'SLACK_WEBHOOK_PATH_DYNAMODB_AUTOSCALE': None,
'DYNAMODB_AUTOSCALE_PROVISIONER': 'BossDefaultProvisioners',
}
def __init__(self, bosslet, **kwargs):
self.bosslet = bosslet
# Import the bosslet configuration file
try:
bosslet = bosslet.replace('.','_')
prefix = const.repo_path()
for fmt in CONFIGS_FMTS:
path = fmt.format(bosslet)
if os.path.exists(path):
# Translate the file path into a module import reference
mod = path.replace(prefix, '').replace('/', '.')[1:-3]
self._config = importlib.import_module(mod)
break
else:
raise ValueError("Cannot located Bosslet '{}'".format(self.bosslet))
except ImportError:
raise exceptions.BossManageError("Problem importing '{}'".format(mod))
if not self.verify():
raise exceptions.BossManageError("Bosslet config is not valid")
# Handle keyword arguments
self.disable_preview = kwargs.get('disable_preview')
self.ami_version = self.get('AMI_VERSION')
if kwargs.get('ami_version') is not None:
self.ami_version = kwargs['ami_version']
self.scenario = self.get('SCENARIO')
if kwargs.get('scenario') is not None:
self.scenario = kwargs['scenario']
# Create the session object
self.session = Session(profile_name = self.get('PROFILE'),
region_name = self._config.REGION)
if self.session.get_credentials() is None:
console.warning("Could not located AWS credentials")
self.session = None
# Load outbound bastion information in one location
if self._config.OUTBOUND_BASTION:
keyfile = keypair_to_file(self._config.OUTBOUND_KEY)
if not os.path.exists(keyfile):
raise ValueError("OUTBOUND_KEY '{}' doesn't exist".format(keyfile))
self.outbound_bastion = SSHTarget(keyfile,
self._config.OUTBOUND_IP,
self._config.OUTBOUND_PORT,
self._config.OUTBOUND_USER)
else:
self.outbound_bastion = None
# Load ssh key path in one location
if self._config.SSH_KEY:
keyfile = keypair_to_file(self._config.SSH_KEY)
if keyfile is None or not os.path.exists(keyfile):
raise ValueError("SSH_KEY '{}' doesn't exist".format(keyfile))
self.ssh_key = keyfile
else:
self.ssh_key = None
self.names = AWSNames.from_bosslet(self)
# Use __getattr__ to get the __DEFAULT value if not specified
if not self.VERIFY_SSL:
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def __getattr__(self, attr):
if hasattr(self._config, attr):
return getattr(self._config, attr)
elif attr in self.__DEFAULTS:
return self.__DEFAULTS[attr]
elif attr == 'call':
            # DP NOTE: Delayed loading of ExternalCalls because when initialized
            #          it does DNS lookups for the bastion and vault instances
# which will fail unless the core config has been launched
if self.session is None:
raise AttributeError("Require an AWS session to use ExternalCalls")
if self.ssh_key is None:
raise AttributeError("Require a SSH key to use ExternalCalls")
# Using __getattr__ instead of an @property because if an
# @property raises an AttributeError then __getattr__ gets
# called.
self.call = ExternalCalls(self) # saving as self.call for future lookups
return self.call
else:
msg = "'{}' object has not attribute '{}'".format(self.__class__.__name__,
attr)
raise AttributeError(msg)
def get(self, key, default=None):
try:
return self.__getattr__(key)
except AttributeError:
return default
def __repr__(self):
return "BossConfiguration('{}')".format(self.bosslet)
def verify(self, fh=sys.stdout):
ret = True
for key in self.__EXPECTED_KEYS:
if not hasattr(self._config, key):
if key not in self.__DEFAULTS:
if key in ('SCENARIO', 'BILLING_THRESHOLDS', 'PROFILE'):
pass
elif key in ('OUTBOUND_IP',
'OUTBOUND_PORT',
'OUTBOUND_USER',
'OUTBOUND_KEY') and \
self.get('OUTBOUND_BASTION') == False:
pass
else:
console.error("Variable '{}' not defined".format(key), file=fh)
ret = False
for key in dir(self._config):
if key not in self.__EXPECTED_KEYS:
if not key.startswith('__'):
console.warning("Extra variable '{}' defined".format(key), file=fh)
return ret
def display(self, fh = sys.stdout):
for key in self.__EXPECTED_KEYS:
try:
val = pformat(self.__getattr__(key))
print("{} = {}".format(key, val), file=fh)
except AttributeError:
if key in ('SCENARIO', 'BILLING_THRESHOLDS', 'PROFILE'):
pass
elif key in ('OUTBOUND_IP',
'OUTBOUND_PORT',
'OUTBOUND_USER',
'OUTBOUND_KEY') and \
self._config.OUTBOUND_BASTION == False:
pass
else:
raise
def create_help(header, options):
"""Create formated help
Args:
header (str): The header for the help
options (list[str]): The options that are available for argument
Returns:
str: Formatted argument help
"""
return "\n" + header + "\n" + \
"\n".join(map(lambda x: " " + x, options)) + "\n"
class BossParser(ArgumentParser):
"""A custom argument parser that provides common handling of looking up a
specific hostname
"""
_bosslet = False
_hostname = False
__subparsers = {}
def __init__(self, *args, **kwargs):
if 'help' in kwargs:
# Remove 'help' from the arguments, as it is a valid keyword argument
# for subparsers, but not the initial parser
del kwargs['help']
super().__init__(*args, **kwargs)
def create_subparser(self, dest, **kwargs):
"""Create a subparser definition that can be populated with `add_subcommand()`
Args:
dest (str): The name of the argument where the selected subcommand is stored
kwargs (dict): Any other arguments for the `add_subparser()` call
"""
subparser = self.add_subparsers(dest=dest,
parser_class=BossParser,
**kwargs)
subparser.required = True
self.__subparsers[dest] = subparser
def add_subcommand(self, dest, subcommand):
"""Add a subcommand to a previously defined subparser
Args:
dest (str): The `dest` value passed to `create_subparser()`
subcommand (str): The name of the subcommand that is being defined
Returns:
function: A function that acts like BossParser and can be used
to create the parser for the given subcommand
"""
def add_parser(**kwargs):
# BossParser / ArgumentParser __init__ doesn't have a 'help' argument
# but a sub-parser requires 'help' for it to be displayed in --help
if 'description' in kwargs and 'help' not in kwargs:
kwargs['help'] = kwargs['description']
return self.__subparsers[dest].add_parser(subcommand, **kwargs)
return add_parser
def add_hostname(self, private_ip = False, help = "Hostname of the target EC2 instance"):
"""Called to add arguments to the parser
Can be automatically called by the constructor by passing add_hostname=True
Adds '--private-ip', '--bosslet', and 'hostname' arguments to the parser
Args:
private_ip (bool) : If the '--private-ip' flag is allowed
"""
self._hostname = True
self._private_ip = private_ip
if self._bosslet:
raise Exception("Cannot add_hostname and add_bosslet")
if self._private_ip:
self.add_argument("--private-ip", "-p",
action='store_true',
default=False,
help = "If the hostname is an AWS IP address instead of an EC2 instance name")
self.add_argument("--bosslet",
metavar = "BOSSLET",
choices = list_bosslets(),
default=None,
help="Bosslet in which the machine is running")
self.add_argument("hostname", help = help)
def add_bosslet(self, help = "Name of the target Bosslet configuration"):
"""Called to add arguments to the parser
Can be automatically called by the constructor by passing add_bosslet=True
Adds 'bosslet_name' argument to the parser
"""
self._bosslet = True
if self._hostname:
raise Exception("Cannot add_bosslet and add_hostname")
self.add_argument("bosslet_name",
metavar = "bosslet_name",
choices = list_bosslets(),
help = help)
def parse_args(self, *args, **kwargs):
"""Calls the underlying 'parse_args()' method and then handles resolving
the AWS hostname and building the BossConfiguration.
This method will add 'bosslet_config' as a variable on the returned object
containing the BossConfiguration for the given bosslet.
This method will exit with a usage message and error message if an invalid
combination of arguments has been given.
"""
a = super().parse_args(*args, **kwargs)
# Note: Using `'<variable>' in a` instead of using the self._bosslet
# or self._hostname variables because with a nested parser the
# parent parser will not see those variables for the subparser
        # Note: Using `self.print_usage()` will not necessarily print the correct
# usage if the problem is with arguments from a subparser
try:
if 'bosslet_name' in a:
a.bosslet_config = BossConfiguration(a.bosslet_name)
elif 'hostname' in a:
finished = False
if 'private_ip' in a:
if a.private_ip:
if not a.bosslet:
msg = "--bosslet required if --private-ip is used"
self.error(msg)
else:
a.bosslet_config = BossConfiguration(a.bosslet)
a.ip = a.hostname
finished = True
if not finished:
idx, machine, bosslet_name = parse_hostname(a.hostname)
if not bosslet_name and not a.bosslet:
msg = "Could not parse out bosslet name, include --bosslet"
self.error(msg)
elif bosslet_name and a.bosslet:
if bosslet_name != a.bosslet:
msg = "Two different bosslet names were specified, remove one"
self.error(msg)
elif a.bosslet:
bosslet_name = a.bosslet
bosslet_config = BossConfiguration(bosslet_name)
hostname = bosslet_config.names[machine].dns
if idx is not None:
hostname = str(idx) + "." + hostname
a.bosslet_name = bosslet_name
a.bosslet_config = bosslet_config
a.hostname = hostname
if self._private_ip: # only lookup IP if we allow specifying a private ip
ip = machine_lookup(bosslet_config.session, hostname, public_ip=False)
if not ip:
sys.exit(1) # machine_lookup() already printed an error message
a.ip = ip
return a
except exceptions.BossManageError as ex: # BossConfig import or verification error
self.error(ex)
except ValueError as ex: # Invalid Bosslet name
self.error(ex)
class BossCLI(object):
"""Interface for defining a CLI application / script"""
def get_parser(self, ParentParser=BossParser):
"""Create and return the parser for this application
Args:
ParentParser: If this application is a subcommand ParentParser will
be the results from `BossParser.add_subcommand`. If
this is not provided, use BossParser.
Returns:
BossParser: The parser instance created and populated
"""
        raise NotImplementedError()
def run(self, args):
"""The main entrpoint for the application
Args:
args (Namespace): The parsed results for the application to use
Returns:
optional[int]: Return code
"""
        raise NotImplementedError()
def main(self): # just put into if __name__ ...
"""Application entrypoint that parsers the arguments and calls `run()`"""
parser = self.get_parser()
args = parser.parse_args()
self.run(args)
class NestedBossCLI(BossCLI):
"""Implementation of a nested CLI
A nested CLI contains common arguments and a set of subcommands that will be executed
To use:
* Define the COMMANDS, PARSER_ARGS, SUBPARSER_ARGS variables
* Optionally implement the `add_common_arguments()` method
Attributes:
COMMANDS: Mapping of subcommands and the implementing BossCLI reference
PARSER_ARGS: BossParser arguments
SUBPARSER_ARGS: Arguments for BossParser.create_subparser()
The key 'dest' must be defined
"""
COMMANDS = {
# 'command_name': BossCLI,
}
PARSER_ARGS = {
# 'description': '',
}
SUBPARSER_ARGS = {
# 'dest': 'nested_command',
# 'metavar': 'command',
# 'help': 'nested commands',
}
def __init__(self):
self.subcommands = { name: cli()
for name, cli in self.COMMANDS.items() }
self.dest = self.SUBPARSER_ARGS['dest']
def add_common_arguments(self, parser):
"""Method for adding the common arguments for all of the nested commands
Note: Called before adding the subcommands so that non optional arguments
will appear before the subcommands
Args:
parser (BossParser): Parser instance to add common arguments to
"""
pass
def get_parser(self, ParentParser=BossParser):
self.parser = ParentParser(**self.PARSER_ARGS)
self.add_common_arguments(self.parser)
self.parser.create_subparser(**self.SUBPARSER_ARGS)
for subcommand in self.subcommands.keys():
parser_ = self.parser.add_subcommand(self.dest, subcommand)
self.subcommands[subcommand].get_parser(parser_)
return self.parser
def run(self, args):
return self.subcommands[getattr(args, self.dest)].run(args)
```
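`BossParser` is meant to be the entry point of each script: it resolves a bosslet name (or hostname) into a `BossConfiguration` during `parse_args()`, and `BossCLI`/`NestedBossCLI` wrap that into a reusable command structure. A minimal standalone sketch using `add_bosslet()` is below; the description and printed fields are illustrative, not an existing boss-manage script.
```python
# Hypothetical script built on BossParser (description and output are placeholders).
from lib.configuration import BossParser

def main():
    parser = BossParser(description="Show basic information about a bosslet")
    parser.add_bosslet()            # adds the 'bosslet_name' positional argument
    args = parser.parse_args()      # also builds args.bosslet_config

    cfg = args.bosslet_config
    print("Bosslet:", args.bosslet_name)
    print("Region :", cfg.REGION)
    print("Domain :", cfg.INTERNAL_DOMAIN)

if __name__ == '__main__':
    main()
```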
#### File: boss-manage/lib/datapipeline.py
```python
class Ref(object):
"""Reference internal to the Data Pipeline"""
def __init__(self, ref):
self.ref = ref
def __str__(self):
return self.ref
def field_key(key):
"""Create the correct key for a field
Needed because you cannot pass '*password' as a kwarg
"""
if key == "password":
return "*password"
else:
return key
def field_type(value):
"""Return the type of the field, using the Ref object"""
if isinstance(value, Ref):
return "RefValue"
else:
return "StringValue"
def field_value(value):
"""REturn the value of the field, using the Ref object"""
if isinstance(value, Ref):
return value.ref
else:
return value
class DataPipeline(object):
"""Create an AWS Data Pipeline object
This class is similar to the CloudFormation library, in that
you create a pipeline object and add elements to the pipeline.
Elements can reference other elements using the Ref object
and element name.
"""
def __init__(self, role="DataPipelineDefaultRole", resource_role="DataPipelineDefaultResourceRole", log_uri=None, fmt="CF"):
"""Create a new DataPipeline object
Args:
role (str): IAM role for the Data Pipeline to execute under
resource_role (str): IAM role for the EC2 instance and EMR cluster
to execute under
log_uri (uri): S3 URI for the location to store execution logs
fmt (str): Either 'CF' or 'DP' for the internal format that will
be used when adding elements.
'CF' for use with CloudFormation templates
'DP' for when launching directly in Data Pipeline
"""
self.fmt = fmt
self.objects = []
# Set the schedule for the pipeline
self.add_field("DefaultSchedule",
"DefaultSchedule",
type = "Schedule",
period = "1 weeks",
startAt = "FIRST_ACTIVATION_DATE_TIME")
# Set default values used by all resources
self.add_field("Default",
"Default",
type = "Default",
schedule = Ref("DefaultSchedule"),
pipelineLogUri = log_uri,
failureAndRerunMode = "CASCADE",
resourceRole = resource_role,
role = role,
scheduleType = "cron")
def add_field(self, id, name, **fields):
"""Add a new field to the pipeline under construction"""
def key_(k):
"""Handle the different between CF and DP definitions.
DP requires some keys to be capitalized while CF requires
them to be lower case (why did they do this???) """
if self.fmt != "CF":
k = k[0].lower() + k[1:]
return k
field = {
key_("Id"): id,
key_("Name"): name,
key_("Fields"): [
{ key_("Key"): field_key(key),
key_(field_type(value)): field_value(value)}
for key, value in fields.items() if value # not None
],
}
self.objects.append(field)
def add_ec2_instance(self, name, type="t1.micro", sgs=None, subnet=None, duration="2 Hours", image=None):
"""Add an EC2 instance to the pipeline
Args:
name (str): Name of the resource
type (str): EC2 Instance type to launch
sgs ([str]): A List of Security Group Ids to attach to the EC2 instance
subnet (str): A Subnet Id to launch the EC2 instance into
Used to associate the instance with a VPC
duration (str): A time string (ex '2 Hours') after which the instance
will be terminated (if it hasn't finished)
image (str): AMI image to use when launching the instance
NOTE: the image must conform to the Data Pipeline standards
or it will not work
"""
self.add_field(name,
name,
type = "Ec2Resource",
instanceType = type,
actionOnTaskFailure = "terminate",
securityGroupIds = sgs,
subnetId = subnet,
imageId = image,
terminateAfter = duration)
def add_emr_cluster(self, name, type="m3.xlarge", count="1", version="3.9.0", region='us-east-1', duration="2 Hours"):
"""Add an Elastic Map Reduce cluster to the pipeline
(Used for DynamoDB operations)
Args:
name (str): Name of the resource
type (str): EMR reduce instance type to launch (both core and master instances)
count (str|int): Number of core instances to launch
version (str): Version string for the EMR AMI to launch
region (str): AWS Region
duration (str): A time string (ex '2 Hours') after which the instance
will be terminated (if it hasn't finished)
"""
bootstrapArgs = """
s3://{}.elasticmapreduce/bootstrap-actions/configure-hadoop,
--yarn-key-value,yarn.nodemanager.resource.memory-mb=11520,
--yarn-key-value,yarn.scheduler.maximum-allocation-mb=11520,
--yarn-key-value,yarn.scheduler.minimum-allocation-mb=1440,
--yarn-key-value,yarn.app.mapreduce.am.resource.mb=2880,
--mapred-key-value,mapreduce.map.memory.mb=5760,
--mapred-key-value,mapreduce.map.java.opts=-Xmx4608M,
--mapred-key-value,mapreduce.reduce.memory.mb=2880,
--mapred-key-value,mapreduce.reduce.java.opts=-Xmx2304m,
--mapred-key-value,mapreduce.map.speculative=false
""".replace("\n", "").format(region)
self.add_field(name,
name,
type = "EmrCluster",
bootstrapAction = bootstrapArgs,
coreInstanceCount = str(count),
coreInstanceType = type,
amiVersion = version,
masterInstanceType = type,
region = region,
terminateAfter = duration)
def add_rds_database(self, name, instance, username, password):
"""Add a RDS database definition
Args:
name (str): Name of the resource
instance (str): RDS Instance Id
username (str): Database username
password (str): Database password
"""
self.add_field(name,
name,
type = "RdsDatabase",
jdbcProperties = "allowMultiQueries=true",
rdsInstanceId = instance,
username = username,
password = password)
def add_rds_table(self, name, database, table):
"""Add a RDS table definition
Uses a 'SELECT * FROM {table}' to dump the table's data
Args:
name (str): Name of the resource
database (Ref): Reference to the containing database
table (str): Name of the RDS table
"""
self.add_field(name,
name,
type = "SqlDataNode",
database = database,
table = table,
selectQuery = "select * from #{table}")
def add_ddb_table(self, name, table, read_percent="0.25", write_percent="0.25"):
"""Add a DynamoDB table definition
Args:
name (str): Name of the resource
table (str): Name of the DynamoDB table
read_percent (str|float): Read Throughput Percentage (ex 0.25)
write_percent (str|float): Write Throughput Percentage (ex 0.25)
"""
self.add_field(name,
name,
type = "DynamoDBDataNode",
readThroughputPercent = read_percent,
writeThroughputPercent = write_percent,
tableName = table)
def add_s3_bucket(self, name, s3_directory):
"""Add a S3 bucket
Args:
name (str): Name of the resource
s3_directory (uri): S3 URI of the directory to expose
"""
self.add_field(name,
name,
type = "S3DataNode",
directoryPath = s3_directory)
def add_rds_copy(self, name, source, destination, runs_on=None):
"""Add a RDS Copy Activity
Args:
name (str): Name of the resource
source (Ref): Source RDS Table
destination (Ref): S3 data destination
runs_on (Ref): EC2 instance used to run the copy
"""
self.add_field(name,
name,
type = "CopyActivity",
input = source,
output = destination,
runsOn = runs_on)
def add_emr_copy(self, name, source, destination, runs_on=None, region='us-east-1', export=True):
"""Add a EMR / DynamoDB Copy Activity
Args:
name (str): Name of the resource
source (Ref): DynamoDB table or S3 bucket
destination (Ref): S3 bucket or DynamoDB table
runs_on (Ref): EMR cluster to run the copy
region (str): The AWS region
export (bool): If the copy is an export or import
"""
step = "s3://dynamodb-emr-{region}/emr-ddb-storage-handler/2.1.0/emr-ddb-2.1.0.jar,org.apache.hadoop.dynamodb.tools.DynamoDb{port},#{{{dir_type}.directoryPath}},#{{{tbl_type}.tableName}},#{{{rate}ThroughputPercent}}".format(
region = region,
port = "Export" if export else "Import",
dir_type = "output" if export else "input",
tbl_type = "input" if export else "output",
rate = "input.read" if export else "output.write",
)
self.add_field(name,
name,
type = "EmrActivity",
input = source,
output = destination,
runsOn = runs_on,
maximumRetries = "2",
step = step,
resizeClusterBeforeRunning = "true")
def add_shell_command(self, name, command, source=None, destination=None, runs_on=None):
"""Add a Shell Command
Args:
name (str): Name of the resource
command (str): Shell command to run
source (Ref): S3 bucket of input data
destination (Ref): S3 bucket for output data
runs_on (Ref): EC2 Instance on which to run the command
"""
self.add_field(name,
name,
type = "ShellCommandActivity",
input = source,
output = destination,
runsOn = runs_on,
stage = "true",
command = command)
```
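Putting the builder together: the sketch below defines a pipeline that exports a DynamoDB table to S3 through an EMR cluster and then registers it with the Data Pipeline helpers from `lib/aws.py` shown earlier. The table name, bucket URI, and pipeline name are placeholders, and `fmt='DP'` is used because the definition is pushed directly to the Data Pipeline API rather than through CloudFormation.
```python
# Hypothetical DynamoDB -> S3 export pipeline (all names and URIs are placeholders).
from lib.datapipeline import DataPipeline, Ref
from lib import aws

pipeline = DataPipeline(log_uri='s3://example-backup/logs', fmt='DP')
pipeline.add_emr_cluster('BackupCluster', region='us-east-1')
pipeline.add_ddb_table('SourceTable', 'example.ddb.table')
pipeline.add_s3_bucket('BackupBucket', 's3://example-backup/ddb')
pipeline.add_emr_copy('ExportJob',
                      Ref('SourceTable'),
                      Ref('BackupBucket'),
                      runs_on=Ref('BackupCluster'),
                      export=True)

# session would come from a BossConfiguration / boto3 Session
# pipeline_id = aws.create_data_pipeline(session, 'example-backup', pipeline)
# aws.activate_data_pipeline(session, pipeline_id)
```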
#### File: boss-manage/lib/utils.py
```python
import sys
import os
import subprocess
import shlex
import getpass
import string
import warnings
import time
from contextlib import contextmanager
@contextmanager
def open_(filename, mode='r'):
"""Custom version of open that understands stdin/stdout"""
is_std = filename is None or filename == '-'
if is_std:
if 'r' in mode:
fh = sys.stdin
else:
fh = sys.stdout
else:
fh = open(filename, mode)
try:
yield fh
finally:
if not is_std:
fh.close()
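# Usage sketch: because open_() treats None or '-' as the standard streams, callers
# can accept an optional output path without special-casing the console, e.g.
#   with open_(args.output, 'w') as fh:
#       fh.write(data)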
def get_command(action=None):
argv = sys.argv[:]
if action:
# DP HACK: hardcoded list of supported actions, should figure out something else
actions = ["create", "update", "delete", "post-init", "pre-init", "generate"]
argv = [action if a in actions else a for a in argv]
return " ".join(argv)
def json_sanitize(data):
    # Escape backslashes first, then double quotes, so the result is safe inside a JSON string
    return (data.replace('\\', '\\\\')
                .replace('"', '\\"'))
def python_minifiy(file):
"""Outputs a minified version of the given Python file.
Runs pyminifier on the given file. The minified filename has '.min'
added before the '.py' extension. This function is used to help code
fit under the 4k limit when uploading lambda functions, directly, as
opposed to pointing to a zip file in S3. The minification process
strips out all comments and uses minimal whitespace.
Example: lambda.py => lambda.min.py
Args:
file (string): File name of Python file to minify.
Returns:
(string): File name of minified file.
Raises:
(subprocess.CalledProcessError): on a non-zero return code from pyminifier.
"""
file_parts = os.path.splitext(file)
min_filename = file_parts[0] + '.min' + file_parts[1]
cmd = 'pyminifier -o ' + min_filename + ' ' + file
result = subprocess.run(
shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
print(result.stderr)
# Package up exception with output and raise if there was a failure.
result.check_returncode()
return min_filename
def get_commit():
"""Get the git commit hash of the current directory.
Returns:
(string) : The git commit hash or "unknown" if it could not be located
"""
try:
cmd = "git rev-parse HEAD"
result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)
return result.stdout.decode("utf-8").strip()
except:
return "unknown"
def get_submodule_commit(submodule_path):
try:
cmd = "git submodule status"
result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)
for line in result.stdout.decode("utf-8").splitlines():
if submodule_path in line:
commit, _ = line.strip().split(' ', 1)
# Remove the indicator that the commit was changed but not committed
if commit[0] == '+':
commit = commit[1:]
return commit
except:
pass
return "unknown"
def keypair_to_file(keypair):
"""Looks for the SSH private key for keypair under ~/.ssh/
Prints an error if the file doesn't exist.
Args:
keypair (string) : AWS keypair to locate a private key for
Returns:
        (string|None) : SSH private key file path or None if the private key doesn't exist.
"""
file = os.path.expanduser("~/.ssh/{}.pem".format(keypair))
if not os.path.exists(file):
print("Error: SSH Key '{}' does not exist".format(file))
return None
return file
def password(what):
"""Prompt the user for a password and verify it.
If password and verify don't match the user is prompted again
Args:
what (string) : What password to enter
Returns:
(string) : Password
"""
while True:
pass_ = getpass.getpass("{} Password: ".format(what))
pass__ = getpass.getpass("Verify {} Password: ".format(what))
if pass_ == pass__:
return pass_
else:
print("Passwords didn't match, try again.")
def generate_password(length=16):
"""Generate an alphanumeric password of the given length.
Args:
length (int) : length of the password to be generated
Returns:
(string) : password
"""
chars = string.ascii_letters + string.digits #+ string.punctuation
return "".join([chars[c % len(chars)] for c in os.urandom(length)])
def find_dict_with(list_of_dicts, key, value):
"""
finds the first dictionary containing the key, value pair.
Args:
list_of_dicts: a list of dictionaries
key: key to search for in the dictionaries
value: the value that should be assigned to the key
Returns:
returns the first dictionary containing the key,value pair.
"""
for d in list_of_dicts:
if key in d:
if d[key] == value:
return d;
return None
def deprecated(msg = "The called function is now deprecated"):
warnings.warn(msg, DeprecationWarning, stacklevel=2)
def parse_hostname(hostname):
# handle one of the following
# - index.machine_name.bosslet_name
# - index.machine_name
# - machine_name.bosslet_name
# - machine_name
# where bosslet_name may contain a '.'
# NOTE: Doesn't support passing an IP address
# split out the index, machine name, and bosslet name
try:
# assume index.machine.bosslet
tmp = hostname.split(".", 1)
idx = int(tmp[0])
try:
_, machine, bosslet_name = hostname.split(".", 2)
except ValueError: # assume no bosslet_name
# handle just index.machine
_, machine = tmp
bosslet_name = None
except ValueError: # assume no index
idx = None
try:
# handle just machine.bosslet
machine, bosslet_name = tmp
except ValueError: # assume no bosslet_name
# handle just machine
machine, bosslet_name = tmp[0], None
return (idx, machine, bosslet_name)
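# --- Illustrative behavior (not part of the original module; hostnames are made up) ---
#   parse_hostname("0.endpoint.prod.boss")  -> (0, "endpoint", "prod.boss")
#   parse_hostname("0.endpoint")            -> (0, "endpoint", None)
#   parse_hostname("endpoint.prod.boss")    -> (None, "endpoint", "prod.boss")
#   parse_hostname("endpoint")              -> (None, "endpoint", None)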
def run(cmd, input=None, env_extras=None, checkreturn=True, shell=False, **kwargs):
"""Run a command and stream the output
Args:
cmd (str): String with the command to run
input (optional[str]): String with data to sent to the processes stdin
env_extras (optional[dict]): Dictionary of extra environmental variable to provide
checkreturn (bool): If the return code should be checked and an exception raised if not zero
kwargs: Other arguments to pass to the Popen constructor
Return:
int: The return code of the process
Raises:
        Exception: If checkreturn is True and the return code is not 0 (zero)
"""
if env_extras is not None:
env = os.environ.copy()
env.update(env_extras)
else:
env = None
proc = subprocess.Popen(shlex.split(cmd) if not shell else cmd,
env=env,
shell=shell,
                            bufsize=1, # line buffered
#universal_newlines=True, # so we don't have to encode/decode
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE if input is not None else None,
**kwargs)
    if input is not None:
        proc.stdin.write(input.encode())
        proc.stdin.close()
for line in proc.stdout:
print(line.decode('utf8'), end='', flush=True)
while proc.poll() is None:
time.sleep(1) # sometimes stdout is closed before the process has completely finished
if checkreturn:
if proc.returncode != 0:
raise Exception("Return code: {}".format(proc.returncode))
return proc.returncode
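# --- Illustrative usage (not part of the original module; commands are assumptions) ---
# run() streams the command's output to stdout as it is produced and can feed
# a string to the process over stdin.
def _run_example():
    run("cat -", input="streamed via stdin\n")            # echoes the input, returns 0
    return run("ls /does-not-exist", checkreturn=False)   # non-zero exit, no exception raised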
```
#### File: boss-manage/test/test_downsample.py
```python
import argparse
import blosc
import boto3
from boto3.dynamodb.conditions import Attr
import json
from multiprocessing.pool import Pool
import os
from sys import stdout
import alter_path
from lib import aws
from lib.hosts import PROD_ACCOUNT, DEV_ACCOUNT
from lib.names import AWSNames
from lib.configuration import BossParser
from bossutils.multidimensional import XYZ, Buffer
from bossutils.multidimensional import range as xyz_range
from spdb.c_lib.ndtype import CUBOIDSIZE
from spdb.spatialdb import AWSObjectStore, Cube
from spdb.project.basicresource import BossResourceBasic
"""
Script for running the downsample process against test data.
By default, fills the first 2x2x2 cubes of the channel with random data. If
coordinate frame extents are not provided, defaults to a large frame (see parse_args()).
The test places data directly in the S3 cuboid bucket. Artificial ids are used
for the collection and experiment so that there is no chance of overwriting
actual data in the cuboid bucket. To do so, letters are used for the ids of
the collection and experiment instead of integers.
Existing test data may be reused by providing the `--noupload` flag. This
flag should also be used if you wish to test against an empty dataset.
Provide a channel id that's never been populated along with the `--noupload`
flag.
***************************************
Prerequisites
***************************************
Before running, place the parents of spdb and bossutils in the PYTHONPATH
environment variable.
AWS credentials are also required since S3 and step functions are accessed.
Sample usage:
./test_downsample.py -f 4096 4096 160 integration.boss 1234
This will fill a coordinate frame with x-stop: 4096, y-stop: 4096, z-stop: 160
using channel id 1234 and running in the integration stack.
To run the test again with the same data, use:
./test_downsample.py --noupload -f 4096 4096 160 integration.boss 1234
"""
# Format string for building the first part of step function's arn.
SFN_ARN_PREFIX_FORMAT = 'arn:aws:states:{}:{}:stateMachine:'
LAMBDA_ARN_FORMAT = 'arn:aws:lambda:{}:{}:function:{}'
def ceildiv(a, b):
"""
Round up the result of a / b.
From https://stackoverflow.com/questions/14822184/is-there-a-ceiling-equivalent-of-operator-in-python/17511341#17511341
"""
return -(-a // b)
class S3Bucket(object):
"""
Wrapper for calls to S3.
"""
def __init__(self, session, bucket_name):
"""
Args:
session (boto3.Session): Open session.
bucket_name (str):
"""
self.bucket = bucket_name
self.s3 = session.client('s3')
def put(self, key, data):
"""
Upload to bucket.
Args:
key (string): S3 object key.
data (bytes|file-like object):
"""
self.s3.put_object(Key=key, Body=data, Bucket=self.bucket)
class TestDownsample(object):
def __init__(self, bosslet_config, chan_id, frame):
"""
Args:
bosslet_config (BossConfiguration): Boss Configuration of the stack when downsample should be executed
chan_id (str): Id of channel. Use letters to avoid collisions with real data.
frame (list[int]): Coordinate frame x/y/z stops.
"""
self.bosslet_config = bosslet_config
self.chan_id = chan_id
self.frame = frame
def get_image_dict(self):
"""
Generate an initial set of parameters to use to instantiate a basic
resource for an IMAGE dataset.
Note that strings are not used for the ids of the collection, experiment,
or channel. This is to prevent accidentally overwriting real data in the
cuboid bucket.
Returns:
(dict) - a dictionary of data to initialize a basic resource
"""
data = {}
data['boss_key'] = 'foo'
data['lookup_key'] = 'collfake&expfake&{}'.format(self.chan_id)
data['collection'] = {}
data['collection']['name'] = "col1"
data['collection']['description'] = "Test collection 1"
data['coord_frame'] = {}
data['coord_frame']['name'] = "coord_frame_1"
data['coord_frame']['description'] = "Test coordinate frame"
data['coord_frame']['x_start'] = 0
data['coord_frame']['x_stop'] = self.frame[0]
data['coord_frame']['y_start'] = 0
data['coord_frame']['y_stop'] = self.frame[1]
data['coord_frame']['z_start'] = 0
data['coord_frame']['z_stop'] = self.frame[2]
data['coord_frame']['x_voxel_size'] = 4
data['coord_frame']['y_voxel_size'] = 4
data['coord_frame']['z_voxel_size'] = 35
data['coord_frame']['voxel_unit'] = "nanometers"
data['experiment'] = {}
data['experiment']['name'] = "exp1"
data['experiment']['description'] = "Test experiment 1"
data['experiment']['num_hierarchy_levels'] = 7
data['experiment']['hierarchy_method'] = 'anisotropic'
data['experiment']['num_time_samples'] = 0
data['experiment']['time_step'] = 0
data['experiment']['time_step_unit'] = "na"
data['channel'] = {}
data['channel']['name'] = "ch1"
data['channel']['description'] = "Test channel 1"
data['channel']['type'] = "image"
data['channel']['datatype'] = 'uint8'
data['channel']['base_resolution'] = 0
data['channel']['sources'] = []
data['channel']['related'] = []
data['channel']['default_time_sample'] = 0
data['channel']['downsample_status'] = "NOT_DOWNSAMPLED"
return data
def get_downsample_args(self):
"""
Get arguments for starting the downsample.
Returns:
(dict): Arguments.
"""
names = self.bosslet_config.names
sfn_arn_prefix = SFN_ARN_PREFIX_FORMAT.format(self.bosslet_config.REGION,
self.bosslet_config.ACCOUNT_ID)
start_args = {
# resolution_hierarchy_sfn is used by the test script, but not by the
            # actual resolution hierarchy step function that the script invokes.
'resolution_hierarchy_sfn': '{}{}'.format(sfn_arn_prefix, names.sfn.resolution_hierarchy),
'downsample_volume_lambda': LAMBDA_ARN_FORMAT.format(self.bosslet_config.REGION,
self.bosslet_config.ACCOUNT_ID,
                                                                 names.lambda_.downsample_volume),
'test': True,
'collection_id': 'collfake',
'experiment_id': 'expfake',
'channel_id': self.chan_id,
'annotation_channel': False,
'data_type': 'uint8',
's3_index': names.s3.s3_index,
's3_bucket': names.s3.cuboid_bucket,
'x_start': 0,
'y_start': 0,
'z_start': 0,
'x_stop': self.frame[0],
'y_stop': self.frame[1],
'z_stop': self.frame[2],
'resolution': 0,
'resolution_max': 7,
'res_lt_max': True,
'type': 'anisotropic',
'iso_resolution': 3,
'aws_region': self.bosslet_config.REGION,
}
return start_args
def upload_data(self, args):
"""
Fill the coord frame with random data.
Args:
args (dict): This should be the dict returned by get_downsample_args().
"""
cuboid_size = CUBOIDSIZE[0]
x_dim = cuboid_size[0]
y_dim = cuboid_size[1]
z_dim = cuboid_size[2]
resource = BossResourceBasic()
resource.from_dict(self.get_image_dict())
resolution = 0
ts = 0
version = 0
# DP HACK: uploading all cubes will take longer than the actual downsample
# just upload the first volume worth of cubes.
# The downsample volume lambda will only read these cubes when
# passed the 'test' argument.
bucket = S3Bucket(self.bosslet_config.session, args['s3_bucket'])
print('Uploading test data', end='', flush=True)
for cube in xyz_range(XYZ(0,0,0), XYZ(2,2,2)):
key = AWSObjectStore.generate_object_key(resource, resolution, ts, cube.morton)
key += "&0" # Add the version number
#print('morton: {}'.format(cube.morton))
#print('key: {}'.format(key))
#print("{} -> {} -> {}".format(cube, cube.morton, key))
cube = Cube.create_cube(resource, [x_dim, y_dim, z_dim])
cube.random()
data = cube.to_blosc()
bucket.put(key, data)
print('.', end='', flush=True)
print(' Done uploading.')
def delete_data(self, args):
lookup_prefix = '&'.join([args['collection_id'], args['experiment_id'], args['channel_id']])
client = self.bosslet_config.session.client('s3')
args_ = { 'Bucket': args['s3_bucket'] }
resp = { 'KeyCount': 1 }
count = 0
spin = ['|', '/', '-', '\\']
print("Deleting S3 test cubes, this may take a long time")
        while resp['KeyCount'] > 0:
            resp = client.list_objects_v2(**args_)
            # NextContinuationToken is only present when the listing is truncated
            if 'NextContinuationToken' in resp:
                args_['ContinuationToken'] = resp['NextContinuationToken']
            else:
                args_.pop('ContinuationToken', None)
            print("\rDeleting Cubes: Querying", end='')
            for obj in resp.get('Contents', []):
                if lookup_prefix in obj['Key']:
                    count += 1
                    print("\rDeleting Cubes: {}".format(spin[count % 4]), end='')
                    client.delete_object(Bucket = args['s3_bucket'],
                                         Key = obj['Key'])
print("Deleted {} cubes".format(count))
def delete_index_keys(self, args):
table = self.bosslet_config.session.resource('dynamodb').Table(args['s3_index'])
lookup_prefix = '&'.join([args['collection_id'], args['experiment_id'], args['channel_id']])
resp = {'Count': 1}
while resp['Count'] > 0:
resp = table.scan(FilterExpression = Attr('lookup-key').begins_with(lookup_prefix))
print("Removing {} S3 index keys".format(resp['Count']))
for item in resp['Items']:
key = {
'object-key': item['object-key'],
'version-node': item['version-node'],
}
table.delete_item(Key = key)
def parse_args():
"""
Parse command line or config file.
Returns:
(Namespace): Parsed arguments.
"""
parser = BossParser(
description='Script for testing downsample process. ' +
'To supply arguments from a file, provide the filename prepended with an `@`.',
fromfile_prefix_chars = '@')
parser.add_argument('--frame', '-f',
nargs=3,
type=int,
default=[277504, 277504, 1000],
help='Coordinate frame max extents (default: 277504, 277504, 1000)')
parser.add_argument('--noupload',
action='store_true',
default=False,
help="Don't upload any data to the channel")
parser.add_argument('--leave-index',
action = 'store_true',
default = False,
help = "Don't remove S3 Index table test keys")
parser.add_argument('--cleanup',
action = 'store_true',
default = False,
help = 'Remove S3 cubes and S3 index table keys related to testing')
parser.add_bosslet()
parser.add_argument(
'channel_id',
help='Id of channel that will hold test data')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
ds_test = TestDownsample(args.bosslet_config, args.channel_id, args.frame)
start_args = ds_test.get_downsample_args()
if args.cleanup:
        ds_test.delete_index_keys(start_args)
        ds_test.delete_data(start_args)
        import sys; sys.exit(0)
    if not args.leave_index:
        ds_test.delete_index_keys(start_args)
if not args.noupload:
ds_test.upload_data(start_args)
sfn = args.bosslet_config.session.client('stepfunctions')
resp = sfn.start_execution(
stateMachineArn=start_args['resolution_hierarchy_sfn'],
input=json.dumps(start_args)
)
print(resp)
``` |
{
"source": "jhuapl-boss/boss-test",
"score": 3
} |
#### File: boss-test/autoscale_test/lambda.py
```python
import json
import boto3
from time import time as now
from urllib2 import Request, urlopen, HTTPError
from urllib import urlencode
def request(queue, url, headers = {}):
msg = {'start': now()}
try:
req = Request(url,
headers = headers)
msg['req_start'] = now()
resp = urlopen(req)
msg['read_start'] = msg['req_stop'] = now()
msg['code'] = resp.getcode()
data = resp.read()
msg['bytes'] = len(data)
msg['read_stop'] = now()
except HTTPError as e:
msg['error_start'] = now()
msg['code'] = e.code
msg['error'] = str(e)
msg['error_stop'] = now()
except Exception as e:
msg['error'] = str(e)
msg['stop'] = now()
try:
queue.send_message(MessageBody = json.dumps(msg))
except Exception as e:
print("{}: {}".format(e, msg))
def handler(event, context):
token = event['token']
#urls = event['urls']
sqs = boto3.resource('sqs')
queue = sqs.Queue(event['queue'])
input_queue = sqs.Queue(event['input'])
headers = {
'Authorization': 'Token {}'.format(token),
'Accept': 'application/blosc',
}
#for url in urls:
# request(queue, url,
# headers = headers)
retry = 2
while retry > 0:
msgs = input_queue.receive_messages(WaitTimeSeconds=20, MaxNumberOfMessages=1)
if len(msgs) == 0:
retry -= 1
continue
msg = msgs[0]
request(queue, msg.body, headers = headers)
input_queue.delete_messages(Entries=[{'Id':'X', 'ReceiptHandle': msg.receipt_handle}])
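# --- Illustrative event (not part of the original module; values are assumptions) ---
# The handler above expects an event shaped roughly like this. URLs to request
# are read one at a time from the 'input' queue and per-request timing records
# are written to the 'queue' queue.
#
# {
#     "token": "<Boss API token>",
#     "queue": "https://sqs.us-east-1.amazonaws.com/123456789012/results",
#     "input": "https://sqs.us-east-1.amazonaws.com/123456789012/urls"
# }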
```
#### File: boss-test/autoscale_test/resources.py
```python
import io
import zipfile
import time
import json
from contextlib import contextmanager
@contextmanager
def create_queue(session, name):
sqs = session.resource('sqs')
client = session.client('sqs')
resp = client.create_queue(QueueName = name,
Attributes = {
})
url = resp['QueueUrl']
try:
yield sqs.Queue(url)
finally:
resp = client.delete_queue(QueueUrl = url)
#print("Waiting 60 seconds for queue to be deleted")
#time.sleep(60)
@contextmanager
def create_role(session):
policy = {
'Version': '2012-10-17',
'Statement': [{
'Effect': 'Allow',
'Principal': {
'Service': 'lambda.amazonaws.com',
},
'Action': 'sts:AssumeRole',
}]
}
client = session.client('iam')
resp = client.create_role(RoleName = 'AutoScaleTest',
AssumeRolePolicyDocument = json.dumps(policy))
role_arn = resp['Role']['Arn']
try:
policy = {
'Version': '2012-10-17',
'Statement': [{
'Effect': 'Allow',
'Action': [
'sqs:SendMessage',
'sqs:ReceiveMessage',
'sqs:DeleteMessage',
],
'Resource': [
'*'
],
}]
}
resp = client.create_policy(PolicyName = 'AutoScaleTest',
PolicyDocument = json.dumps(policy))
policy_arn = resp['Policy']['Arn']
try:
resp = client.attach_role_policy(RoleName = 'AutoScaleTest',
PolicyArn = policy_arn)
try:
                time.sleep(6)  # wait for the role to become available to lambda
yield role_arn
finally:
resp = client.detach_role_policy(RoleName = 'AutoScaleTest',
PolicyArn = policy_arn)
finally:
resp = client.delete_policy(PolicyArn = policy_arn)
finally:
resp = client.delete_role(RoleName = 'AutoScaleTest')
@contextmanager
def create_lambda(session, role, timeout):
with open('lambda.py', 'r') as fh:
lambda_code = fh.read().replace('"', '\"').replace('\\', '\\\\')
code = io.BytesIO()
archive = zipfile.ZipFile(code, mode='w')
archive_file = zipfile.ZipInfo('index.py')
archive_file.external_attr = 0o777 << 16
archive.writestr(archive_file, lambda_code)
archive.close()
lambda_code = code.getvalue()
client = session.client('lambda')
resp = client.create_function(FunctionName = 'AutoScaleTest',
Runtime = 'python2.7',
Role = role,
Handler = 'index.handler',
Code = {'ZipFile': lambda_code},
Description = 'AutoScale test lambda',
Timeout = timeout,
MemorySize = 128) # MBs, multiple of 64
try:
yield resp['FunctionArn']
finally:
resp = client.delete_function(FunctionName = 'AutoScaleTest')
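# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how the context managers above might be nested so that
# every resource is cleaned up even if the test fails part-way. The session
# object, queue name, and timeout value are assumptions.
def _example_setup(session):
    with create_queue(session, 'AutoScaleTestResults') as queue, \
         create_role(session) as role_arn, \
         create_lambda(session, role_arn, timeout=60) as lambda_arn:
        # invoke the lambda / drive the test here; resources are deleted on exit
        print(queue.url, role_arn, lambda_arn)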
```
#### File: system_test/tests/simple_boss_test.py
```python
import systemtest
import time
from utils import boss_test_utils, numpy_utils, plot_utils
class SimpleBossTest(systemtest.SystemTest):
""" System tests for the Boss API, testing different services.
    Intended to be an easy way to test the cutout, tile, and image services.
Properties (inherited):
class_config Static class dictionary (inherited from SystemTest)
parameters Parameters of the current test instance (inherited from SystemTest)
result Result of the current test instance (inherited from SystemTest)
Attributes (inherited):
"""
# Attributes:
__collection_resource = None
__coord_frame_resource = None
__experiment_resource = None
__channel_resource = None
__remote_resource = None
unit_size = [256, 256, 8, 1]
step_size = [256, 256, 8, 1]
steps = 1
cache_hit = False
cuboid_align = False
resolution = 0
delay = 0
@property
def channel_resource(self):
return type(self).__channel_resource
@property
def remote_resource(self):
return type(self).__remote_resource
@property
def experiment_resource(self):
return type(self).__experiment_resource
@property
def collection_resource(self):
return type(self).__collection_resource
@classmethod
def setUpClass(cls):
"""Use class configuration (+ defaults) to set up resources."""
super(SimpleBossTest, cls).setUpClass()
# Set up boss resources #
remote = boss_test_utils.get_remote()
# Default resources and configuration:
config = {
'collection':{'name': 'test_collection', 'delete':1},
'coord_frame':{'name': 'test_frame', 'delete':1},
'experiment':{'name': 'test_experiment', 'delete':1},
'channel':{'name': 'test_channel', 'delete':1, 'datatype':'uint8'},
'unit_size':[256, 256, 8, 1],
'step_size':[256, 256, 8, 1],
'steps':1
}
# Override certain defaults that appear in the class configuration:
if 'collection' in cls._class_config:
if 'name' in cls._class_config['collection']:
config['collection']['name'] = cls._class_config['collection']['name']
if 'delete' in cls._class_config['collection']:
config['collection']['delete'] = cls._class_config['collection']['delete']
if 'coord_frame' in cls._class_config:
if 'name' in cls._class_config['coord_frame']:
config['coord_frame']['name'] = cls._class_config['coord_frame']['name']
if 'delete' in cls._class_config['coord_frame']:
config['coord_frame']['delete'] = cls._class_config['coord_frame']['delete']
if 'experiment' in cls._class_config:
if 'name' in cls._class_config['experiment']:
config['experiment']['name'] = cls._class_config['experiment']['name']
if 'delete' in cls._class_config['experiment']:
config['experiment']['delete'] = cls._class_config['experiment']['delete']
if 'channel' in cls._class_config:
if 'name' in cls._class_config['channel']:
config['channel']['name'] = cls._class_config['channel']['name']
if 'delete' in cls._class_config['channel']:
config['channel']['delete'] = cls._class_config['channel']['delete']
if 'unit_size' in cls._class_config:
for i in range(0, min(4, len(cls._class_config['unit_size']))):
config['unit_size'][i] = int(cls._class_config['unit_size'][i]) # x y z (t)
if 'step_size' in cls._class_config:
for i in range(0, min(4, len(cls._class_config['step_size']))):
config['step_size'][i] = int(cls._class_config['step_size'][i]) # x y z (t)
if 'steps' in cls._class_config:
config['steps'] = int(cls._class_config['steps'])
# We are not interested in any other variables; set everything else to default calculations
x_sizes = [config['unit_size'][0] + (j * config['step_size'][0]) for j in range(0, config['steps'])]
y_sizes = [config['unit_size'][1] + (j * config['step_size'][1]) for j in range(0, config['steps'])]
z_sizes = [config['unit_size'][2] + (j * config['step_size'][2]) for j in range(0, config['steps'])]
t_sizes = [config['unit_size'][3] + (j * config['step_size'][3]) for j in range(0, config['steps'])]
config['coord_frame']['x_start'] = 0
config['coord_frame']['y_start'] = 0
config['coord_frame']['z_start'] = 0
scale = 500
config['coord_frame']['x_stop'] = scale * sum(x_sizes)
config['coord_frame']['y_stop'] = scale * sum(y_sizes)
config['coord_frame']['z_stop'] = scale * sum(z_sizes)
# Create resource object for each resource
cls.__collection_resource = boss_test_utils.set_collection_resource(
remote, config['collection'])
cls.__coord_frame_resource = boss_test_utils.set_coordinate_frame_resource(
remote, config['coord_frame'])
config['experiment']['num_time_samples'] = scale * sum(t_sizes)
experiment = dict((k,v) for k,v in config['experiment'].items())
experiment['collection_name'] = cls.__collection_resource.name
experiment['coord_frame'] = cls.__coord_frame_resource.name
cls.__experiment_resource = boss_test_utils.set_experiment_resource(
remote, experiment)
channel = dict((k,v) for k,v in config['channel'].items())
channel['collection_name'] = cls.__collection_resource.name
channel['experiment_name'] = cls.__experiment_resource.name
cls.__channel_resource = boss_test_utils.set_channel_resource(
remote, channel)
cls.__remote_resource = remote
# Drop anything else that was in the class config json
cls._class_config = config
# TODO: this may be incorrect
@classmethod
def tearDownClass(cls):
""" Delete resources (in order) if specified in the class configuration """
remote = boss_test_utils.get_remote()
# config = cls._class_config
try:
boss_test_utils.delete_channel(remote, cls.__channel_resource)
except:
print('Failed to delete channel {0}'.format(cls.__channel_resource.name))
try:
boss_test_utils.delete_experiment(remote, cls.__experiment_resource)
except:
print('Failed to delete experiment {0}'.format(cls.__experiment_resource.name))
try:
boss_test_utils.delete_coord_frame(remote, cls.__coord_frame_resource)
except:
print('Failed to delete coordinate frame {0}'.format(cls.__coord_frame_resource.name))
try:
boss_test_utils.delete_collection(remote, cls.__collection_resource)
except:
print('Failed to delete collection {0}'.format(cls.__collection_resource.name))
super(SimpleBossTest, cls).tearDownClass()
```
#### File: system_test/tests/sys_test_boss__base.py
```python
import systemtest
import time
from utils import boss_test_utils
class BossSystemTest(systemtest.SystemTest):
# Protected attributes:
_channel = None # Each test uses either the "default" (initial) channel or a new channel
_version = None # Boss version
# Class attributes:
__collection = None
__coordinate_frame = None
__experiment = None
__default_channel = None
__write_delay = float(5.0) # Default time (in seconds) to wait after writing to the channel #
@property
def default_channel(self):
return type(self).__default_channel
@property
def default_write_delay(self):
return type(self).__write_delay
@classmethod
def setUpClass(cls):
""" Set up the default channel as specified in the class configuration """
super(BossSystemTest, cls).setUpClass()
if bool(cls._class_config):
# Set up boss resources #
remote = boss_test_utils.get_remote()
cls.__collection = boss_test_utils.set_collection_resource(
remote,
cls._class_config['collection'])
cls.__coordinate_frame = boss_test_utils.set_coordinate_frame_resource(
remote,
cls._class_config['coordinate_frame'])
cls._class_config['experiment']['collection_name'] = cls.__collection.name
cls._class_config['experiment']['coord_frame'] = cls.__coordinate_frame.name
            cls.__experiment = boss_test_utils.set_experiment_resource(
remote,
cls._class_config['experiment'])
cls._class_config['channel']['collection_name'] = cls.__collection.name
            cls._class_config['channel']['experiment_name'] = cls.__experiment.name
cls.__default_channel = boss_test_utils.set_channel_resource(
remote,
cls._class_config['channel'])
def setUp(self):
"""Called before a single test begins. Set up a new remote for the current test """
super(BossSystemTest, self).setUp()
if 'version' in self.class_config:
self._version = self.class_config['version']
else:
self._version = self.parser_args.version
def validate_params(self, test_params=None, *args, **kwargs):
"""Call this at the start of the system test method """
if test_params is not None:
if 'channel' in test_params:
remote = boss_test_utils.get_remote()
test_params['channel']['collection_name'] = self.default_channel.collection_name
test_params['channel']['experiment_name'] = self.default_channel.experiment_name
self._channel = boss_test_utils.set_channel_resource(
remote,
test_params['channel'])
time.sleep(self.default_write_delay)
else:
self._channel = self.default_channel
def tearDown(self):
"""Called after a single test completes"""
if ('delete' in self.parameters) and bool(self.parameters['delete']) and \
(self._channel is not self.default_channel):
remote = boss_test_utils.get_remote()
remote.delete_project(self._channel)
super(BossSystemTest, self).tearDown()
@classmethod
def tearDownClass(cls):
""" Delete resources (in order) if specified in the class configuration """
_config, remote = cls._class_config, boss_test_utils.get_remote()
def delete_resource(resource):
try:
if bool(resource):
remote.delete_project(resource)
return None
except:
return resource
# Delete is TRUE by default
if ('channel' not in _config) or ('delete' not in _config['channel']) or bool(_config['channel']['delete']):
cls.__default_channel = delete_resource(cls.__default_channel)
if ('delete' not in _config['experiment']) or bool(_config['experiment']['delete']):
cls.__default_channel = delete_resource(cls.__default_channel)
cls.__experiment = delete_resource(cls.__experiment)
if ('delete' not in _config['coordinate_frame']) or bool(_config['coordinate_frame']['delete']):
cls.__default_channel = delete_resource(cls.__default_channel)
cls.__experiment = delete_resource(cls.__experiment)
cls.__coordinate_frame = delete_resource(cls.__coordinate_frame)
if ('delete' not in _config['collection']) or bool(_config['collection']['delete']):
cls.__default_channel = delete_resource(cls.__default_channel)
cls.__experiment = delete_resource(cls.__experiment)
cls.__collection = delete_resource(cls.__collection)
super(BossSystemTest, cls).tearDownClass()
```
#### File: system_test/utils/boss_test_utils.py
```python
import requests, time, inspect
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
# DEFAULT_DOMAIN = 'integration.theboss.io'
DEFAULT_VERSION = 0.7
# Define cache dimensions #
CACHE_SIZE_X = 512
CACHE_SIZE_Y = 512
CACHE_SIZE_Z = 16
def get_remote(config=None) -> BossRemote:
""" Create a new Boss remote service
Returns:
intern.remote.boss.BossRemote : New remote
"""
remote = BossRemote(config)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
remote.project_service.session_send_opts = {'verify': False}
remote.metadata_service.session_send_opts = {'verify': False}
remote.volume_service.session_send_opts = {'verify': False}
return remote
def get_host(remote: BossRemote, section: str='Default') -> str:
"""Extract the default host name from a BossRemote configuration"""
return remote._config._sections[section]['host']
def set_channel_resource(remote: BossRemote, params: dict) -> ChannelResource:
"""Use the arguments in the class config to create a channel resource object"""
if 'name' not in params:
params['name'] = 'channel{0}'.format(hex(round(time.time()))[2:])
param_names = [str(p.name) for p in inspect.signature(ChannelResource).parameters.values()]
filtered_params = {k: v for k, v in list(params.items()) if k in param_names} # Filter unexpected arguments
chan_resource = ChannelResource(**filtered_params)
if chan_resource.name in remote.list_channels(chan_resource.coll_name, chan_resource.exp_name):
channel = remote.update_project(chan_resource.name, chan_resource)
print('Updated channel {0}'.format(chan_resource.name))
else:
channel = remote.create_project(chan_resource)
print('Created channel {0}'.format(chan_resource.name))
return channel
def delete_channel(remote: BossRemote, channel, coll_name=None, exp_name=None):
chan_obj = None
if isinstance(channel, ChannelResource):
chan_obj = channel
elif isinstance(channel, str) and coll_name is not None and exp_name is not None:
chan_name = channel
        chan_obj = remote.get_project(ChannelResource(name=chan_name, collection_name=coll_name, experiment_name=exp_name))
if chan_obj is not None:
print('Deleting channel "{0}"...'.format(chan_obj.name))
remote.delete_project(chan_obj)
def set_experiment_resource(remote: BossRemote, params: dict) -> ExperimentResource:
"""Use the arguments in the class config to create an experiment resource object"""
if 'name' not in params:
params['name'] = 'experiment{0}'.format(hex(round(time.time()))[2:])
param_names = [str(p.name) for p in inspect.signature(ExperimentResource).parameters.values()]
filtered_params = {k: v for k, v in list(params.items()) if k in param_names} # Filter unexpected arguments
exp_resource = ExperimentResource(**filtered_params)
if exp_resource.name in remote.list_experiments(exp_resource.coll_name):
experiment = remote.update_project(exp_resource.name, exp_resource)
print('Updated experiment {0}'.format(exp_resource.name))
else:
experiment = remote.create_project(exp_resource)
print('Created experiment {0}'.format(exp_resource.name))
return experiment
def delete_experiment(remote: BossRemote, experiment, coll_name=None):
exp_obj = None
exp_name = None
if isinstance(experiment, ExperimentResource):
exp_obj = experiment
exp_name = experiment.name
coll_name = experiment.coll_name
elif isinstance(experiment, str) and coll_name is not None:
exp_name = experiment
exp_obj = remote.get_project(ExperimentResource(name=exp_name, collection_name=coll_name))
if exp_name is not None:
print('Deleting channels of experiment "{0}"...'.format(exp_name))
chan_names = remote.list_channels(coll_name, exp_name)
for n in chan_names:
            delete_channel(remote, n, coll_name, exp_name)
if exp_obj is not None:
print('Deleting experiment "{0}"...'.format(exp_obj.name))
remote.delete_project(exp_obj)
def set_collection_resource(remote: BossRemote, params: dict) -> CollectionResource:
"""Use the arguments in the class config to create a collection resource object"""
if 'name' not in params:
params['name'] = 'collection{0}'.format(hex(round(time.time()))[2:])
param_names = [str(p.name) for p in inspect.signature(CollectionResource).parameters.values()]
filtered_params = {k: v for k, v in list(params.items()) if k in param_names} # Filter unexpected arguments
collection_resource = CollectionResource(**filtered_params)
if collection_resource.name in remote.list_collections():
collection = remote.update_project(collection_resource.name, collection_resource)
print('Updated collection {0}'.format(collection_resource.name))
else:
collection = remote.create_project(collection_resource)
print('Created collection {0}'.format(collection_resource.name))
return collection
def delete_collection(remote: BossRemote, collection):
coll_obj = None
coll_name = None
if isinstance(collection, CollectionResource):
coll_obj = collection
coll_name = collection.name
elif isinstance(collection, str):
coll_name = collection
coll_obj = remote.get_project(CollectionResource(name=coll_name))
if coll_name is not None:
print('Deleting experiments of collection "{0}"...'.format(coll_name))
exp_names = remote.list_experiments(coll_name)
for n in exp_names:
delete_experiment(remote, n, coll_name)
if coll_obj is not None:
print('Deleting collection "{0}"...'.format(coll_name))
remote.delete_project(coll_obj)
def set_coordinate_frame_resource(remote: BossRemote, params: dict) -> CoordinateFrameResource:
"""Use the arguments in the class config to create a frame resource object"""
if 'name' not in params:
params['name'] = 'frame{0}'.format(hex(round(time.time()))[2:])
param_names = [str(p.name) for p in inspect.signature(CoordinateFrameResource).parameters.values()]
filtered_params = {k: v for k, v in list(params.items()) if k in param_names} # Filter unexpected arguments
frame_resource = CoordinateFrameResource(**filtered_params)
if frame_resource.name in remote.list_coordinate_frames():
coordinate_frame = remote.update_project(frame_resource.name, frame_resource)
print('Updated frame {0}'.format(frame_resource.name))
else:
coordinate_frame = remote.create_project(frame_resource)
print('Created frame {0}'.format(frame_resource.name))
return coordinate_frame
def delete_coord_frame(remote: BossRemote, coord_frame):
frame_obj = None
# frame_name = None
if isinstance(coord_frame, CoordinateFrameResource):
frame_obj = coord_frame
# frame_name = coord_frame.name
elif isinstance(coord_frame, str):
frame_name = coord_frame
frame_obj = remote.get_project(CoordinateFrameResource(name=frame_name))
if frame_obj is not None:
print('Deleting coordinate frame "{0}"...'.format(frame_obj.name))
remote.delete_project(frame_obj)
def post_obj(remote: BossRemote, url: str, format_accept: str='*/*') -> requests.Response:
"""POST request"""
token = remote.token_project
headers = {'content-type': 'application/json',
'Accept': format_accept,
'Authorization': 'Token {0}'.format(token)}
return requests.post(url=url, headers=headers)
def get_obj(remote: BossRemote, url: str, format_accept: str='*/*') -> requests.Response:
"""GET request"""
token = remote.token_project
headers = {'content-type': 'application/json',
'Accept': format_accept,
'Authorization': 'Token {0}'.format(token)}
return requests.get(url, params=None, headers=headers)
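# --- Illustrative usage (not part of the original module; resource names are assumptions) ---
# A minimal sketch of building a collection/frame/experiment/channel stack with
# the helpers above; unknown keys in the parameter dicts are filtered out automatically.
def _example_setup():
    remote = get_remote()
    coll = set_collection_resource(remote, {'name': 'demo_collection'})
    frame = set_coordinate_frame_resource(remote, {'name': 'demo_frame',
                                                   'x_start': 0, 'x_stop': 1024,
                                                   'y_start': 0, 'y_stop': 1024,
                                                   'z_start': 0, 'z_stop': 64})
    exp = set_experiment_resource(remote, {'name': 'demo_experiment',
                                           'collection_name': coll.name,
                                           'coord_frame': frame.name})
    chan = set_channel_resource(remote, {'name': 'demo_channel',
                                         'collection_name': coll.name,
                                         'experiment_name': exp.name,
                                         'datatype': 'uint8'})
    return remote, chan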
```
#### File: system_test/utils/plot_utils.py
```python
import numpy
import os
import re
if __name__ == '__main__':
import argparse
import json_utils
else:
from utils import json_utils
import matplotlib.pyplot as pyplot
PLOT_KEY = '_PLOT_'
# Todo: Allow y-axis to be list of names, so that multiple arrays can be plotted simultaneously.
# Display a plot of two arrays #
def show_plot(xarray, yarray, xlabel: str='x', ylabel: str='y', title: str=""):
"""Whenever an axis pair is detected, creates a line plot using those axes. This function is called by read_json()
and is not designed to be called directly.
Args:
xarray (list/numpy.ndarray) : Values for the x-axis
yarray (list/numpy.ndarray) : Values for the y-axis
xlabel (str) : Label for the x-axis
ylabel (str) : Label for the y-axis
title (str) : Title for the line plot
"""
for i in range(0, len(xarray)):
if not isinstance(xarray[i], (int, float)):
# warnings.showwarning('Some x-Axis values are not numeric',Warning,filename,0)
break
elif not isinstance(yarray[i], (int, float)):
# warnings.showwarning('Some y-Axis values are not numeric',Warning,filename,0)
break
try:
x_lims = [min(x for x in xarray if x is not None), max(x for x in xarray if x is not None)]
y_lims = [min(y for y in yarray if y is not None), max(y for y in yarray if y is not None)]
# If data is constant value, adjust axes limits #
x_lims[1] = (x_lims[1] if x_lims[0] == x_lims[1] else x_lims[1]+1)
y_lims[1] = (y_lims[1] if y_lims[0] == y_lims[1] else y_lims[1]+1)
# f = pyplot.gcf()
# ax = f.add_axes([x_lims[0], y_lims[0], x_lims[1], y_lims[1]])
pyplot.plot(xarray, yarray)
pyplot.draw()
# axes = pyplot.gca()
# axes.set_xlim([x_lims[0]-x_margin, x_lims[1]+x_margin])
# axes.set_ylim([y_lims[0]-y_margin, y_lims[1]+y_margin])
pyplot.grid(True)
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
if not bool(title):
title = 'Plot {0} vs {1}'.format(ylabel, xlabel)
pyplot.title(title)
except Exception as e:
# print('ERROR: {0}'.format(str(e)))
raise e
return
def read_json(data: dict, keyword: str):
"""Parse a JSON dictionary and find array pairs to plot. Array pairs are identified when the output has a key
    matching the plot keyword (PLOT_KEY, '_PLOT_' by default). The value of that entry should be a dict (or a list of
    dicts) whose 'x' and 'y' entries name keys in the same dictionary that hold numeric lists; optional 'title',
    'xlabel', and 'ylabel' entries override the plot labels. For example, the unit test function result (in the JSON)
    could be written as:
    {
    'x_vals': [1, 2, 3, 4],
    'y_vals': [10, 11, 12, 13],
    <PLOT_KEY>: {'x': 'x_vals', 'y': 'y_vals'},
    ...
    }
Args:
data (dict) : Result of load()
keyword (str) : The key of the dictionary where we are looking. This is because the function is recursive and
may operate on nested dictionaries.
"""
plot_list = []
if isinstance(data, dict):
for key in data:
if key == keyword:
plots_info = data[key] if isinstance(data[key], list) else list([data[key]])
for info in plots_info:
try:
y_key = info['y']
y_vals = data[y_key]
x_key = info['x']
x_vals = data[x_key]
assert bool(x_vals), 'Empty x axis data'
assert bool(y_vals), 'Empty y axis data'
title = info['title'] if 'title' in info else ""
xlabel = info['xlabel'] if 'xlabel' in info else x_key
ylabel = info['ylabel'] if 'ylabel' in info else y_key
# if 'x' not in info or min(x_vals)==max(x_vals):
# if min(x_vals)==max(x_vals):
# title += ' ({0}={1})'.format(x_key, max(x_vals))
# x_vals = list(range(1,len(y_vals)+1))
# xlabel = 'iterations ({0})'.format(len(x_vals))
plot_list.append({'x': x_vals,
'y': y_vals,
'xlabel': xlabel,
'ylabel': ylabel,
'title': title})
except Exception as e:
# print(e)
raise e
pass
else:
p = read_json(data[key], keyword)
if p:
plot_list += p
elif isinstance(data, (list, numpy.ndarray)):
for item in data:
p = read_json(item, keyword)
if p:
plot_list += p
return plot_list
def read_files(file_names):
"""Scan a file or list of files for plot data, and then display plots."""
if not isinstance(file_names, list):
file_names = list([file_names])
plots = list([])
for i in range(0, len(file_names)):
try:
            json_data = json_utils.read(file_names[i])
if json_data:
plots_recursive = read_json(json_data, PLOT_KEY)
for j in range(0, len(plots_recursive)):
plots_recursive[j]['filename'] = file_names[i]
plots += plots_recursive
except Exception as e:
print('Exception while reading {0}: {1}'.format(file_names[i], str(e)))
for k in range(0, len(plots)):
# pyplot.figure()
show_plot(plots[k]['x'],
plots[k]['y'],
plots[k]['xlabel'],
plots[k]['ylabel'],
plots[k]['title'],)
pyplot.show()
# print(plots[k])
# imgfile = '{0}.png'.format(
# # plots[k]['filename'].replace('.json', ''),
# re.sub(r':|;|\*| |!|\$|,', '_', plots[k]['title']))
# print(imgfile)
# pyplot.savefig(imgfile)
def read_directory(dir_name):
"""Scan a directory for files with plot data. Make sure to use each file's full name."""
file_list = [('{0}/{1}'.format(dir_name, x) if dir_name not in x else x) for x in os.listdir(dir_name)]
read_files(file_list)
# for filename in os.listdir(dirname):
# print('Trying to read {0}...'.format(filename))
# read_file('/'.join([dirname, filename]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parser that accepts configuration file name')
parser.add_argument('files', metavar='N',
nargs='+',
help='JSON output file(s) with numeric array data')
args = parser.parse_args()
read_files(args.files)
``` |
{
"source": "jhuapl-boss/boss-tools",
"score": 2
} |
#### File: boss-tools/activities/boss_db.py
```python
import pymysql.cursors
import bossutils
LOG = bossutils.logger.bossLogger()
def get_db_connection(host):
"""
Connects to vault to get database information and then makes a DB connection.
Note that the connection is opened with auto-commit turned ON.
Args:
host (str): Host name of database.
Returns:
(pymysql.Connection) connection to DB
"""
vault = bossutils.vault.Vault()
# ------ get values from Vault -----------
user = vault.read('secret/endpoint/django/db', 'user')
password = vault.read('secret/endpoint/django/db', 'password')
db_name = vault.read('secret/endpoint/django/db', 'name')
port = int(vault.read('secret/endpoint/django/db', 'port'))
# ---- debug locally -------
# host = "localhost"
# user = "testuser"
# password = ""
# db_name = "boss"
# port = 3306
return pymysql.connect(host=host,
user=user,
password=password,
db=db_name,
port=port,
# Don't turn off autocommit w/o visiting every user
# of this connection and ensuring that they use a
# transaction!
autocommit=True,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
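# --- Illustrative usage (not part of the original module; host name and query are assumptions) ---
# A minimal sketch of reading from the Boss database with the helper above.
def _example_query(db_host):
    connection = get_db_connection(db_host)
    try:
        with connection.cursor() as cursor:
            cursor.execute('SELECT id, downsample_status FROM channel LIMIT 5')
            return cursor.fetchall()   # list of dicts thanks to DictCursor
    finally:
        connection.close()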
def update_downsample_status_in_db(args):
"""
Update the downsample status in the MySQL database.
This supports a state of the resolution hierarchy step function.
This function is tested in
boss.git/django/bossspatialdb/test/test_update_downsample_status.py.
Tests live there because Django owns the DB.
Args:
args (dict):
db_host (str): MySQL host name.
channel_id (int): ID of channel for downsample.
status (str): String from DownsampleStatus class.
Returns:
(dict): Returns input args for passing to next SFN state.
"""
sql = """
UPDATE channel
SET downsample_status = %(status)s
WHERE id = %(chan_id)s
"""
db_host = args['db_host']
chan_id = args['channel_id']
status = args['status']
sql_args = dict(status=status, chan_id=str(chan_id))
try:
db_connection = get_db_connection(db_host)
with db_connection.cursor(pymysql.cursors.SSCursor) as cursor:
rows = cursor.execute(sql, sql_args)
if rows < 1:
LOG.error(
f'DB said no rows updated when trying to set downsample status to {status} for channel {chan_id}'
)
except Exception as ex:
LOG.exception(f'Failed to set downsample status to {status} for channel {chan_id}: {ex}')
return args
def set_downsample_arn_in_db(args):
"""
Set the arn of the running downsample step function in the MySQL database.
This supports a state of the resolution hierarchy step function.
This function is tested in
boss.git/django/bossspatialdb/test/test_set_downsample_arn.py.
Tests live there because Django owns the DB.
Args:
args (dict):
db_host (str): MySQL host name.
channel_id (int): ID of channel for downsample.
exe_sfn_arn (str): ARN of running downsample step function.
Returns:
(dict): Returns input args for passing to next SFN state.
"""
sql = """
UPDATE channel
SET downsample_arn = %(arn)s
WHERE id = %(chan_id)s
"""
db_host = args['db_host']
chan_id = args['channel_id']
arn = args['exe_sfn_arn']
sql_args = dict(arn=arn, chan_id=str(chan_id))
try:
db_connection = get_db_connection(db_host)
with db_connection.cursor(pymysql.cursors.SSCursor) as cursor:
rows = cursor.execute(sql, sql_args)
if rows < 1:
LOG.error(
f'DB said no rows updated when trying to set downsample arn for channel {chan_id}'
)
except Exception as ex:
LOG.exception(f'Failed to set downsample arn for channel {chan_id}: {ex}')
return args
```
#### File: boss-tools/activities/scan_for_missing_chunks.py
```python
import boto3
import json
import pymysql
import pymysql.cursors
import time
from boss_db import get_db_connection
from bossutils import logger
from ndingest.nddynamo.boss_tileindexdb import TASK_INDEX, MAX_TASK_ID_SUFFIX, TILE_UPLOADED_MAP_KEY
from ingestclient.core.backend import BossBackend
"""
Scans the tile index in DynamoDB for any chunks that have missing tiles. If
tiles are missing, they are placed back in the upload queue for that ingest
job. If there are missing tiles, the ingest job's state is reset to UPLOADING.
"""
log = logger.bossLogger()
# Tile index attributes defined in ndingest.git/nddynamo/schemas/boss_tile_index.json.
APPENDED_TASK_ID = 'appended_task_id'
CHUNK_KEY = 'chunk_key'
SQS_BATCH_SIZE = 10
SQS_RETRY_TIMEOUT = 15
# These values are defined in boss.git/django/bossingest/models.py.
UPLOADING_STATUS = 1
WAIT_ON_QUEUES = 6
TILE_INGEST = 0
def activity_entry_point(args):
"""
Entry point to the chunk scanner step function activity.
Args:
args (dict):
tile_index_table (str): Name of tile index table.
region (str): AWS region to use.
db_host (str): Host of MySQL database.
job (dict):
collection (int): Collection id.
experiment (int): Experiment id.
channel (int): Channel id.
task_id (int): The ingest job's id.
resolution (int): Resolution of chunk.
z_chunk_size (int): How many z slices in the chunk.
upload_queue (str): Tile upload queue.
ingest_queue (str): Tile ingest queue.
ingest_type (int): Tile (0) or volumetric ingest (1).
resource (dict): Boss resource data.
x_size (int): Tile size in x dimension.
y_size (int): Tile size in y dimension.
KVIO_SETTINGS: spdb settings.
STATEIO_CONFIG: spdb settings.
OBJECTIO_CONFIG: spdb settings.
Returns:
(dict): Returns incoming args so they can be passed to the next activity.
Also adds 'quit' key. Sets 'quit' to True if missing tiles
were found. Otherwise, sets 'quit' to False.
"""
# This should only run on tile ingests.
if args['job']['ingest_type'] != TILE_INGEST:
args['quit'] = False
return args
dynamo = boto3.client('dynamodb', region_name=args['region'])
sqs = boto3.resource('sqs', region_name=args['region'])
cs = ChunkScanner(dynamo, sqs, args['tile_index_table'], args['db_host'],
args['job'], args['resource'], args['x_size'], args['y_size'],
args['KVIO_SETTINGS'], args['STATEIO_CONFIG'], args['OBJECTIO_CONFIG'])
args['quit'] = cs.run()
return args
class ChunkScanner:
JOB_FIELDS = frozenset([
'collection', 'experiment', 'channel',
'task_id', 'resolution', 'z_chunk_size',
'upload_queue', 'ingest_queue', 'ingest_type',
])
def __init__(self, dynamo, sqs, tile_index_table, db_host, job, resource,
tile_x_size, tile_y_size, kvio_settings, stateio_config, objectio_config):
"""
Args:
dynamo (boto3.Dynamodb): Dynamo client.
sqs (boto3.SQS.ServiceResource): SQS client.
tile_index_table (str): Name of tile index table.
db_host (str): Host of MySQL database.
job (dict):
collection (int): Collection id.
experiment (int): Experiment id.
channel (int): Channel id.
task_id (int): The ingest job's id.
resolution (int): Resolution of chunk.
z_chunk_size (int): How many z slices in the chunk.
upload_queue (str): Tile upload queue.
ingest_queue (str): Tile ingest queue.
resource (dict): Boss resource data.
tile_x_size (int): Tile size in x dimension.
tile_y_size (int): Tile size in y dimension.
kvio_settings: spdb settings.
stateio_config: spdb settings.
objectio_config: spdb settings.
"""
self.dynamo = dynamo
self.sqs = sqs
self.tile_index_table = tile_index_table
self.db_host = db_host
self.job = job
self.resource = resource
self.tile_x_size = tile_x_size
self.tile_y_size = tile_y_size
self.kvio_settings = kvio_settings
self.stateio_config = stateio_config
self.objectio_config = objectio_config
self.found_missing_tiles = False
self.reenqueued_chunks = False
# Validate job parameter.
for field in ChunkScanner.JOB_FIELDS:
if field not in job:
raise KeyError('Job must have {}'.format(field))
def _get_project_info(self):
"""
Get the project info required by Backend.encode_tile_key().
Returns:
(list[str]): [collection, experiment, channel].
"""
return [self.job['collection'], self.job['experiment'], self.job['channel']]
def run(self):
"""
Scan all DynamoDB partitions for remaining chunks in the tile index.
Tiles missing from chunks are put back in the tile upload queue.
Returns:
(bool): True if missing tiles found or if chunks put back on the ingest queue.
"""
for i in range(0, MAX_TASK_ID_SUFFIX):
self.run_scan(i)
return self.found_missing_tiles or self.reenqueued_chunks
def run_scan(self, partition_num):
"""
Scan a single partition for remaining chunks.
During an ingest, chunks are written across (0, INGEST_MAX_SIZE)
partitions so Dynamo doesn't throttle the ingest due to a hot partition.
If any remaining chunks are missing tiles, it puts those tiles on the
upload queue. After each batch of messages enqueued, it sets the
state of the ingest job to UPLOADING. This is done after each batch
in case the ingest client clears the upload queue and tries to restart
the complete process.
self.found_missing_tiles set to True if missing tiles found.
self.reenqueued_chunks set to True if chunks put back in ingest queue.
Args:
            partition_num (int): Which partition to scan (suffix appended to task/job id).
"""
appended_task_id = {'S': '{}_{}'.format(self.job['task_id'], partition_num) }
query_args = {
'TableName': self.tile_index_table,
'IndexName': TASK_INDEX,
'KeyConditionExpression': '#appended_task_id = :appended_task_id',
'ExpressionAttributeNames': {
'#appended_task_id': APPENDED_TASK_ID,
'#chunk_key': CHUNK_KEY,
'#tile_uploaded_map': TILE_UPLOADED_MAP_KEY
},
'ExpressionAttributeValues': { ':appended_task_id': appended_task_id },
'ProjectionExpression': '#chunk_key, #tile_uploaded_map'
}
db_connection = get_db_connection(self.db_host)
try:
upload_queue = self.sqs.Queue(self.job['upload_queue'])
ingest_queue = self.sqs.Queue(self.job['ingest_queue'])
query = self.dynamo.get_paginator('query')
resp_iter = query.paginate(**query_args)
for resp in resp_iter:
for item in resp['Items']:
missing_msgs = self.check_tiles(item[CHUNK_KEY]['S'], item[TILE_UPLOADED_MAP_KEY]['M'])
no_missing_tiles = True
if self.enqueue_missing_tiles(upload_queue, missing_msgs):
self.found_missing_tiles = True
no_missing_tiles = False
self.set_ingest_status(db_connection, UPLOADING_STATUS)
if no_missing_tiles:
# This is a chunk with all its tiles, so put it back
# in the ingest queue.
self.reenqueued_chunks = True
self.enqueue_chunk(ingest_queue, item[CHUNK_KEY]['S'])
if not self.found_missing_tiles and self.reenqueued_chunks:
self.set_ingest_status(db_connection, WAIT_ON_QUEUES)
finally:
db_connection.close()
def enqueue_chunk(self, queue, chunk_key):
"""
Put the chunk back in the ingest queue. All its tiles should be in S3,
but the ingest lambda must have failed.
Args:
queue (sqs.Queue): Ingest queue.
chunk_key (str): Key identifying which chunk to re-ingest.
"""
log.info(f'Re-enqueuing chunk: {chunk_key}')
raw_msg = {
'chunk_key': chunk_key,
'ingest_job': self.job['task_id'],
'parameters': {
'KVIO_SETTINGS': self.kvio_settings,
'STATEIO_CONFIG': self.stateio_config,
'OBJECTIO_CONFIG': self.objectio_config,
'resource': self.resource,
},
'x_size': self.tile_x_size,
'y_size': self.tile_y_size,
}
queue.send_message(MessageBody=json.dumps(raw_msg))
def set_ingest_status(self, db_connection, status):
"""
Set the status of the ingest job to the given status.
Args:
db_connection (pymysql.Connection)
status (int): New ingest status.
"""
sql = 'UPDATE ingest_job SET status = %(status)s WHERE id = %(job_id)s'
sql_args = dict(status=str(status), job_id=str(self.job['task_id']))
try:
with db_connection.cursor(pymysql.cursors.SSCursor) as cursor:
rows = cursor.execute(sql, sql_args)
if rows < 1:
log.error(
'DB said no rows updated when trying to set UPLOADING job status for job: {}'.format(
self.job['task_id'])
)
except Exception as ex:
log.error('Failed to set UPLOADING status: {}'.format(ex))
def check_tiles(self, chunk_key, tiles):
"""
Check the chunk's tile map for missing tiles. If any are missing,
generate the proper stringified JSON for putting those missing tiles
back in the tile upload queue.
Args:
chunk_key (str): Identifies chunk of tiles.
            tiles (dict): Map of uploaded tiles for the chunk (DynamoDB map keyed by tile key).
Yields:
(str): JSON string for sending to SQS tile upload queue.
"""
# Only using encode|decode_*_key methods, so don't need to provide a
# config.
ingest_backend = BossBackend(None)
chunk_key_parts = ingest_backend.decode_chunk_key(chunk_key)
chunk_x = chunk_key_parts['x_index']
chunk_y = chunk_key_parts['y_index']
chunk_z = chunk_key_parts['z_index']
t = chunk_key_parts['t_index']
num_tiles = chunk_key_parts['num_tiles']
z_start = chunk_z * self.job['z_chunk_size']
for tile_z in range(z_start, z_start + num_tiles):
# First arg is a list of [collection, experiment, channel] ids.
tile_key = ingest_backend.encode_tile_key(
self._get_project_info(), self.job['resolution'], chunk_x, chunk_y, tile_z, t)
if tile_key in tiles:
continue
msg = {
'job_id': self.job['task_id'],
'upload_queue_arn': self.job['upload_queue'],
'ingest_queue_arn': self.job['ingest_queue'],
'chunk_key': chunk_key,
'tile_key': tile_key
}
log.info(f'Re-enqueuing tile: {tile_key} belonging to chunk: {chunk_key}')
yield json.dumps(msg)
def enqueue_missing_tiles(self, queue, msgs):
"""
Send messages for missing tiles to the upload queue.
Args:
queue (SQS.Queue): The upload queue.
msgs (Iterator[str]): Stringified JSON messages.
Returns:
(bool): True if at least one message was enqueued.
"""
enqueued_msgs = False
while True:
batch = []
for i in range(SQS_BATCH_SIZE):
try:
batch.append({
'Id': str(i),
'MessageBody': next(msgs),
'DelaySeconds': 0
})
except StopIteration:
break
if len(batch) == 0:
break
retry = 3
while retry > 0:
resp = queue.send_messages(Entries=batch)
if 'Failed' in resp and len(resp['Failed']) > 0:
time.sleep(SQS_RETRY_TIMEOUT)
ids = [f['Id'] for f in resp['Failed']]
batch = [b for b in batch if b['Id'] in ids]
retry -= 1
if retry == 0:
log.error('Could not send {}/{} messages to queue {}'.format(
len(resp['Failed']), len(batch), queue.url))
break
else:
enqueued_msgs = True
break
return enqueued_msgs
```
#### File: activities/tests/_test_case_with_patch_object.py
```python
import unittest
from unittest.mock import patch, MagicMock
class TestCaseWithPatchObject(unittest.TestCase):
"""
Provide alternative way to setup mocks w/o need for decorator and extra
arguments for each test method.
"""
def patch_object(self, obj, name, **kwargs):
"""
Setup mocks without need for decorator and extra arguments for each test.
Args:
obj (object): Object or module that will have one of its members mocked.
name (str): Name of member to mock.
kwargs (keyword args): Additional arguments passed to patch.object().
Returns:
(MagicMock)
"""
patch_wrapper = patch.object(obj, name, autospec=True, **kwargs)
magic_mock = patch_wrapper.start()
# This ensures the patch is removed when the test is torn down.
self.addCleanup(patch_wrapper.stop)
return magic_mock
```
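A hypothetical usage sketch of patch_object(), assuming TestCaseWithPatchObject above is importable; it mocks a standard-library function so the example stays self-contained:
```python
import os

# Hypothetical test case; not part of the original repository.
class ExampleTest(TestCaseWithPatchObject):
    def test_getcwd_is_mocked(self):
        mock_getcwd = self.patch_object(os, 'getcwd', return_value='/fake/dir')
        self.assertEqual(os.getcwd(), '/fake/dir')
        mock_getcwd.assert_called_once_with()
        # No decorator or manual stop() needed; addCleanup() removes the patch.
```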
#### File: boss-tools/lambda/batch_enqueue_cuboids_lambda.py
```python
import boto3
def handler(event, context):
queue = event['config']['object_store_config']['index_cuboids_keys_queue']
msgs = event['cuboid_msgs']
sqs = boto3.client('sqs')
resp = sqs.send_message_batch(
QueueUrl=queue,
Entries=msgs
)
if 'Failed' in resp and len(resp['Failed']) > 0:
print('send_message_batch() failed: {}'.format(resp))
failed_msg_ids = [f['Id'] for f in resp['Failed']]
msgs = [m for m in msgs if m['Id'] in failed_msg_ids]
# Return only the failed messages. The step function will have a
        # choice state that will re-invoke this lambda if event['cuboid_msgs']
        # is not empty.
event['cuboid_msgs'] = msgs
event['enqueue_done'] = False
else:
# Indicate that all messages were sent.
event['cuboid_msgs'] = []
event['enqueue_done'] = True
return event
```
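For reference, a sketch of the event payload this handler expects; the queue URL and message bodies below are placeholders, not values from a real deployment:
```python
# Hypothetical event shape for batch_enqueue_cuboids_lambda.handler().
event = {
    'config': {
        'object_store_config': {
            'index_cuboids_keys_queue': 'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue'
        }
    },
    'cuboid_msgs': [
        {'Id': '0', 'MessageBody': '{"cuboid_object_key": "example-key-0"}'},
        {'Id': '1', 'MessageBody': '{"cuboid_object_key": "example-key-1"}'},
    ],
}
# A step function choice state re-invokes the handler with the returned event
# until event['enqueue_done'] is True (no failed messages remain).
```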
#### File: boss-tools/lambda/downsample_volume.py
```python
import sys
import json
import logging
import blosc
import boto3
import hashlib
import numpy as np
from PIL import Image
from spdb.c_lib.ndtype import CUBOIDSIZE
from spdb.c_lib import ndlib
from spdb.spatialdb import AWSObjectStore
from spdb.spatialdb.object import ObjectIndices
from random import randrange
from botocore.exceptions import ClientError
from bossutils.multidimensional import XYZ, Buffer
from bossutils.multidimensional import range as xyz_range
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
LOOKUP_KEY_MAX_N = 100 # DP NOTE: Taken from spdb.spatialdb.object
EXCEPTIONS_NOT_RELATED_TO_KEY = ('ProvisionedThroughputExceededException', 'ThrottlingException')
np_types = {
'uint64': np.uint64,
'uint16': np.uint16,
'uint8': np.uint8,
}
#### Helper functions and classes ####
def HashedKey(*args, version = None):
""" BOSS Key creation function
Takes a list of different key string elements, joins them with the '&' char,
and prepends the MD5 hash of the key to the key.
Args (Common usage):
collection_id
experiment_id
channel_id
resolution
time_sample
morton (str): Morton ID of cube
Keyword Args:
version : Optional Object version, not part of the hashed value
"""
key = '&'.join([str(arg) for arg in args if arg is not None])
digest = hashlib.md5(key.encode()).hexdigest()
key = '{}&{}'.format(digest, key)
if version is not None:
key = '{}&{}'.format(key, version)
return key
class S3Bucket(object):
"""Wrapper for calls to S3
Wraps boto3 calls to upload and download data from S3
"""
def __init__(self, bucket):
self.bucket = bucket
self.s3 = boto3.client('s3')
def _check_error(self, resp, action):
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception("Error {} cuboid to/from S3".format(action))
def get(self, key):
try:
resp = self.s3.get_object(Key = key,
Bucket = self.bucket)
except ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 404:
# Cube doesn't exist
return None
if e.response['Error']['Code'] not in EXCEPTIONS_NOT_RELATED_TO_KEY:
print("S3 key in get_object error: {}".format(key))
raise
data = resp['Body'].read()
return data
def put(self, key, data):
resp = self.s3.put_object(Key = key,
Body = data,
Bucket = self.bucket)
self._check_error(resp, "writing")
class S3IndexKey(dict):
"""Key object for DynamoDB S3 Index table
May also contain optional attributes.
Attributes:
obj_key (str): HashedKey of the stored data
version (int): Reserved for future use.
job_hash: Identifies owning collection.
job_range: Identifies experiment, channel, resolution, and ingest job id.
lookup_key (str): Identifies owning channel (used by lookup-key-index).
"""
def __init__(self, obj_key, version=0, job_hash=None, job_range=None, lookup_key=None):
super().__init__()
self['object-key'] = {'S': obj_key}
self['version-node'] = {'N': str(version)}
if job_hash is not None:
self['ingest-job-hash'] = {'S': str(job_hash)}
if job_range is not None:
self['ingest-job-range'] = {'S': job_range}
if lookup_key is not None:
self['lookup-key'] = {'S': lookup_key}
class S3DynamoDBTable(object):
"""Wrapper for calls to DynamoDB
Wraps boto3 calls to create and update DynamoDB entries.
Supports updates for S3 Index tables
"""
def __init__(self, table, region):
self.table = table
self.ddb = boto3.client('dynamodb', region_name=region)
def put(self, item):
self.ddb.put_item(TableName = self.table,
Item = item,
ReturnConsumedCapacity = 'NONE',
ReturnItemCollectionMetrics = 'NONE')
def exists(self, key):
resp = self.ddb.get_item(TableName = self.table,
Key = key,
ConsistentRead=True,
ReturnConsumedCapacity='NONE')
return 'Item' in resp
#### Main lambda logic ####
def downsample_volume(args, target, step, dim, use_iso_key):
"""Downsample a volume into a single cube
Download `step` cubes from S3, downsample them into a single cube, upload
to S3 and update the S3 index for the new cube.
Args:
args {
collection_id (int)
experiment_id (int)
channel_id (int)
annotation_channel (bool)
data_type (str) 'uint8' | 'uint16' | 'uint64'
s3_bucket (URL)
s3_index (str)
resolution (int) The resolution to downsample. Creates resolution + 1
type (str) 'isotropic' | 'anisotropic'
iso_resolution (int) if resolution >= iso_resolution && type == 'anisotropic' downsample both
aws_region (str) AWS region to run in such as us-east-1
}
target (XYZ) : Corner of volume to downsample
step (XYZ) : Extent of the volume to downsample
dim (XYZ) : Dimensions of a single cube
use_iso_key (boolean) : If the BOSS keys should include an 'ISO=' flag
"""
log.debug("Downsampling {}".format(target))
# Hard coded values
version = 0
t = 0
dim_t = 1
iso = 'ISO' if use_iso_key else None
    # If the channel is anisotropic and this is the resolution where near-iso is
    # reached, the first isotropic downsample needs to use the anisotropic data.
    # Future isotropic downsamples will use the previous isotropic data.
parent_iso = None if args['resolution'] == args['iso_resolution'] else iso
col_id = args['collection_id']
exp_id = args['experiment_id']
chan_id = args['channel_id']
data_type = args['data_type']
annotation_chan = args['annotation_channel']
resolution = args['resolution']
s3 = S3Bucket(args['s3_bucket'])
s3_index = S3DynamoDBTable(args['s3_index'], args['aws_region'])
    # Download all of the cubes that will be downsampled
volume = Buffer.zeros(dim * step, dtype=np_types[data_type], order='C')
volume.dim = dim
volume.cubes = step
volume_empty = True # abort if the volume doesn't exist in S3
for offset in xyz_range(step):
if args.get('test'):
# Enable Test Mode
            # This is where the cubes being downsampled are all taken from 0/0/0
# so that the entire frame doesn't have to be populated to test
# the code paths that downsample cubes
cube = offset # use target 0/0/0
else:
cube = target + offset
obj_key = HashedKey(parent_iso, col_id, exp_id, chan_id, resolution, t, cube.morton, version=version)
data = s3.get(obj_key)
if data:
data = blosc.decompress(data)
# DP ???: Check to see if the buffer is all zeros?
data = Buffer.frombuffer(data, dtype=np_types[data_type])
data.resize(dim)
#log.debug("Downloaded cube {}".format(cube))
volume[offset * dim: (offset + 1) * dim] = data
volume_empty = False
if volume_empty:
log.debug("Completely empty volume, not downsampling")
return
# Create downsampled cube
new_dim = XYZ(*CUBOIDSIZE[resolution + 1])
cube = Buffer.zeros(new_dim, dtype=np_types[data_type], order='C')
cube.dim = new_dim
cube.cubes = XYZ(1,1,1)
downsample_cube(volume, cube, annotation_chan)
target = target / step # scale down the output
# Save new cube in S3
obj_key = HashedKey(iso, col_id, exp_id, chan_id, resolution + 1, t, target.morton, version=version)
compressed = blosc.compress(cube, typesize=(np.dtype(cube.dtype).itemsize))
s3.put(obj_key, compressed)
    # Update indices
# Same key scheme, but without the version
obj_key = HashedKey(iso, col_id, exp_id, chan_id, resolution + 1, t, target.morton)
# Create S3 Index if it doesn't exist
idx_key = S3IndexKey(obj_key, version)
if not s3_index.exists(idx_key):
ingest_job = 0 # Valid to be 0, as posting a cutout uses 0
idx_key = S3IndexKey(obj_key,
version,
col_id,
'{}&{}&{}&{}'.format(exp_id, chan_id, resolution + 1, ingest_job),
# Replaced call to SPDB AWSObjectStore.generate_lookup_key, as SPDB master doesn't contain this call
# AWSObjectStore.generate_lookup_key(col_id, exp_id, chan_id, resolution + 1)
'{}&{}&{}&{}&{}'.format(col_id, exp_id, chan_id, resolution + 1, randrange(LOOKUP_KEY_MAX_N)))
s3_index.put(idx_key)
def downsample_cube(volume, cube, is_annotation):
"""Downsample the given Buffer into the target Buffer
Note: Both volume and cube both have the following attributes
dim (XYZ) : The dimensions of the cubes contained in the Buffer
cubes (XYZ) : The number of cubes of size dim contained in the Buffer
dim * cubes == Buffer.shape
Args:
volume (Buffer) : Raw numpy array of input cube data
cube (Buffer) : Raw numpy array for output data
is_annotation (boolean) : If the downsample should be an annotation downsample
"""
#log.debug("downsample_cube({}, {}, {})".format(volume.shape, cube.shape, is_annotation))
if is_annotation:
# Use a C implementation to downsample each value
ndlib.addAnnotationData_ctype(volume, cube, volume.cubes.zyx, volume.dim.zyx)
else:
if volume.dtype == np.uint8:
image_type = 'L'
elif volume.dtype == np.uint16:
image_type = 'I;16'
else:
raise Exception("Unsupported type for image downsampling '{}'".format(volume.dtype))
for z in range(cube.dim.z):
# DP NOTE: For isotropic downsample this skips Z slices, instead of trying to merge them
slice = volume[z * volume.cubes.z, :, :]
image = Image.frombuffer(image_type,
(volume.shape.x, volume.shape.y),
slice.flatten(),
'raw',
image_type,
0, 1)
cube[z, :, :] = Buffer.asarray(image.resize((cube.shape.x, cube.shape.y), Image.BILINEAR))
def handler(args, context):
def convert(args_, key):
args_[key] = XYZ(*args_[key])
convert(args, 'step')
convert(args, 'dim')
sqs = boto3.resource('sqs')
cubes = sqs.Queue(args['cubes_arn'])
msgs = cubes.receive_messages(MaxNumberOfMessages = args['bucket_size'],
WaitTimeSeconds = 5)
for msg in msgs:
downsample_volume(args['args'],
XYZ(*json.loads(msg.body)),
args['step'],
args['dim'],
args['use_iso_flag'])
msg.delete()
```
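A small sketch of the object key layout that HashedKey() builds, using hashlib directly and example IDs (the IDs and morton value are hypothetical):
```python
import hashlib

# iso, collection, experiment, channel, resolution, time sample, morton (example values)
parts = [None, 1, 2, 3, 0, 0, '12345']
key = '&'.join(str(p) for p in parts if p is not None)
obj_key = '{}&{}'.format(hashlib.md5(key.encode()).hexdigest(), key)
print(obj_key)  # '<32-char-hex-digest>&1&2&3&0&0&12345'
```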
#### File: boss-tools/lambda/invoke_index_supervisor_lambda.py
```python
import start_sfn_lambda
def handler(event, context):
# Expected by initial choice state of Index.Supervisor.
event['queue_empty'] = False
# Tell start_sfn_lambda which step function to start.
event['sfn_arn'] = event['id_supervisor_step_fcn']
return start_sfn_lambda.handler(event, context)
```
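A sketch of the input event this lambda expects; the ARN shown is a placeholder:
```python
# Hypothetical event for invoke_index_supervisor_lambda.handler().
event = {
    'id_supervisor_step_fcn': 'arn:aws:states:us-east-1:123456789012:stateMachine:Index.Supervisor'
}
# handler() sets event['queue_empty'] = False and event['sfn_arn'] to the ARN above
# before delegating to start_sfn_lambda.handler().
```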
#### File: boss-tools/lmbdtest/test_delete_tile_index_entry_lambda.py
```python
from lambdafcns.delete_tile_index_entry_lambda import handler
import botocore
import boto3
import moto
import unittest
from unittest.mock import patch
class TestDeleteTileIndexEntryLambda(unittest.TestCase):
def setUp(self):
self.table_name = 'test.index.boss'
self.region_name = 'us-east-1'
table_params = self.get_tile_schema()
self.mock_dynamo = moto.mock_dynamodb2()
self.mock_dynamo.start()
self.dynamo = boto3.client('dynamodb', region_name=self.region_name)
self.dynamo.create_table(TableName=self.table_name, **table_params)
def tearDown(self):
self.mock_dynamo.stop()
def test_delete(self):
fake_key = 'fakekey'
task_id = 47
event = {
'region': self.region_name,
'tile_index': self.table_name,
'chunk_key': fake_key,
'task_id': task_id
}
context = None
self.dynamo.put_item(
TableName=self.table_name,
Item={
'chunk_key': {'S': fake_key},
'task_id': {'N': str(task_id)}
}
)
handler(event, context)
resp = self.dynamo.get_item(
TableName=self.table_name,
Key={
'chunk_key': {'S': fake_key},
'task_id': {'N': str(task_id)}
}
)
self.assertNotIn('Item', resp)
def test_delete_key_doesnt_exist(self):
"""
For this test, execution w/o error is passing.
"""
fake_key = 'nonexistantkey'
task_id = 97
event = {
'region': self.region_name,
'tile_index': self.table_name,
'chunk_key': fake_key,
'task_id': task_id
}
context = None
handler(event, context)
def get_tile_schema(self):
"""
Tile index schema from ndingest/nddynamo/schemas/boss_tile_index.json.
Not loading directly because we don't know where this repo will live
on each dev's machine.
Returns:
(dict)
"""
return {
"KeySchema": [
{
"AttributeName": "chunk_key",
"KeyType": "HASH"
},
{
"AttributeName": "task_id",
"KeyType": "RANGE"
}
],
"AttributeDefinitions": [
{
"AttributeName": "chunk_key",
"AttributeType": "S"
},
{
"AttributeName": "task_id",
"AttributeType": "N"
}
],
"GlobalSecondaryIndexes": [
{
"IndexName": "task_index",
"KeySchema": [
{
"AttributeName": "task_id",
"KeyType": "HASH"
}
],
"Projection": {
"ProjectionType": "ALL"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
}
],
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
}
```
#### File: boss-tools/lmbdtest/test_ingest_queue_upload_volumetric_lambda.py
```python
import boto3
from lambdafcns import ingest_queue_upload_volumetric_lambda as iqu
import json
import math
import unittest
from unittest.mock import patch, MagicMock
@patch('boto3.resource')
class TestIngestQueueUploadLambda(unittest.TestCase):
def tile_count(self, kwargs):
x = math.ceil((kwargs["x_stop"] - kwargs["x_start"]) / kwargs["x_tile_size"])
y = math.ceil((kwargs["y_stop"] - kwargs["y_start"]) / kwargs["y_tile_size"])
z = math.ceil((kwargs["z_stop"] - kwargs["z_start"]) / kwargs["z_chunk_size"])
t = math.ceil((kwargs["t_stop"] - kwargs["t_start"]) / kwargs["t_tile_size"])
return x * y * z * t
def test_all_messages_are_there(self, fake_resource):
"""
        This test shows that when items are generated by multiple lambdas, the sum of those items
        is exactly the same set that would be generated by a single lambda creating them all.
        First it walks the create_messages() generator as a single lambda would, populating a
        dictionary with every chunk key returned. Then it runs create_messages() num_lambdas more
        times, each time with the appropriate items_to_skip and MAX_NUM_ITEMS_PER_LAMBDA set, and
        removes each chunk key from the dictionary to verify that all items are accounted for. In
        the end there should be no items left in the dictionary.
        This test can be run with many different values for the tile sizes, the start and stop
        values, and num_lambdas.
Args:
fake_resource:
Returns:
"""
args = {
"upload_sfn": "IngestUpload",
"x_start": 0,
"x_stop": 2048,
"y_start": 0,
"y_stop": 2048,
"z_start": 0,
"z_stop": 128,
"t_start": 0,
"t_stop": 1,
"project_info": [
"1",
"2",
"3"
],
"ingest_queue": "https://queue.amazonaws.com/...",
"job_id": 11,
"upload_queue": "https://queue.amazonaws.com/...",
"x_tile_size": 1024,
"y_tile_size": 1024,
"t_tile_size": 1,
"z_tile_size": 1,
"resolution": 0,
"items_to_skip": 0,
'MAX_NUM_ITEMS_PER_LAMBDA': 500000,
'z_chunk_size': 64
}
        # Walk the create_messages generator as a single lambda would and populate the
        # dictionary with every chunk key returned.
dict = {}
msgs = iqu.create_messages(args)
for msg_json in msgs:
ct_key = self.generate_chunk_tile_key(msg_json)
print(ct_key)
if ct_key not in dict:
dict[ct_key] = 1
else:
self.fail("Dictionary already contains key: ".format(ct_key))
# Verify correct count of items in dictionary
dict_length = len(dict.keys())
item_count = self.tile_count(args)
print("Item Count: {}".format(item_count))
self.assertEqual(dict_length, item_count)
# loop through create_messages() num_lambda times pulling out each tile from the dictionary.
num_lambdas = 2
args["MAX_NUM_ITEMS_PER_LAMBDA"] = math.ceil(dict_length / num_lambdas)
for skip in range(0, dict_length, args["MAX_NUM_ITEMS_PER_LAMBDA"]):
args["items_to_skip"] = skip
#print("Skip: " + str(skip))
msgs = iqu.create_messages(args)
for msg_json in msgs:
ct_key = self.generate_chunk_tile_key(msg_json)
if ct_key in dict:
del dict[ct_key]
else:
self.fail("Dictionary does not contains key: ".format(ct_key))
# Verify Dictionary has no left over items.
self.assertEqual(len(dict), 0)
def generate_chunk_tile_key(self, msg_json):
"""
Generate a key to track messages for testing.
Args:
msg_json (str): JSON message encoded as string intended for the upload queue.
Returns:
(str): Unique key identifying message.
"""
msg = json.loads(msg_json)
return msg["chunk_key"]
``` |
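A worked example of tile_count() with the args used in test_all_messages_are_there():
```python
import math

x = math.ceil((2048 - 0) / 1024)  # 2
y = math.ceil((2048 - 0) / 1024)  # 2
z = math.ceil((128 - 0) / 64)     # 2 (z uses z_chunk_size rather than a tile size)
t = math.ceil((1 - 0) / 1)        # 1
print(x * y * z * t)              # 8 messages expected from create_messages()
```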
{
"source": "jhuapl-boss/cvdb",
"score": 2
} |
#### File: cvdb/cvdb/cloudvolumedb.py
```python
import numpy as np
from cloudvolume import CloudVolume
from .cube import Cube
from .error import CVDBError
class CloudVolumeDB:
"""
Wrapper interface for cloudvolume read access to bossDB.
"""
def __init__(self, cv_config=None):
self.cv_config = cv_config
# Main READ interface method
def cutout(
self,
resource,
corner,
extent,
resolution,
time_sample_range=None,
filter_ids=None,
iso=False,
access_mode="cache",
):
"""Extract a cube of arbitrary size. Need not be aligned to cuboid boundaries.
corner represents the location of the cutout and extent the size. As an example in 1D, if asking for
a corner of 3 and extent of 2, this would be the values at 3 and 4.
Args:
resource (spdb.project.BossResource): Data model info based on the request or target resource
corner ((int, int, int)): the xyz location of the corner of the cutout
extent ((int, int, int)): the xyz extents
resolution (int): the resolution level
time_sample_range : ignored
filter_ids (optional[list]): ignored
iso (bool): ignored
access_mode (str): ignored
Returns:
cube.Cube: The cutout data stored in a Cube instance
Raises:
(CVDBError)
"""
channel = resource.get_channel()
out_cube = Cube.create_cube(resource, extent)
# NOTE: Refer to Tim's changes for channel method to check storage type.
if channel.storage_type != "cloudvol":
raise CVDBError(
f"Storage type {channel.storage_type} not configured for cloudvolume.",
701,
)
# NOTE: Refer to Tim's changes for S3 bucket and path.
try:
# Accessing HTTPS version of dataset. This is READ-ONLY and PUBLIC-ONLY, but much faster to download.
vol = CloudVolume(
f"s3://{channel.bucket}/{channel.cv_path}",
mip=resolution,
use_https=True,
fill_missing=True,
)
            # Data is downloaded by providing XYZ indices.
data = vol[
corner[0] : corner[0] + extent[0],
corner[1] : corner[1] + extent[1],
corner[2] : corner[2] + extent[2],
]
# Data returned as cloudvolume VolumeCutout object in XYZT order.
# Here we recast it to numpy array and transpose it to TZYX order.
data = np.array(data.T)
except Exception as e:
raise CVDBError(f"Error downloading cloudvolume data: {e}")
out_cube.set_data(data)
return out_cube
# Main WRITE interface method
def write_cuboid(
self,
resource,
corner,
resolution,
cuboid_data,
time_sample_start=0,
iso=False,
to_black=False,
):
""" Write a 3D/4D volume to the key-value store. Used by API/cache in consistent mode as it reconciles writes
If cuboid_data.ndim == 4, data in time-series format - assume t,z,y,x
If cuboid_data.ndim == 3, data not in time-series format - assume z,y,x
Args:
resource (project.BossResource): Data model info based on the request or target resource
            corner ((int, int, int)): the xyz location of the corner of the cutout
resolution (int): the resolution level
cuboid_data (numpy.ndarray): Matrix of data to write as cuboids
time_sample_start (int): if cuboid_data.ndim == 3, the time sample for the data
if cuboid_data.ndim == 4, the time sample for cuboid_data[0, :, :, :]
iso (bool): Flag indicating if you want to write to the "isotropic" version of a channel, if available
to_black (bool): Flag indicating is this cuboid is a cutout_to_black cuboid.
Returns:
None
"""
raise NotImplementedError
``` |
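A small sketch of the axis reordering performed in cutout(): cloudvolume returns data in XYZT order and the Cube expects TZYX, so a plain transpose reverses the axes (the extent below is an example):
```python
import numpy as np

xyzt = np.zeros((512, 512, 16, 1), dtype=np.uint8)  # (X, Y, Z, T) example extent
tzyx = np.array(xyzt.T)                              # .T reverses all axes
print(tzyx.shape)                                    # (1, 16, 512, 512) -> (T, Z, Y, X)
```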
{
"source": "jhuapl-boss/heaviside",
"score": 2
} |
#### File: heaviside/tests/test_compile.py
```python
import os
import sys
import json
import unittest
from io import StringIO
try:
from unittest import mock
except ImportError:
import mock
import heaviside
Path = heaviside.utils.Path
cur_dir = Path(os.path.dirname(os.path.realpath(__file__)))
class TestCompile(unittest.TestCase):
def execute(self, filename, error_msg):
filepath = cur_dir / 'sfn' / filename
try:
out = heaviside.compile(filepath)
self.assertFalse(True, "compile() should result in an exception")
except heaviside.exceptions.CompileError as ex:
actual = str(ex).split('\n')[-1]
expected = "Syntax Error: {}".format(error_msg)
self.assertEqual(actual, expected)
def test_unterminated_quote(self):
self.execute('error_unterminated_quote.sfn', 'Unterminated quote')
def test_unterminated_multiquote(self):
self.execute('error_unterminated_multiquote.sfn', 'EOF in multi-line string')
def test_invalid_heartbeat(self):
self.execute('error_invalid_heartbeat.sfn', "Heartbeat must be less than timeout (defaults to 60)")
def test_invalid_heartbeat2(self):
self.execute('error_invalid_heartbeat2.sfn', "'0' is not a positive integer")
def test_invalid_timeout(self):
self.execute('error_invalid_timeout.sfn', "'0' is not a positive integer")
def test_unexpected_catch(self):
self.execute('error_unexpected_catch.sfn', "Pass state cannot contain a Catch modifier")
def test_unexpected_data(self):
self.execute('error_unexpected_data.sfn', "Succeed state cannot contain a Data modifier")
def test_unexpected_heartbeat(self):
self.execute('error_unexpected_heartbeat.sfn', "Pass state cannot contain a Heartbeat modifier")
def test_unexpected_input(self):
self.execute('error_unexpected_input.sfn', "Fail state cannot contain a Input modifier")
def test_unexpected_output(self):
self.execute('error_unexpected_output.sfn', "Fail state cannot contain a Output modifier")
def test_unexpected_result(self):
self.execute('error_unexpected_result.sfn', "Fail state cannot contain a Result modifier")
def test_unexpected_retry(self):
self.execute('error_unexpected_retry.sfn', "Pass state cannot contain a Retry modifier")
def test_unexpected_timeout(self):
self.execute('error_unexpected_timeout.sfn', "Pass state cannot contain a Timeout modifier")
def test_unexpected_token(self):
self.execute('error_unexpected_token.sfn', 'Invalid syntax')
def test_invalid_retry_delay(self):
self.execute('error_invalid_retry_delay.sfn', "'0' is not a positive integer")
def test_invalid_retry_backoff(self):
self.execute('error_invalid_retry_backoff.sfn', "Backoff rate should be >= 1.0")
def test_invalid_wait_seconds(self):
self.execute('error_invalid_wait_seconds.sfn', "'0' is not a positive integer")
def test_invalid_multiple_input(self):
self.execute('error_invalid_multiple_input.sfn', "Pass state can only contain one Input modifier")
def test_invalid_state_name(self):
self.execute('error_invalid_state_name.sfn', "Name exceedes 128 characters")
def test_duplicate_state_name(self):
self.execute('error_duplicate_state_name.sfn', "Duplicate state name 'Test'")
def test_invalid_goto_target(self):
self.execute('error_invalid_goto_target.sfn', "Goto target 'Target' doesn't exist")
def test_invalid_task_service(self):
self.execute('error_invalid_task_service.sfn', "Invalid Task service")
def test_missing_task_function_argument(self):
self.execute('error_missing_task_function_argument.sfn', "Lambda task requires a function name argument")
def test_missing_task_function(self):
self.execute('error_missing_task_function.sfn', "DynamoDB task requires a function to call")
def test_unexpected_task_function(self):
self.execute('error_unexpected_task_function.sfn', "Unexpected function name")
def test_invalid_task_function(self):
self.execute('error_invalid_task_function.sfn', "Invalid Task function")
def test_invalid_task_arn(self):
self.execute('error_invalid_task_arn.sfn', "ARN must start with 'arn:aws:'")
def test_unexpected_task_argument(self):
self.execute('error_unexpected_task_argument.sfn', "Unexpected argument")
def test_unexpected_task_keyword_argument(self):
self.execute('error_unexpected_task_keyword_argument.sfn', "Unexpected keyword argument")
def test_invalid_task_sync_value(self):
self.execute('error_invalid_task_sync_value.sfn', "Synchronous value must be a boolean")
def test_invalid_task_keyword_argument(self):
self.execute('error_invalid_task_keyword_argument.sfn', "Invalid keyword argument")
def test_missing_task_keyword_argument(self):
self.execute('error_missing_task_keyword_argument.sfn', "Missing required keyword arguments: JobDefinition, JobQueue")
def test_visitor(self):
class TestVisitor(heaviside.ast.StateVisitor):
def handle_task(self, task):
task.arn = 'modified'
hsd = u"""Lambda('function')"""
out = heaviside.compile(hsd, visitors=[TestVisitor()])
out = json.loads(out)
self.assertEqual(out['States']['Line1']['Resource'], 'modified')
def test_invalid_iterator(self):
self.execute('error_iterator_used_by_non_map_state.sfn', "Pass state cannot contain a Iterator modifier")
def test_map_without_iterator(self):
self.execute('error_map_has_no_iterator.sfn', 'Map state must have an iterator')
def test_map_iterator_has_duplicate_state_name(self):
self.execute('error_map_iterator_duplicate_state_name.sfn', "Duplicate state name 'DuplicateName'")
```
#### File: heaviside/tests/test_statemachine.py
```python
import unittest
try:
from unittest import mock
except ImportError:
import mock
from .utils import MockSession
from heaviside import StateMachine
class TestStateMachine(unittest.TestCase):
@mock.patch('heaviside.create_session', autospec=True)
def test_constructor(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
machine = StateMachine('name')
self.assertEqual(machine.arn, 'XXX')
calls = [
mock.call.list_state_machines()
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.compile', autospec=True)
@mock.patch('heaviside.create_session', autospec=True)
def test_build(self, mCreateSession, mCompile):
iSession = MockSession()
iSession.region_name = 'region'
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
mCompile.return_value = {}
machine = StateMachine('name')
sfn = "Pass()"
actual = machine.build(sfn)
expected = {}
self.assertEqual(actual, expected)
calls = [
mock.call(sfn, region=machine.region, account_id=machine.account_id, visitors=[])
]
self.assertEqual(mCompile.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_resolve_role(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
iam = iSession.client('iam')
iam.get_role.return_value = {
'Role': {
'Arn': 'YYY'
}
}
machine = StateMachine('name')
arn = machine._resolve_role('role')
self.assertEqual(arn, 'YYY')
calls = [
mock.call.get_role(RoleName = 'role')
]
self.assertEqual(iam.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_create(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[]
}
client.create_state_machine.return_value = {
'stateMachineArn': 'XXX'
}
_resolve_role = mock.MagicMock()
_resolve_role.return_value = 'arn'
build = mock.MagicMock()
build.return_value = {}
machine = StateMachine('name')
machine._resolve_role = _resolve_role
machine.build = build
machine.create('source', 'role')
self.assertEqual(machine.arn, 'XXX')
calls = [mock.call('role')]
self.assertEqual(_resolve_role.mock_calls, calls)
calls = [mock.call('source')]
self.assertEqual(build.mock_calls, calls)
calls = [
mock.call.list_state_machines(),
mock.call.create_state_machine(name = 'name',
definition = {},
roleArn = 'arn')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_create_exists(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
machine = StateMachine('name')
with self.assertRaises(Exception):
machine.create('source', 'role')
self.assertEqual(machine.arn, 'XXX')
calls = [
mock.call.list_state_machines(),
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_delete(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
machine = StateMachine('name')
machine.delete()
self.assertEqual(machine.arn, None)
calls = [
mock.call.list_state_machines(),
mock.call.delete_state_machine(stateMachineArn = 'XXX')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_delete_exception(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[]
}
machine = StateMachine('name')
with self.assertRaises(Exception):
machine.delete(True)
self.assertEqual(machine.arn, None)
calls = [
mock.call.list_state_machines(),
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_start(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
client.start_execution.return_value = {
'executionArn': 'YYY'
}
machine = StateMachine('name')
arn = machine.start({}, 'run')
self.assertEqual(arn, 'YYY')
calls = [
mock.call.list_state_machines(),
mock.call.start_execution(stateMachineArn = 'XXX',
name = 'run',
input = '{}')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_start_doesnt_exist(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[]
}
machine = StateMachine('name')
with self.assertRaises(Exception):
machine.start({}, 'run')
calls = [
mock.call.list_state_machines(),
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_stop(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
machine = StateMachine('name')
machine.stop('arn', 'error', 'cause')
calls = [
mock.call.list_state_machines(),
mock.call.stop_execution(executionArn = 'arn',
error = 'error',
cause = 'cause')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.create_session', autospec=True)
def test_status(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
client.describe_execution.return_value = {
'status': 'status'
}
machine = StateMachine('name')
status = machine.status('arn')
self.assertEqual(status, 'status')
calls = [
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'arn')
]
self.assertEqual(client.mock_calls, calls)
@mock.patch('heaviside.time.sleep', autospec=True)
@mock.patch('heaviside.create_session', autospec=True)
def test_wait_success(self, mCreateSession, mSleep):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
client.describe_execution.side_effect = [
{'status': 'RUNNING'},
{'status': 'SUCCESS', 'output': '{}'}
]
machine = StateMachine('name')
output = machine.wait('arn')
self.assertEqual(output, {})
calls = [
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'arn'),
mock.call.describe_execution(executionArn = 'arn')
]
self.assertEqual(client.mock_calls, calls)
calls = [
mock.call(10)
]
self.assertEqual(mSleep.mock_calls, calls)
@mock.patch('heaviside.time.sleep', autospec=True)
@mock.patch('heaviside.create_session', autospec=True)
def _test_wait_xxx(self, error, mCreateSession, mSleep):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
client.describe_execution.side_effect = [
{'status': 'RUNNING'},
{'status': error}
]
client.get_execution_history.return_value = {
'events':[{
'execution{}EventDetails'.format(error): {}
}]
}
machine = StateMachine('name')
if error is None:
with self.assertRaises(Exception):
machine.wait('arn')
else:
output = machine.wait('arn')
self.assertEqual(output, {})
calls = [
mock.call.list_state_machines(),
mock.call.describe_execution(executionArn = 'arn'),
mock.call.describe_execution(executionArn = 'arn'),
mock.call.get_execution_history(executionArn = 'arn',
reverseOrder = True)
]
self.assertEqual(client.mock_calls, calls)
calls = [
mock.call(10)
]
self.assertEqual(mSleep.mock_calls, calls)
def test_wait_failed(self):
self._test_wait_xxx('Failed')
def test_wait_aborted(self):
self._test_wait_xxx('Aborted')
def test_wait_timedout(self):
self._test_wait_xxx('TimedOut')
def test_wait_exception(self):
self._test_wait_xxx(None)
@mock.patch('heaviside.create_session', autospec=True)
def test_running_arns(self, mCreateSession):
iSession = MockSession()
mCreateSession.return_value = (iSession, '123456')
client = iSession.client('stepfunctions')
client.list_state_machines.return_value = {
'stateMachines':[{
'name': 'name',
'stateMachineArn': 'XXX'
}]
}
client.list_executions.return_value = {
'executions': [
{'executionArn': 'arn1'},
{'executionArn': 'arn2'}
]
}
machine = StateMachine('name')
arns = machine.running_arns()
self.assertEqual(arns, ['arn1', 'arn2'])
calls = [
mock.call.list_state_machines(),
mock.call.list_executions(stateMachineArn = 'XXX',
statusFilter = 'RUNNING')
]
self.assertEqual(client.mock_calls, calls)
``` |
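The wait() tests above imply a polling loop: describe_execution() is called roughly every 10 seconds until the execution leaves RUNNING, then the output is parsed on success or the execution history is consulted on failure. A generic sketch of that pattern (an illustration only, not heaviside's actual implementation; the status strings mirror the mocked values in the tests):
```python
import json
import time

def wait_for_execution(client, execution_arn, period=10):
    """Poll a Step Functions execution until it stops running."""
    while True:
        resp = client.describe_execution(executionArn=execution_arn)
        if resp['status'] != 'RUNNING':
            break
        time.sleep(period)
    if resp['status'] == 'SUCCESS':
        return json.loads(resp['output'])
    # Failed/Aborted/TimedOut executions would be inspected via
    # client.get_execution_history(executionArn=execution_arn, reverseOrder=True)
    return None
```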
{
"source": "jhuapl-boss/ingest-client",
"score": 2
} |
#### File: jhuapl-boss/ingest-client/debug.py
```python
from ingestclient.utils.queue import QueueRecovery
from six.moves import input
import argparse
import sys
def get_confirmation(prompt):
"""Method to confirm decisions
Args:
prompt(str): Question to ask the user
Returns:
(bool): True indicating yes, False indicating no
"""
decision = False
while True:
confirm = input("{} (y/n): ".format(prompt))
if confirm.lower() == "y":
decision = True
break
elif confirm.lower() == "n":
decision = False
break
else:
print("Enter 'y' or 'n' for 'yes' or 'no'")
return decision
def main():
parser = argparse.ArgumentParser(description="Client for debugging the ingest process",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="Visit https://docs.theBoss.io for more details")
parser.add_argument("queue_name",
help="URL of the ingest SQS queue")
parser.add_argument("data_dir",
default=None,
help="Data Directory")
parser.add_argument("--download", "-d",
default=None,
action="store_true",
help="Download all messages in a queue")
parser.add_argument("--upload", "-u",
default=None,
action="store_true",
help="Upload all messages in a queue")
parser.add_argument("--invoke", "-i",
default=None,
action="store_true",
help="Trigger lambda invocations to cleanup ingest")
parser.add_argument("--x_tile_size", "-x",
default=None,
help="X tile size, needed for re-invoking ingest lambdas")
parser.add_argument("--y_tile_size", "-y",
default=None,
help="y tile size, needed for re-invoking ingest lambdas")
args = parser.parse_args()
qr = QueueRecovery(args.queue_name)
if args.download:
# Trying to download
print("Downloading messages from {}".format(args.queue_name))
qr.simple_store_messages(args.data_dir)
if args.upload:
# Trying to upload
print("Uploading messages to {}".format(args.queue_name))
qr.restore_messages(args.data_dir)
if args.invoke:
print("Triggering messages in {}".format(args.queue_name))
qr.invoke_ingest(args.data_dir, args.x_tile_size, args.y_tile_size)
if __name__ == '__main__':
main()
```
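A minimal sketch of driving QueueRecovery directly, mirroring the --download and --upload paths of the CLI above; the queue URL and directory are placeholders:
```python
from ingestclient.utils.queue import QueueRecovery

qr = QueueRecovery('https://sqs.us-east-1.amazonaws.com/123456789012/example-ingest-queue')
qr.simple_store_messages('./queue_backup')  # download queue messages to disk
qr.restore_messages('./queue_backup')       # push the saved messages back to the queue
```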
#### File: ingestclient/plugins/hdf5.py
```python
from __future__ import absolute_import
import six
from PIL import Image
import re
import os
import h5py
import numpy as np
from math import floor
import botocore
import logging
from ..utils.filesystem import DynamicFilesystemAbsPath
from .path import PathProcessor
from .tile import TileProcessor
class Hdf5TimeSeriesPathProcessor(PathProcessor):
"""A Path processor for time-series, multi-channel data (e.g. calcium imaging)
Assumes the data is stored (t,y,z, channel) in individual hdf5 files, with 1 hdf5 file per z-slice
"""
def __init__(self):
"""Constructor to add custom class var"""
PathProcessor.__init__(self)
self.regex = None
def setup(self, parameters):
"""Set the params
MUST HAVE THE CUSTOM PARAMETERS: "root_dir": "<path_to_stack_root>",
"extension": "hdf5|h5",
"base_filename": the base filename, see below for how this is parsed,
base_filename string identifies how to insert the z-index value into the filename. Identify a place to insert
the z_index with "<>". If you want to offset add o:number. If you want to zero pad add z:number"
my_base_<> -> my_base_0, my_base_1, my_base_2
<o:200>_my_base_<p:4> -> 200_my_base_0000, 201_my_base_0001, 202_my_base_0002
Includes the "ingest_job" section of the config file automatically
Args:
parameters (dict): Parameters for the dataset to be processed
Returns:
None
"""
self.parameters = parameters
        self.regex = re.compile(r'<(o:\d+)?(p:\d+)?>')
def process(self, x_index, y_index, z_index, t_index=None):
"""
Method to compute the file path for the indicated Z-slice
Args:
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(str): An absolute file path that contains the specified data
"""
if z_index >= self.parameters["ingest_job"]["extent"]["z"][1]:
raise IndexError("Z-index out of range")
# Create base filename
matches = self.regex.findall(self.parameters['base_filename'])
base_str = self.parameters['base_filename']
for m in matches:
if m[0]:
# there is an offset
z_val = int(m[0].split(':')[1]) + z_index
else:
z_val = z_index
if m[1]:
# There is zero padding
z_str = str(z_val).zfill(int(m[1].split(':')[1]))
else:
z_str = str(z_val)
base_str = base_str.replace("<{}{}>".format(m[0], m[1]), z_str)
# prepend root, append extension
return os.path.join(self.parameters['root_dir'], "{}.{}".format(base_str, self.parameters['extension']))
class Hdf5TimeSeriesTileProcessor(TileProcessor):
"""A Tile processor for time-series, multi-channel data (e.g. calcium imaging)
Assumes the data is stored (t, x, y, channel) in individual hdf5 files, with 1 hdf5 file per z-slice
where x is the column dim and y is the row dim
"""
def __init__(self):
"""Constructor to add custom class var"""
TileProcessor.__init__(self)
self.fs = None
def setup(self, parameters):
""" Method to load the file for uploading
Args:
parameters (dict): Parameters for the dataset to be processed
MUST HAVE THE CUSTOM PARAMETERS: "upload_format": "<png|tif>",
"channel_index": integer,
"scale_factor": float,
"dataset": str,
"filesystem": "<s3|local>",
"bucket": (if s3 filesystem)
Returns:
None
"""
self.parameters = parameters
self.fs = DynamicFilesystemAbsPath(parameters['filesystem'], parameters)
def process(self, file_path, x_index, y_index, z_index, t_index=0):
"""
        Method to load the image file. It can break the image into smaller tiles to help ingest go
        more smoothly, but the tile size currently must divide the image evenly.
Args:
file_path(str): An absolute file path for the specified tile
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(io.BufferedReader): A file handle for the specified tile
"""
file_path = self.fs.get_file(file_path)
x_range = [self.parameters["ingest_job"]["tile_size"]["x"] * x_index,
self.parameters["ingest_job"]["tile_size"]["x"] * (x_index + 1)]
y_range = [self.parameters["ingest_job"]["tile_size"]["y"] * y_index,
self.parameters["ingest_job"]["tile_size"]["y"] * (y_index + 1)]
# Open hdf5
h5_file = h5py.File(file_path, 'r')
# Save sub-img to png and return handle
tile_data = np.array(h5_file[self.parameters['dataset']][t_index,
x_range[0]:x_range[1],
y_range[0]:y_range[1],
int(self.parameters['channel_index'])])
tile_data = np.swapaxes(tile_data, 0, 1)
tile_data = np.multiply(tile_data, self.parameters['scale_factor'])
tile_data = tile_data.astype(np.uint16)
upload_img = Image.fromarray(tile_data, 'I;16')
output = six.BytesIO()
upload_img.save(output, format=self.parameters["upload_format"].upper())
# Send handle back
return output
class Hdf5TimeSeriesLabelTileProcessor(TileProcessor):
"""A Tile processor for label data packed in a time-series, multi-channel HDF5 (e.g. ROIs for calcium imaging)
Assumes the data is stored (x, y) in individual hdf5 files, with 1 hdf5 file per z-slice
where x is the column dim and y is the row dim
"""
def __init__(self):
"""Constructor to add custom class var"""
TileProcessor.__init__(self)
self.fs = None
def setup(self, parameters):
""" Method to load the file for uploading
Args:
parameters (dict): Parameters for the dataset to be processed
MUST HAVE THE CUSTOM PARAMETERS: "upload_format": "<png|tif>",
"dataset": str,
"filesystem": "<s3|local>",
"bucket": (if s3 filesystem)
Returns:
None
"""
self.parameters = parameters
self.fs = DynamicFilesystemAbsPath(parameters['filesystem'], parameters)
def process(self, file_path, x_index, y_index, z_index, t_index=0):
"""
        Method to load the image file. It can break the image into smaller tiles to help ingest go
        more smoothly, but the tile size currently must divide the image evenly.
Args:
file_path(str): An absolute file path for the specified tile
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(io.BufferedReader): A file handle for the specified tile
"""
file_path = self.fs.get_file(file_path)
x_range = [self.parameters["ingest_job"]["tile_size"]["x"] * x_index,
self.parameters["ingest_job"]["tile_size"]["x"] * (x_index + 1)]
y_range = [self.parameters["ingest_job"]["tile_size"]["y"] * y_index,
self.parameters["ingest_job"]["tile_size"]["y"] * (y_index + 1)]
# Open hdf5
h5_file = h5py.File(file_path, 'r')
# Save sub-img to png and return handle
tile_data = np.array(h5_file[self.parameters['dataset']][x_range[0]:x_range[1], y_range[0]:y_range[1]])
tile_data = np.swapaxes(tile_data, 0, 1)
tile_data = tile_data.astype(np.uint32)
upload_img = Image.fromarray(tile_data, 'I')
output = six.BytesIO()
upload_img.save(output, format=self.parameters["upload_format"].upper())
# Send handle back
return output
class Hdf5SlicePathProcessor(PathProcessor):
"""A Path processor for large slices stored in hdf5 files.
Assumes the data is stored in a dataset and an optional offset is stored in a dataset
"""
def __init__(self):
"""Constructor to add custom class var"""
PathProcessor.__init__(self)
self.regex = None
def setup(self, parameters):
"""Set the params
MUST HAVE THE CUSTOM PARAMETERS: "root_dir": "<path_to_stack_root>",
"extension": "hdf5|h5",
"base_filename": the base filename, see below for how this is parsed,
base_filename string identifies how to insert the z-index value into the filename. Identify a place to insert
the z_index with "<>". If you want to offset add o:number. If you want to zero pad add z:number"
my_base_<> -> my_base_0, my_base_1, my_base_2
<o:200>_my_base_<p:4> -> 200_my_base_0000, 201_my_base_0001, 202_my_base_0002
Includes the "ingest_job" section of the config file automatically
Args:
parameters (dict): Parameters for the dataset to be processed
Returns:
None
"""
self.parameters = parameters
        self.regex = re.compile(r'<(o:\d+)?(p:\d+)?>')
def process(self, x_index, y_index, z_index, t_index=None):
"""
Method to compute the file path for the indicated Z-slice
Args:
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(str): An absolute file path that contains the specified data
"""
if z_index >= self.parameters["ingest_job"]["extent"]["z"][1]:
raise IndexError("Z-index out of range")
# Create base filename
matches = self.regex.findall(self.parameters['base_filename'])
base_str = self.parameters['base_filename']
for m in matches:
if m[0]:
# there is an offset
z_val = int(m[0].split(':')[1]) + z_index
else:
z_val = z_index
if m[1]:
# There is zero padding
z_str = str(z_val).zfill(int(m[1].split(':')[1]))
else:
z_str = str(z_val)
base_str = base_str.replace("<{}{}>".format(m[0], m[1]), z_str)
# prepend root, append extension
return os.path.join(self.parameters['root_dir'], "{}.{}".format(base_str, self.parameters['extension']))
class Hdf5SliceTileProcessor(TileProcessor):
"""A Tile processor for large slices stored in hdf5 files.
Assumes the data is stored in a dataset and an optional offset is stored in a dataset
"""
def __init__(self):
"""Constructor to add custom class var"""
TileProcessor.__init__(self)
self.fs = None
def setup(self, parameters):
""" Method to load the file for uploading
Args:
parameters (dict): Parameters for the dataset to be processed
MUST HAVE THE CUSTOM PARAMETERS: "upload_format": "<png|tif>",
"data_name": str,
"offset_name": str,
"extent_name": str,
"offset_origin_x": int,
"offset_origin_y": int,
"filesystem": "<s3|local>",
"bucket": (if s3 filesystem)
Returns:
None
"""
self.parameters = parameters
self.fs = DynamicFilesystemAbsPath(parameters['filesystem'], parameters)
def process(self, file_path, x_index, y_index, z_index, t_index=0):
"""
Method to load the image file.
Args:
file_path(str): An absolute file path for the specified tile
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(io.BufferedReader): A file handle for the specified tile
"""
file_path = self.fs.get_file(file_path)
# Compute global range
tile_x_range = [self.parameters["ingest_job"]["tile_size"]["x"] * x_index,
self.parameters["ingest_job"]["tile_size"]["x"] * (x_index + 1)]
tile_y_range = [self.parameters["ingest_job"]["tile_size"]["y"] * y_index,
self.parameters["ingest_job"]["tile_size"]["y"] * (y_index + 1)]
# Open hdf5
h5_file = h5py.File(file_path, 'r')
# Compute range in actual data, taking offsets into account
x_offset = h5_file[self.parameters['offset_name']][1]
y_offset = h5_file[self.parameters['offset_name']][0]
x_img_extent = h5_file[self.parameters['extent_name']][1]
y_img_extent = h5_file[self.parameters['extent_name']][0]
x_frame_offset = x_offset + self.parameters['offset_origin_x']
        y_frame_offset = y_offset + self.parameters['offset_origin_y']
x1 = max(tile_x_range[0], x_frame_offset)
y1 = max(tile_y_range[0], y_frame_offset)
x2 = min(tile_x_range[1], x_frame_offset + x_img_extent)
y2 = min(tile_y_range[1], y_frame_offset + y_img_extent)
if self.parameters['datatype'] == "uint8":
datatype = np.uint8
elif self.parameters['datatype']== "uint16":
datatype = np.uint16
else:
raise Exception("Unsupported datatype: {}".format(self.parameters['datatype']))
# Allocate Tile
tile_data = np.zeros((self.parameters["ingest_job"]["tile_size"]["y"],
self.parameters["ingest_job"]["tile_size"]["x"]),
dtype=datatype, order='C')
# Copy sub-img to tile, save, return
img_y_index_start = max(0, y1 - y_frame_offset)
img_y_index_stop = max(0, y2 - y_frame_offset)
img_x_index_start = max(0, x1 - x_frame_offset)
img_x_index_stop = max(0, x2 - x_frame_offset)
tile_data[y1-tile_y_range[0]:y2-tile_y_range[0],
x1 - tile_x_range[0]:x2 - tile_x_range[0]] = np.array(h5_file[self.parameters['data_name']][
img_y_index_start:img_y_index_stop,
img_x_index_start:img_x_index_stop])
tile_data = tile_data.astype(datatype)
upload_img = Image.fromarray(tile_data)
output = six.BytesIO()
upload_img.save(output, format=self.parameters["upload_format"].upper())
# Send handle back
return output
class Hdf5ChunkPathProcessor(PathProcessor):
"""A Path processor for chunks stored in hdf5 files.
    Assumes the data is stored in a dataset and the filename contains the location. Supports an xyz offset.
"""
def __init__(self):
"""Constructor to add custom class var"""
PathProcessor.__init__(self)
self.regex = None
def setup(self, parameters):
"""Set the params
MUST HAVE THE CUSTOM PARAMETERS: "root_dir": "<path_to_stack_root>",
"extension": "hdf5|h5",
"prefix": Prefix for the filename
"x_offset": the offset from 0 in the x dim
"y_offset": the offset from 0 in the y dim
"z_offset": the offset from 0 in the z dim
"x_chunk_size": the chunk extent in the x dimension
"y_chunk_size": the chunk extent in the y dimension
"z_chunk_size": the chunk extent in the z dimension
"use_python_convention" <bool>: a flag indicating if ranges use python convention
filename format: prefix_xstart-xstop_ystart-ystop_zstart-zstop.h5
Includes the "ingest_job" section of the config file automatically
Args:
parameters (dict): Parameters for the dataset to be processed
Returns:
None
"""
self.parameters = parameters
def process(self, x_index, y_index, z_index, t_index=None):
"""
Method to compute the file path for the indicated Z-slice
Args:
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(str): An absolute file path that contains the specified data
"""
xstart = (x_index * self.parameters['x_chunk_size']) + self.parameters['x_offset']
xstop = ((x_index + 1) * self.parameters['x_chunk_size']) + self.parameters['x_offset']
if not self.parameters['use_python_convention']:
xstop -= 1
ystart = (y_index * self.parameters['y_chunk_size']) + self.parameters['y_offset']
ystop = ((y_index + 1) * self.parameters['y_chunk_size']) + self.parameters['y_offset']
if not self.parameters['use_python_convention']:
ystop -= 1
zstart = floor((z_index + self.parameters['z_offset']) / self.parameters['z_chunk_size'])
zstop = ((zstart + 1) * self.parameters['z_chunk_size']) + self.parameters['z_offset']
if not self.parameters['use_python_convention']:
zstop -= 1
zstart += self.parameters['z_offset']
# prepend root, append extension
filename = "{}_{}-{}_{}-{}_{}-{}.{}".format(self.parameters['prefix'],
xstart, xstop,
ystart, ystop,
zstart, zstop,
self.parameters['extension'])
return os.path.join(self.parameters['root_dir'], filename)
class Hdf5ChunkTileProcessor(TileProcessor):
"""A Tile processor for large slices stored in hdf5 files.
Assumes the data is stored in a dataset and an optional offset is stored in a dataset
"""
def __init__(self):
"""Constructor to add custom class var"""
TileProcessor.__init__(self)
self.fs = None
def setup(self, parameters):
""" Method to load the file for uploading
Args:
parameters (dict): Parameters for the dataset to be processed
MUST HAVE THE CUSTOM PARAMETERS: "upload_format": "<png|tiff>",
"data_name": str,
"z_chunk_size": the chunk extent in the z dimension,
"filesystem": "<s3|local>",
"bucket": (if s3 filesystem)
Returns:
None
"""
self.parameters = parameters
self.fs = DynamicFilesystemAbsPath(parameters['filesystem'], parameters)
def process(self, file_path, x_index, y_index, z_index, t_index=0):
"""
Method to load the image file.
Args:
file_path(str): An absolute file path for the specified tile
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(io.BufferedReader): A file handle for the specified tile
"""
if self.parameters['datatype'] == "uint8":
datatype = np.uint8
elif self.parameters['datatype'] == "uint16":
datatype = np.uint16
elif self.parameters['datatype'] == "uint32":
datatype = np.uint32
else:
raise Exception("Unsupported datatype: {}".format(self.parameters['datatype']))
try:
file_path = self.fs.get_file(file_path)
# Open hdf5
h5_file = h5py.File(file_path, 'r')
# Compute z-index (plugin assumes xy extent fits in a tile)
z_index = z_index % self.parameters['z_chunk_size']
# Allocate Tile
tile_data = np.array(h5_file[self.parameters['data_name']][z_index, :, :], dtype=datatype, order='C')
except botocore.exceptions.ClientError as err:
logger = logging.getLogger('ingest-client')
logger.info("Could not find chunk. Assuming it's missing and generating blank data.")
# TODO: remove kludge once we have contiguous datasets.
tile_data = np.zeros((512, 512), dtype=datatype, order="C")
except OSError as err:
logger = logging.getLogger('ingest-client')
logger.info("Could not find chunk. Assuming it's missing and generating blank data.")
# TODO: remove kludge once we have contiguous datasets.
tile_data = np.zeros((512, 512), dtype=datatype, order="C")
upload_img = Image.fromarray(tile_data)
output = six.BytesIO()
upload_img.save(output, format=self.parameters["upload_format"].upper())
# Send handle back
return output
class Hdf5SingleFilePathProcessor(PathProcessor):
"""A Path processor for 3D datasets stored in a single hdf5 file.
"""
def __init__(self):
"""Constructor to add custom class var"""
PathProcessor.__init__(self)
def setup(self, parameters):
"""Set the params
MUST HAVE THE CUSTOM PARAMETERS: "filename": "<path_to_filet>",
Includes the "ingest_job" section of the config file automatically
Args:
parameters (dict): Parameters for the dataset to be processed
Returns:
None
"""
self.parameters = parameters
def process(self, x_index, y_index, z_index, t_index=None):
"""
Method to compute the file path for the indicated Z-slice
Args:
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(str): An absolute file path that contains the specified data
"""
# prepend root, append extension
return self.parameters['filename']
class Hdf5SingleFileTileProcessor(TileProcessor):
"""A Tile processor for 3D datasets stored in a single HDF5 file
Assumes the data is stored in a dataset and an optional offset is stored in a dataset
"""
def __init__(self):
"""Constructor to add custom class var"""
TileProcessor.__init__(self)
self.fs = None
def setup(self, parameters):
""" Method to load the file for uploading
Args:
parameters (dict): Parameters for the dataset to be processed
MUST HAVE THE CUSTOM PARAMETERS: "upload_format": "<png|tiff>",
"data_name": str,
"datatype": <uint8|uint16|uint32>
"offset_x": int,
"offset_y": int,
"offset_z": int,
"filesystem": "<s3|local>",
"bucket": (if s3 filesystem)
Returns:
None
"""
self.parameters = parameters
self.fs = DynamicFilesystemAbsPath(parameters['filesystem'], parameters)
def process(self, file_path, x_index, y_index, z_index, t_index=0):
"""
Method to load the image file.
Args:
file_path(str): An absolute file path for the specified tile
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(io.BufferedReader): A file handle for the specified tile
"""
file_path = self.fs.get_file(file_path)
# Compute global range
target_x_range = [self.parameters["ingest_job"]["tile_size"]["x"] * x_index,
self.parameters["ingest_job"]["tile_size"]["x"] * (x_index + 1)]
target_y_range = [self.parameters["ingest_job"]["tile_size"]["y"] * y_index,
self.parameters["ingest_job"]["tile_size"]["y"] * (y_index + 1)]
# Open hdf5
h5_file = h5py.File(file_path, 'r')
# Compute range in actual data, taking offsets into account
x_offset = self.parameters['offset_x']
y_offset = self.parameters['offset_y']
x_tile_size = self.parameters["ingest_job"]["tile_size"]["x"]
y_tile_size = self.parameters["ingest_job"]["tile_size"]["y"]
h5_x_range = [target_x_range[0] + x_offset, target_x_range[1] + x_offset]
h5_y_range = [target_y_range[0] + y_offset, target_y_range[1] + y_offset]
h5_z_slice = z_index + self.parameters['offset_z']
tile_x_range = [0, x_tile_size]
tile_y_range = [0, y_tile_size]
h5_max_x = h5_file[self.parameters['data_name']].shape[2]
h5_max_y = h5_file[self.parameters['data_name']].shape[1]
if h5_x_range[0] < 0:
# insert sub-region into tile
tile_x_range = [h5_x_range[0] * -1, x_tile_size]
h5_x_range[0] = 0
if h5_y_range[0] < 0:
# insert sub-region into tile
tile_y_range = [h5_y_range[0] * -1, y_tile_size]
h5_y_range[0] = 0
if h5_x_range[1] > h5_max_x:
# insert sub-region into tile
tile_x_range = [0, x_tile_size - (h5_x_range[1] - h5_max_x)]
h5_x_range[1] = h5_max_x
if h5_y_range[1] > h5_max_y:
# insert sub-region into tile
tile_y_range = [0, y_tile_size - (h5_y_range[1] - h5_max_y)]
h5_y_range[1] = h5_max_y
if self.parameters['datatype'] == "uint8":
datatype = np.uint8
elif self.parameters['datatype']== "uint16":
datatype = np.uint16
elif self.parameters['datatype']== "uint32":
datatype = np.uint32
else:
raise Exception("Unsupported datatype: {}".format(self.parameters['datatype']))
# Allocate Tile
tile_data = np.zeros((self.parameters["ingest_job"]["tile_size"]["y"],
self.parameters["ingest_job"]["tile_size"]["x"]),
dtype=datatype, order='C')
if h5_z_slice >= 0:
# Copy sub-img to tile, save, return
tile_data[tile_y_range[0]:tile_y_range[1],
tile_x_range[0]:tile_x_range[1]] = np.array(h5_file[self.parameters['data_name']][
h5_z_slice,
h5_y_range[0]:h5_y_range[1],
h5_x_range[0]:h5_x_range[1]])
tile_data = tile_data.astype(datatype)
upload_img = Image.fromarray(tile_data)
output = six.BytesIO()
upload_img.save(output, format=self.parameters["upload_format"].upper())
# Send handle back
return output
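# --- Illustrative example (not part of the original plugin) ---
# A minimal sketch of the custom parameters the HDF5 single-file path and tile
# processors expect, based on the docstrings above. All values below (file
# names, sizes, offsets) are hypothetical placeholders.
EXAMPLE_HDF5_SINGLE_FILE_PARAMS = {
    "filename": "/data/example_volume.h5",   # used by Hdf5SingleFilePathProcessor
    "upload_format": "png",                  # tiles are encoded as PNG before upload
    "data_name": "volume",                   # name of the HDF5 dataset holding the image data
    "datatype": "uint16",                    # one of uint8 | uint16 | uint32
    "offset_x": 0,
    "offset_y": 0,
    "offset_z": 0,
    "filesystem": "local",                   # "s3" would additionally require a "bucket" entry
    "ingest_job": {"tile_size": {"x": 512, "y": 512}},
}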
```
#### File: ingestclient/plugins/zarr.py
```python
from __future__ import absolute_import
import six
from .path import PathProcessor
from .chunk import ChunkProcessor, ZYX_ORDER
import numpy as np
import zarr
class ZarrPathProcessor(PathProcessor):
"""Class for simple image stacks that only increment in Z, uses the dynamic filesystem utility"""
def __init__(self):
"""Constructor to add custom class var"""
PathProcessor.__init__(self)
def setup(self, parameters):
"""Set the params
Args:
parameters (dict): Parameters for the dataset to be processed
Returns:
None
"""
pass
def process(self, x_index, y_index, z_index, t_index=None):
"""
Method to compute the file path for the indicated tile
Args:
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
t_index(int): The time index
Returns:
(str): An absolute file path that contains the specified data
"""
return ""
class ZarrChunkProcessor(ChunkProcessor):
"""Chunk processor that utilizes Seung Lab's CloudVolume"""
def __init__(self):
ChunkProcessor.__init__(self)
self.vol = None
self.ingest_job = None
def setup(self, parameters):
""" Method to load the file for uploading data. Assumes intern token is set via environment variable or config
default file
Args:
parameters (dict): Parameters for the dataset to be processed. Must
include the following keys:
"backend" : [S3, GCS] for Amazon S3 and Google Cloud Service, respectively.
"bucket" : name of the S3 or GCS bucket containing zarr file.
"volume_name" name of the volume in the zarr file (e.g. "raw")
Returns:
None
"""
self.parameters = parameters
self.ingest_job = self.parameters.pop("ingest_job")
self.cloud_path = self.parameters["cloud_path"]
self.volume_name = self.parameters["volume_name"]
self.bucket = self.cloud_path.split('//')[1]
if self.cloud_path.startswith("s3://"):
from s3fs import S3FileSystem
Zg = zarr.group(store=S3FileSystem().get_mapper(self.bucket))
elif self.cloud_path.startswith("gs://"):
from gcsfs import GCSFileSystem
Zg = zarr.group(store=GCSFileSystem().get_mapper(self.bucket))
else:
raise ValueError("Cloudpath parameter must either start with 's3://' or 'gs://'.")
self.vol = Zg[self.volume_name]
def process(self, file_path, x_index, y_index, z_index):
"""
Method to take a chunk indices and return an ndarray with the correct data
Args:
file_path(str): Ignored.
x_index(int): The tile index in the X dimension
y_index(int): The tile index in the Y dimension
z_index(int): The tile index in the Z dimension
Returns:
(np.ndarray, int): ndarray for the specified chunk, ZYX_ORDER
"""
x_size = self.ingest_job["chunk_size"]["x"]
y_size = self.ingest_job["chunk_size"]["y"]
z_size = self.ingest_job["chunk_size"]["z"]
x_start = x_index * x_size
y_start = y_index * y_size
z_start = z_index * z_size
x_stop = x_start + x_size
y_stop = y_start + y_size
z_stop = z_start + z_size
data = self.vol[z_start:z_stop, y_start:y_stop, x_start:x_stop]
return data, ZYX_ORDER
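# --- Illustrative example (not part of the original plugin) ---
# A minimal sketch of the parameters ZarrChunkProcessor.setup() reads, using a
# hypothetical bucket and volume name. "chunk_size" comes from the ingest_job
# section of the ingest configuration.
EXAMPLE_ZARR_PARAMS = {
    "cloud_path": "s3://example-bucket",     # or "gs://example-bucket"
    "volume_name": "raw",                    # zarr group member holding the data
    "ingest_job": {"chunk_size": {"x": 1024, "y": 1024, "z": 64}},
}
# processor = ZarrChunkProcessor()
# processor.setup(dict(EXAMPLE_ZARR_PARAMS))      # note: setup() pops "ingest_job"
# chunk, order = processor.process("", 0, 0, 0)   # would fetch chunk index (0, 0, 0)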
```
#### File: ingest-client/ingestclient/prep_ingest.py
```python
import argparse
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import json
import os
# Name of group to assign read-only privileges to.
GOV_TEAM = 'gov_team'
def create_collection(config, rmt):
"""
Ensure collection exists.
Args:
config (dict):
rmt (BossRemote):
Returns:
(CollectionResource)
"""
collection = CollectionResource(
config['collection']['name'], config['collection']['description'])
try:
collection = rmt.create_project(collection)
except Exception as e:
collection = rmt.get_project(collection)
return collection
def create_experiment(config, rmt, collection, coord):
"""
Ensure experiment exists.
Args:
config (dict):
rmt (BossRemote):
collection (CollectionResource):
coord (CoordinateFrameResource):
Returns:
(ExperimentResource)
"""
# Default values if not provided in config file.
exp_vals = {
'hierarchy_method': 'anisotropic',
'num_hierarchy_levels': 1,
'num_time_samples': 1,
'time_step': 0,
'time_step_unit': 'seconds'
}
exp_vals.update(config['experiment'])
experiment = ExperimentResource(
config['experiment']['name'], collection.name, coord.name,
config['experiment']['description'],
num_hierarchy_levels=exp_vals['num_hierarchy_levels'],
hierarchy_method=exp_vals['hierarchy_method'],
num_time_samples=exp_vals['num_time_samples'],
time_step=exp_vals['time_step'],
time_step_unit=exp_vals['time_step_unit'])
try:
experiment = rmt.create_project(experiment)
except:
experiment = rmt.get_project(experiment)
return experiment
def create_channel(config, rmt, collection, experiment):
"""
Ensure channel exists.
Args:
config (dict):
rmt (BossRemote):
collection (CollectionResource):
experiment (ExperimentResource):
Returns:
(ChannelResource)
"""
if 'sources' not in config['channel']:
sources = []
else:
sources = config['channel']['sources']
channel = ChannelResource(
config['channel']['name'], collection.name, experiment.name,
type=config['channel']['type'], description=config['channel']['description'],
datatype=config['channel']['datatype'], sources=sources)
try:
channel = rmt.create_project(channel)
except:
channel = rmt.get_project(channel)
return channel
def create_coord_frame(config, rmt):
"""
Ensure coordinate frame exists.
Args:
config (dict):
rmt (BossRemote):
Returns:
(CoordinateFrameResource)
"""
coord = CoordinateFrameResource(config['coordinate_frame']['name'],
config['coordinate_frame']['description'],
config['coordinate_frame']['x_start'],
config['coordinate_frame']['x_stop'],
config['coordinate_frame']['y_start'],
config['coordinate_frame']['y_stop'],
config['coordinate_frame']['z_start'],
config['coordinate_frame']['z_stop'],
config['coordinate_frame']['voxel_size_x'],
config['coordinate_frame']['voxel_size_y'],
config['coordinate_frame']['voxel_size_z'])
try:
coord = rmt.create_project(coord)
except:
coord = rmt.get_project(coord)
return coord
def add_gov_team_permissions(rmt, collection, experiment, channel):
"""
Give read permission to the government team.
Args:
rmt (BossRemote):
collection (CollectionResource):
experiment (ExperimentResource):
channel (ChannelResource):
"""
try:
rmt.add_permissions(GOV_TEAM, collection, ['read'])
except:
print('Failed to automatically add {} group to collection: {}'.format(GOV_TEAM, collection.name))
try:
rmt.add_permissions(GOV_TEAM, experiment, ['read'])
except:
print('Failed to automatically add {} group to experiment: {}'.format(GOV_TEAM, experiment.name))
try:
rmt.add_permissions(GOV_TEAM, channel, ['read'])
except:
print('Failed to automatically add {} group to channel: {}'.format(GOV_TEAM, channel.name))
def main(parser_args=None):
"""
Main entry point of script.
Args:
parser_args(argparse.ArgumentParser): A pre-loaded ArgumentParser instance
"""
if parser_args is None:
args = parse_args()
else:
args = parser_args
rmt = BossRemote(args.intern_cfg)
# Load Configuration File
with open(args.config_file, 'rt') as cfg:
config = json.load(cfg)
collection = create_collection(config, rmt)
coord = create_coord_frame(config, rmt)
experiment = create_experiment(config, rmt, collection, coord)
channel = create_channel(config, rmt, collection, experiment)
if args.govteam:
add_gov_team_permissions(rmt, collection, experiment, channel)
# Update boss-ingest config file with resources names from config file
if args.writecfg:
with open(config['ingest_cfg'], 'rt') as cfg:
ingest_file = json.load(cfg)
if 'database' not in ingest_file:
ingest_file['database'] = {}
ingest_file['database']['collection'] = collection.name
ingest_file['database']['experiment'] = experiment.name
ingest_file['database']['channel'] = channel.name
with open(config['ingest_cfg'], 'wt') as cfg:
json.dump(ingest_file, cfg, indent=2)
print('\n\nRun this command in the ingest-client repo directory to execute the ingest client:')
print('\n export INTERN_TOKEN={}'.format(rmt.project_service.auth))
print(' boss-ingest {}'.format(os.path.abspath(config['ingest_cfg'])))
def parse_args():
"""
Parse command line arguments.
Returns:
(Namespace)
"""
parser = argparse.ArgumentParser(
description='Script that creates the collection/experiment/channel for ingest. ' +
'To supply arguments from a file, provide the filename prepended with an `@`.',
fromfile_prefix_chars = '@')
parser.add_argument(
'--intern_cfg', '-i',
default='boss.cfg',
help='intern config file')
parser.add_argument(
'--writecfg',
action='store_true',
help='Update the boss-ingest config file with names of collection/experiment/channel.')
parser.add_argument(
'--govteam',
action='store_true',
help='Add read-only permissions for the Government team.')
parser.add_argument(
'config_file',
help='Collection/experiment/channel configuration in JSON.')
return parser.parse_args()
if __name__ == '__main__':
main()
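# --- Illustrative example (not part of the original script) ---
# A minimal sketch of the JSON configuration file main() expects, with
# hypothetical resource names. The keys mirror the accesses made in the
# create_* helpers above; "ingest_cfg" points at the boss-ingest config that
# --writecfg updates.
#
# {
#   "ingest_cfg": "my_ingest_job.json",
#   "collection": {"name": "my_collection", "description": "Example collection"},
#   "coordinate_frame": {
#     "name": "my_frame", "description": "Example frame",
#     "x_start": 0, "x_stop": 2048,
#     "y_start": 0, "y_stop": 2048,
#     "z_start": 0, "z_stop": 512,
#     "voxel_size_x": 4, "voxel_size_y": 4, "voxel_size_z": 40
#   },
#   "experiment": {"name": "my_experiment", "description": "Example experiment"},
#   "channel": {"name": "em", "description": "Example channel",
#               "type": "image", "datatype": "uint8"}
# }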
```
#### File: ingestclient/test/aws.py
```python
import json
import boto3
from moto import mock_s3
from moto import mock_sqs
import time
VOLUMETRIC_CUBOID_KEY = "md5hash&1&2&3&0&22"
# Currently just passed to Engine.upload_cuboid() to be stored in metadata.
VOLUMETRIC_CHUNK_KEY = "foo"
class Setup(object):
""" Class to handle setting up AWS resources for testing
"""
def __init__(self, region="us-east-1"):
self.mock = True
self.mock_s3 = None
self.mock_sqs = None
self.region = region
def start_mocking(self):
"""Method to start mocking"""
self.mock = True
self.mock_s3 = mock_s3()
self.mock_sqs = mock_sqs()
self.mock_s3.start()
self.mock_sqs.start()
def stop_mocking(self):
"""Method to stop mocking"""
self.mock_s3.stop()
self.mock_sqs.stop()
# ***** Bucket *****
def _create_bucket(self, bucket_name):
"""Method to create the S3 bucket"""
client = boto3.client('s3', region_name=self.region)
_ = client.create_bucket(
ACL='private',
Bucket=bucket_name
)
return client.get_waiter('bucket_exists')
def create_bucket(self, bucket_name):
"""Method to create the S3 bucket storage"""
if self.mock:
with mock_s3():
self._create_bucket(bucket_name)
else:
waiter = self._create_bucket(bucket_name)
# Wait for bucket to exist
waiter.wait(Bucket=bucket_name)
def _delete_bucket(self, bucket_name):
"""Method to delete the S3 bucket"""
s3 = boto3.resource('s3', region_name=self.region)
bucket = s3.Bucket(bucket_name)
for obj in bucket.objects.all():
obj.delete()
# Delete bucket
bucket.delete()
return bucket
def delete_bucket(self, bucket_name):
"""Method to create the S3 bucket"""
if self.mock:
with mock_s3():
self._delete_bucket(bucket_name)
else:
bucket = self._delete_bucket(bucket_name)
# Wait for table to be deleted (since this is real)
bucket.wait_until_not_exists()
# ***** END Bucket *****
# ***** SQS Queue *****
def _create_queue(self, queue_name):
"""Method to create a test sqs queue"""
client = boto3.client('sqs', region_name=self.region)
# Set big visibility timeout because nothing is deleting messages (no lambda running on unit tests)
response = client.create_queue(QueueName=queue_name,
Attributes={
'VisibilityTimeout': '500',
'DelaySeconds': '0',
'MaximumMessageSize': '262144'
})
url = response['QueueUrl']
return url
def create_queue(self, queue_name):
"""
Create one or more SQS queues. When mocking, all queues must be created
at once. If not, only the last one created exists.
Args:
queue_name (str|list[str]): Name of queue(s) to create.
Returns:
(str|list[str]): URL(s) of queue(s).
"""
url = []
if not isinstance(queue_name, list):
queue_name = [queue_name]
if self.mock:
with mock_sqs():
url = [self._create_queue(name) for name in queue_name]
else:
url = [self._create_queue(name) for name in queue_name]
time.sleep(30)
if len(url) == 1:
return url[0]
return url
def _delete_queue(self, queue_url):
"""Method to delete a test sqs"""
client = boto3.client('sqs', region_name=self.region)
client.delete_queue(QueueUrl=queue_url)
def delete_queue(self, queue_name):
"""Method to delete a test sqs"""
if self.mock:
with mock_sqs():
self._delete_queue(queue_name)
else:
self._delete_queue(queue_name)
# ***** END Flush SQS Queue *****
def _add_tasks(self, id, secret, queue_url, backend_instance):
"""Push some fake tasks on the task queue"""
client = boto3.client('sqs', region_name=self.region, aws_access_key_id=id,
aws_secret_access_key=secret)
params = {"collection": 1,
"experiment": 2,
"channel": 3,
"resolution": 0,
"x_index": 5,
"y_index": 6,
"z_index": 1,
"t_index": 0,
"num_tiles": 16,
}
self.test_msg = []
for t_idx in range(0, 4):
params["t_index"] = t_idx
proj = [str(params['collection']), str(params['experiment']), str(params['channel'])]
tile_key = backend_instance.encode_tile_key(proj,
params['resolution'],
params['x_index'],
params['y_index'],
params['z_index'],
params['t_index'],
)
chunk_key = backend_instance.encode_chunk_key(params['num_tiles'], proj,
params['resolution'],
params['x_index'],
params['y_index'],
params['z_index'],
params['t_index'],
)
msg = {"tile_key": tile_key, "chunk_key": chunk_key}
client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(msg))
self.test_msg.append(msg)
def add_tasks(self, id, secret, queue_url, backend_instance):
"""Push some fake tasks on the task queue"""
if self.mock:
with mock_sqs():
    self._add_tasks(id, secret, queue_url, backend_instance)
else:
self._add_tasks(id, secret, queue_url, backend_instance)
def _add_volumetric_tasks(self, id, secret, queue_url, backend_instance):
"""Push some fake tasks on the volumetric upload queue"""
client = boto3.client('sqs', region_name=self.region, aws_access_key_id=id,
aws_secret_access_key=secret)
params = {
"cuboids": [
{ "x": 0, "y": 0, "z": 0, "key": VOLUMETRIC_CUBOID_KEY }
],
"collection": 1,
"experiment": 2,
"channel": 3,
"resolution": 0,
"x_index": 0,
"y_index": 0,
"z_index": 0,
"t_index": 0,
"num_tiles": 1
}
proj = [str(params['collection']), str(params['experiment']), str(params['channel'])]
chunk_key = backend_instance.encode_chunk_key(params['num_tiles'], proj,
params['resolution'],
params['x_index'],
params['y_index'],
params['z_index'],
params['t_index'],
)
self.test_msg = []
msg = { "chunk_key": chunk_key, "cuboids": params["cuboids"] }
client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(msg))
self.test_msg.append(msg)
def add_volumetric_tasks(self, id, secret, queue_url, backend_instance):
"""Add fake tasks to the volumetric upload queue"""
if self.mock:
with mock_sqs():
    self._add_volumetric_tasks(id, secret, queue_url, backend_instance)
else:
self._add_volumetric_tasks(id, secret, queue_url, backend_instance)
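# --- Illustrative usage sketch (not part of the original helper) ---
# Mirrors how the unit tests below drive this class: start moto mocking, then
# create the queues and bucket the engine expects. The names are the same
# hypothetical test resources used in test_engine_volumetric.py.
# setup_helper = Setup()
# setup_helper.start_mocking()
# upload_url, index_url = setup_helper.create_queue(["test-queue", "test-index-queue"])
# setup_helper.create_bucket("test-cuboid-store")
# ...run tests...
# setup_helper.stop_mocking()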
```
#### File: ingestclient/test/test_engine_volumetric.py
```python
from __future__ import absolute_import
from ingestclient.core.engine import Engine
from ingestclient.core.validator import Validator, BossValidatorV02
from ingestclient.core.backend import Backend, BossBackend
from ingestclient.core.config import Configuration, ConfigFileError
from ingestclient.core.consts import BOSS_CUBOID_X, BOSS_CUBOID_Y, BOSS_CUBOID_Z
from ingestclient.test.aws import Setup, VOLUMETRIC_CUBOID_KEY, VOLUMETRIC_CHUNK_KEY
from ingestclient.plugins.chunk import XYZ_ORDER, ZYX_ORDER, XYZT_ORDER, TZYX_ORDER
import os
import unittest
import sys
#This was added mainly to support python 2.7 testing as well
if sys.version_info >= (3, 3):
#python 3
from unittest.mock import MagicMock
else:
#python 2
from mock import MagicMock
import json
import responses
from pkg_resources import resource_filename
import tempfile
import boto3
import blosc
import numpy as np
class ResponsesMixin(object):
"""Mixin to setup requests mocking for the test class"""
def setUp(self):
self.resp_mock = responses.RequestsMock(assert_all_requests_are_fired=False)
self.resp_mock.__enter__()
self.add_default_response()
super(ResponsesMixin, self).setUp()
def tearDown(self):
super(ResponsesMixin, self).tearDown()
self.resp_mock.__exit__(None, None, None)
def add_default_response(self):
mocked_response = {"id": 23}
self.resp_mock.add(
responses.POST,
'https://api.theboss.io/latest/ingest/',
json=mocked_response,
status=201)
mocked_response = {
"ingest_job": {
"id": 23,
"ingest_queue": "https://aws.com/myqueue1",
"upload_queue": self.upload_queue_url,
"tile_index_queue": self.tile_index_queue_url,
"status": 1,
"tile_count": 500,
},
"ingest_lambda": "my_lambda",
"tile_bucket_name": self.tile_bucket_name,
"ingest_bucket_name": self.ingest_bucket_name,
"KVIO_SETTINGS": {
"settings": "go here"
},
"STATEIO_CONFIG": {
"settings": "go here"
},
"OBJECTIO_CONFIG": {
"settings": "go here"
},
"credentials": self.aws_creds,
"resource": {
"resource": "stuff"
}
}
self.resp_mock.add(
responses.GET,
'https://api.theboss.io/latest/ingest/23',
json=mocked_response,
status=200)
self.resp_mock.add(
responses.DELETE,
'https://api.theboss.io/latest/ingest/23',
status=204)
class EngineBossTestMixin(object):
def test_create_instance(self):
"""Method to test creating an instance from the factory"""
engine = Engine(self.config_file, self.api_token)
assert isinstance(engine, Engine) is True
assert isinstance(engine.backend, Backend) is True
assert isinstance(engine.backend, BossBackend) is True
assert isinstance(engine.validator, Validator) is True
assert isinstance(engine.validator, BossValidatorV02) is True
assert isinstance(engine.config, Configuration) is True
# Schema loaded
assert isinstance(engine.config.schema, dict) is True
assert engine.config.schema["type"] == "object"
def test_missing_file(self):
"""Test creating a Configuration object"""
with self.assertRaises(ConfigFileError):
engine = Engine("/asdfhdfgkjldhsfg.json", self.api_token)
def test_bad_file(self):
"""Test creating a Configuration object"""
with tempfile.NamedTemporaryFile(suffix='.json') as test_file:
with open(test_file.name, 'wt') as test_file_handle:
test_file_handle.write("garbage garbage garbage\n")
with self.assertRaises(ConfigFileError):
engine = Engine(test_file.name, self.api_token)
def test_setup(self):
"""Test setting up the engine - no error should occur"""
engine = Engine(self.config_file, self.api_token)
engine.setup()
def test_create_job(self):
"""Test creating an ingest job - mock server response"""
engine = Engine(self.config_file, self.api_token)
engine.create_job()
assert engine.ingest_job_id == 23
def test_join(self):
"""Test joining an existing ingest job - mock server response"""
engine = Engine(self.config_file, self.api_token, 23)
engine.join()
assert engine.upload_job_queue == self.upload_queue_url
assert engine.job_status == 1
def test_run(self):
"""Test getting a task from the upload queue"""
engine = Engine(self.config_file, self.api_token, 23)
engine.msg_wait_iterations = 0
# Put some stuff on the task queue
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
engine.run()
# Check for tile to exist
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# Using an empty CloudVolume dataset so all values should be 0.
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
unique_vals = np.unique(cuboid)
assert 1 == len(unique_vals)
assert 0 == unique_vals[0]
def test_upload_cuboid_indexing(self):
data = np.random.randint(
0, 256, (BOSS_CUBOID_X, BOSS_CUBOID_Y, BOSS_CUBOID_Z), 'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = data
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
x = 1024
y = 512
z = 16
assert True == engine.upload_cuboid(chunk, x, y, z,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, XYZ_ORDER)
exp_x = slice(x, x + BOSS_CUBOID_X, None)
exp_y = slice(y, y + BOSS_CUBOID_Y, None)
exp_z = slice(z, z + BOSS_CUBOID_Z, None)
chunk.__getitem__.assert_called_with((exp_x, exp_y, exp_z))
def test_upload_cuboid_random_data_xyzt_order(self):
data = np.random.randint(
0, 256, (BOSS_CUBOID_X, BOSS_CUBOID_Y, BOSS_CUBOID_Z, 1), 'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = data
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 2048, 32,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, XYZT_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert np.array_equal(np.transpose(data), cuboid)
def test_upload_cuboid_random_data_tzyx_order(self):
data = np.random.randint(
0, 256, (1, BOSS_CUBOID_Z, BOSS_CUBOID_Y, BOSS_CUBOID_X), 'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = data
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 2048, 32,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, TZYX_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert np.array_equal(data, cuboid)
def test_upload_cuboid_random_data_xyz_order(self):
data = np.random.randint(
0, 256, (BOSS_CUBOID_X, BOSS_CUBOID_Y, BOSS_CUBOID_Z), 'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = data
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 512, 48,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, XYZ_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert np.array_equal(
np.expand_dims(np.transpose(data), 0), cuboid)
def test_upload_cuboid_random_data_zyx_order(self):
data = np.random.randint(
0, 256, (BOSS_CUBOID_Z, BOSS_CUBOID_Y, BOSS_CUBOID_X), 'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = data
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 512, 48,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, ZYX_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert np.array_equal(np.expand_dims(data, 0), cuboid)
def test_upload_cuboid_partial_cuboid_zyx_order(self):
missing_z = 3
z_stop = BOSS_CUBOID_Z - missing_z
missing_y = 11
y_stop = BOSS_CUBOID_Y - missing_y
missing_x = 7
x_stop = BOSS_CUBOID_X - missing_x
partial_cuboid = np.random.randint(0, 256, (z_stop, y_stop, x_stop),
'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = partial_cuboid
expected_cuboid = np.pad(
np.expand_dims(partial_cuboid, 0),
((0, 0), (0, missing_z), (0, missing_y), (0, missing_x)),
'constant',
constant_values=0)
assert expected_cuboid.shape == (1, BOSS_CUBOID_Z, BOSS_CUBOID_Y,
BOSS_CUBOID_X)
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 512, 48,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, ZYX_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert expected_cuboid.shape == cuboid.shape
assert np.array_equal(expected_cuboid, cuboid)
def test_upload_cuboid_partial_cuboid_xyz_order(self):
missing_z = 3
z_stop = BOSS_CUBOID_Z - missing_z
missing_y = 11
y_stop = BOSS_CUBOID_Y - missing_y
missing_x = 7
x_stop = BOSS_CUBOID_X - missing_x
partial_cuboid = np.random.randint(0, 256, (x_stop, y_stop, z_stop),
'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = partial_cuboid
expected_cuboid = np.pad(
np.expand_dims(np.transpose(partial_cuboid), 0),
((0, 0), (0, missing_z), (0, missing_y), (0, missing_x)),
'constant',
constant_values=0)
assert expected_cuboid.shape == (1, BOSS_CUBOID_Z, BOSS_CUBOID_Y,
BOSS_CUBOID_X)
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 512, 48,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, XYZ_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert expected_cuboid.shape == cuboid.shape
assert np.array_equal(expected_cuboid, cuboid)
def test_upload_cuboid_partial_cuboid_tzyx_order(self):
missing_z = 3
z_stop = BOSS_CUBOID_Z - missing_z
missing_y = 11
y_stop = BOSS_CUBOID_Y - missing_y
missing_x = 7
x_stop = BOSS_CUBOID_X - missing_x
partial_cuboid = np.random.randint(0, 256, (1, z_stop, y_stop, x_stop),
'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = partial_cuboid
expected_cuboid = np.pad(
partial_cuboid, ((0, 0), (0, missing_z), (0, missing_y),
(0, missing_x)),
'constant',
constant_values=0)
assert expected_cuboid.shape == (1, BOSS_CUBOID_Z, BOSS_CUBOID_Y,
BOSS_CUBOID_X)
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 512, 48,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, TZYX_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert expected_cuboid.shape == cuboid.shape
assert np.array_equal(expected_cuboid, cuboid)
def test_upload_cuboid_partial_cuboid_xyzt_order(self):
missing_z = 3
z_stop = BOSS_CUBOID_Z - missing_z
missing_y = 11
y_stop = BOSS_CUBOID_Y - missing_y
missing_x = 7
x_stop = BOSS_CUBOID_X - missing_x
partial_cuboid = np.random.randint(0, 256, (x_stop, y_stop, z_stop, 1),
'uint8')
chunk = MagicMock(spec=np.ndarray)
chunk.__getitem__.return_value = partial_cuboid
expected_cuboid = np.pad(
np.transpose(partial_cuboid), ((0, 0), (0, missing_z),
(0, missing_y), (0, missing_x)),
'constant',
constant_values=0)
assert expected_cuboid.shape == (1, BOSS_CUBOID_Z, BOSS_CUBOID_Y,
BOSS_CUBOID_X)
engine = Engine(self.config_file, self.api_token, 23)
self.setup_helper.add_volumetric_tasks(self.aws_creds["access_key"],
self.aws_creds['secret_key'],
self.upload_queue_url, engine.backend)
engine.join()
assert True == engine.upload_cuboid(chunk, 1024, 512, 48,
VOLUMETRIC_CUBOID_KEY,
VOLUMETRIC_CHUNK_KEY, XYZT_ORDER)
s3 = boto3.resource('s3')
ingest_bucket = s3.Bucket(self.ingest_bucket_name)
with tempfile.NamedTemporaryFile() as test_file:
with open(test_file.name, 'wb') as raw_data:
ingest_bucket.download_fileobj(VOLUMETRIC_CUBOID_KEY, raw_data)
with open(test_file.name, 'rb') as raw_data:
# dtype set in boss-v0.2-test.json under chunk_processor.params.info.data_type
cuboid = self.s3_object_to_cuboid(raw_data.read(), 'uint8')
assert expected_cuboid.shape == cuboid.shape
assert np.array_equal(expected_cuboid, cuboid)
def s3_object_to_cuboid(self, raw_data, data_type):
data = blosc.decompress(raw_data)
data_mat = np.frombuffer(data, dtype=data_type)
return np.reshape(
data_mat, (1, BOSS_CUBOID_Z, BOSS_CUBOID_Y, BOSS_CUBOID_X),
order='C')
class TestBossEngine(EngineBossTestMixin, ResponsesMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
schema_file = os.path.join(
resource_filename("ingestclient", "schema"),
"boss-v0.2-schema.json")
with open(schema_file, 'r') as file_handle:
s = json.load(file_handle)
cls.mock_schema = {"schema": s}
cls.config_file = os.path.join(
resource_filename("ingestclient", "test/data"),
"boss-v0.2-test.json")
with open(cls.config_file, 'rt') as example_file:
cls.example_config_data = json.load(example_file)
# Setup AWS stuff
cls.setup_helper = Setup()
cls.setup_helper.mock = True
cls.setup_helper.start_mocking()
queue_names = ["test-queue", "test-index-queue"]
cls.upload_queue_url, cls.tile_index_queue_url = cls.setup_helper.create_queue(queue_names)
cls.tile_bucket_name = "test-tile-store"
cls.ingest_bucket_name = "test-cuboid-store"
cls.setup_helper.create_bucket(cls.ingest_bucket_name)
# mock api token
cls.api_token = "a<PASSWORD>"
# mock aws creds
cls.aws_creds = {
"access_key": "asdfasdf",
"secret_key": "asdfasdfasdfadsf"
}
@classmethod
def tearDownClass(cls):
# Stop mocking
cls.setup_helper.stop_mocking()
```
#### File: ingestclient/test/test_plugin_multipage_tiff.py
```python
from __future__ import absolute_import
import os
import unittest
import json
from pkg_resources import resource_filename
from PIL import Image
import numpy as np
from ingestclient.core.config import Configuration
from ingestclient.plugins.multipage_tiff import load_tiff_multipage
class TestSingleMultipageTiff(unittest.TestCase):
def test_SingleTimeTiffPathProcessor_setup(self):
"""Test setting up the path processor"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
assert pp.parameters["z_0"] == os.path.join(resource_filename("ingestclient", "test/data"),
"test_multipage.tif")
assert pp.parameters["ingest_job"]["extent"]["x"] == [0, 512]
def test_SingleTimeTiffPathProcessor_process(self):
"""Test running the path processor"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
assert pp.process(0, 0, 0, 0) == os.path.join(resource_filename("ingestclient", "test/data"),
"test_multipage.tif")
def test_SingleTimeTiffPathProcessor_process_invalid(self):
"""Test running the path processor with invalid tile indices"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
with self.assertRaises(IndexError):
pp.process(1, 0, 0, 0)
with self.assertRaises(IndexError):
pp.process(0, 1, 0, 0)
with self.assertRaises(IndexError):
pp.process(0, 0, 1, 0)
with self.assertRaises(IndexError):
pp.process(0, 0, 0, 11)
def test_SingleTimeTiffTileProcessor_setup(self):
"""Test setting up the tile processor"""
tp = self.config.tile_processor_class
tp.setup(self.config.get_tile_processor_params())
assert tp.parameters["datatype"] == "uint16"
assert tp.parameters["ingest_job"]["extent"]["y"] == [0, 256]
def test_SingleTimeTiffTileProcessor_process(self):
"""Test running the tile processor"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
tp = self.config.tile_processor_class
tp.setup(self.config.get_tile_processor_params())
filename = pp.process(0, 0, 0, 0)
handle = tp.process(filename, 0, 0, 0, 3)
# Open handle as image file
test_img = Image.open(handle)
test_img = np.array(test_img, dtype="uint16")
# Open original data
truth_img = load_tiff_multipage(filename)
truth_img = np.array(truth_img, dtype="uint16")
truth_img = truth_img[3, :, :]
# Make sure the same
np.testing.assert_array_equal(truth_img, test_img)
@classmethod
def setUpClass(cls):
cls.config_file = os.path.join(resource_filename("ingestclient", "test/data"), "boss-v0.1-singleMultipageTiff.json")
with open(cls.config_file, 'rt') as example_file:
cls.example_config_data = json.load(example_file)
# inject the file path since we don't want to hardcode
cls.example_config_data["client"]["path_processor"]["params"]["z_0"] = os.path.join(resource_filename("ingestclient",
"test/data"),
"test_multipage.tif")
cls.config = Configuration(cls.example_config_data)
cls.config.load_plugins()
```
#### File: ingestclient/utils/backoff.py
```python
from random import randint
def get_wait_time(retry_num):
"""
Compute time for exponential backoff.
Args:
retry_num (int): Retry attempt number to determine wait time.
Returns:
(int): Amount of time to wait.
"""
return 2 ** (retry_num+3)
def get_wait_time_rand(retry_num):
"""
Compute time for exponential backoff using a random element.
Args:
retry_num (int): Retry attempt number to determine random number range.
Returns:
(int): Amount of time to wait.
"""
return randint(1, 2 ** (retry_num+3))
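# --- Illustrative usage sketch (not part of the original module) ---
# get_wait_time() yields 8, 16, 32, ... seconds for retry_num = 0, 1, 2, ...;
# the randomized variant picks uniformly between 1 and that bound. `operation`
# is a hypothetical callable standing in for any flaky network call.
def retry_with_backoff(operation, max_retries=5):
    """Retry `operation`, sleeping a randomized exponential backoff between attempts."""
    import time
    for retry_num in range(max_retries):
        try:
            return operation()
        except Exception:
            time.sleep(get_wait_time_rand(retry_num))
    return operation()  # final attempt; exceptions propagate to the caller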
``` |
{
"source": "jhuapl-boss/intern",
"score": 2
} |
#### File: intern/convenience/array.py
```python
from typing import Optional, Union, Tuple
import abc
import json
from collections import namedtuple
from urllib.parse import unquote
from intern.service.boss.httperrorlist import HTTPErrorList
from .uri import parse_fquri
# Pip-installable imports
import numpy as np
from intern.resource.boss.resource import (
CollectionResource,
ChannelResource,
CoordinateFrameResource,
ExperimentResource,
)
from intern.service.boss.metadata import MetadataService
from intern.remote.boss import BossRemote
# A named tuple that represents a bossDB URI.
bossdbURI = namedtuple(
"bossdbURI", ["collection", "experiment", "channel", "resolution"]
)
_DEFAULT_BOSS_OPTIONS = {
"protocol": "https",
"host": "api.bossdb.io",
"token": "<PASSWORD>",
}
class VolumeProvider(abc.ABC):
"""
A provider for the common get/put cutout operations on a Remote.
TODO: This should ultimately be subsumed back into the Remote API.
"""
def get_channel(self, channel: str, collection: str, experiment: str):
...
def get_project(self, resource):
...
def create_project(self, resource):
...
def get_cutout(
self,
channel: ChannelResource,
resolution: int,
xs: Tuple[int, int],
ys: Tuple[int, int],
zs: Tuple[int, int],
):
...
def create_cutout(
self,
channel: ChannelResource,
resolution: int,
xs: Tuple[int, int],
ys: Tuple[int, int],
zs: Tuple[int, int],
data,
):
...
class _InternVolumeProvider(VolumeProvider):
"""
A VolumeProvider that backends the intern.BossRemote API.
This is used instead of directly accessing the BossRemote so that the
convenience `array` can be easily stripped out. (The array module was
originally ported from another Python package called `emboss`, so moving
VolumeProvider endpoints back into the Remote API is an outstanding TODO.)
"""
def __init__(self, boss: BossRemote = None):
if boss is None:
try:
boss = BossRemote()
except:
boss = BossRemote(_DEFAULT_BOSS_OPTIONS)
self.boss = boss
def get_channel(self, channel: str, collection: str, experiment: str):
return self.boss.get_channel(channel, collection, experiment)
def get_project(self, resource):
return self.boss.get_project(resource)
def create_project(self, resource):
return self.boss.create_project(resource)
def get_cutout(
self,
channel: ChannelResource,
resolution: int,
xs: Tuple[int, int],
ys: Tuple[int, int],
zs: Tuple[int, int],
):
return self.boss.get_cutout(channel, resolution, xs, ys, zs)
def create_cutout(
self,
channel: ChannelResource,
resolution: int,
xs: Tuple[int, int],
ys: Tuple[int, int],
zs: Tuple[int, int],
data,
):
return self.boss.create_cutout(channel, resolution, xs, ys, zs, data)
def _construct_boss_url(boss, col, exp, chan, res, xs, ys, zs) -> str:
# TODO: use boss host
return f"https://api.theboss.io/v1/cutout/{col}/{exp}/{chan}/{res}/{xs[0]}:{xs[1]}/{ys[0]}:{ys[1]}/{zs[0]}:{zs[1]}"
def parse_bossdb_uri(uri: str) -> bossdbURI:
"""
Parse a bossDB URI and raise a ValueError if it is malformed.
Arguments:
uri (str): URI of the form bossdb://<collection>/<experiment>/<channel>
Returns:
bossdbURI
"""
t = uri.split("://")[1].split("/")
if len(t) == 3:
return bossdbURI(t[0], t[1], t[2], None)
if len(t) == 4:
return bossdbURI(t[0], t[1], t[2], int(t[3]))
raise ValueError(f"Cannot parse URI {uri}.")
class AxisOrder:
XYZ = "XYZ"
ZYX = "ZYX"
class _MetadataProvider:
"""
Serves as a dictionary-like API for resource metadata.
"""
def __init__(self, dataset) -> None:
"""
Create a new metadata provider.
Arguments:
dataset (array)
"""
self._array = dataset
self._resource = dataset._channel
self._remote = dataset.volume_provider.boss
def keys(self):
return self._remote.list_metadata(self._resource)
def items(self):
for key in self.keys():
yield (key, self[key])
def __delitem__(self, key):
return self._remote.delete_metadata(self._resource, [key])
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def __getitem__(self, key):
try:
return self._remote.get_metadata(self._resource, [key])[key]
except HTTPErrorList as err:
raise KeyError(
f"The key {key!s} was not found in the metadata database."
) from err
def __setitem__(self, key, value):
return self._remote.create_metadata(self._resource, {key: value})
def update_item(self, key, value):
return self._remote.update_metadata(self._resource, {key: value})
def bulk_update(self, items: dict):
return self._remote.create_metadata(self._resource, items)
def bulk_delete(self, keys: list):
return self._remote.delete_metadata(self._resource, keys)
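# --- Illustrative usage sketch (not part of the original module) ---
# The provider behaves like a dictionary over the channel's metadata. `dataset`
# is assumed to be an `array` instance (defined below) whose token has metadata
# permissions; the key/value pairs are hypothetical.
# meta = dataset.metadata
# meta["owner"] = "someone@example.org"           # create a new key
# meta.update_item("owner", "other@example.org")  # update an existing key
# "owner" in meta                                 # membership test
# del meta["owner"]                               # delete the key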
class array:
"""
An intern/bossDB-backed numpy array.
Like a numpy.memmap array, an `intern.array` is backed by data that lives
outside of conventional memory. The data can live in, for example, a bossDB
that lives in AWS, or it can live in a local or remote bossphorus instance.
Data are downloaded when a request is made. This means that even "simple"
commands like `array[:].sum()` are very network-heavy (don't do this!).
Examples:
>>> import intern.array
>>> data = array("bossdb://collection/experiment/channel")
>>> downloaded_sample = data[100, 100:200, 100:200]
"""
def __init__(
self,
channel: Union[ChannelResource, Tuple, str],
resolution: int = 0,
volume_provider: VolumeProvider = None,
axis_order: str = AxisOrder.ZYX,
create_new: bool = False,
description: Optional[str] = None,
dtype: Optional[str] = None,
extents: Optional[Tuple[int, int, int]] = None,
voxel_size: Optional[Tuple[int, int, int]] = None,
voxel_unit: Optional[str] = None,
downsample_levels: int = 6,
downsample_method: Optional[str] = "anisotropic",
coordinate_frame_name: Optional[str] = None,
coordinate_frame_desc: Optional[str] = None,
collection_desc: Optional[str] = None,
experiment_desc: Optional[str] = None,
source_channel: Optional[str] = None,
boss_config: Optional[dict] = None,
) -> None:
"""
Construct a new intern-backed array.
Arguments:
channel (intern.resource.boss.ChannelResource): The channel from
which data will be downloaded.
resolution (int: 0): The native resolution or MIP to use
volume_provider (VolumeProvider): The remote-like to use
axis_order (str = AxisOrder.ZYX): The axis-ordering to use for data
cutouts. Defaults to ZYX. DOES NOT affect the `voxel_size` or
`extents` arguments to this constructor.
create_new (bool: False): Whether to create new Resources if they
do not exist. Does not work with public token.
dtype (str): Only required if `create_new = True`. Specifies the
numpy-style datatype for this new dataset (e.g. "uint8").
description (str): Only required if `create_new = True`. Sets the
description for the newly-created collection, experiment,
channel, and coordframe resources.
extents: Optional[Tuple[int, int, int]]: Only required if
`create_new = True`. Specifies the total dataset extents of
this new dataset, in ZYX order.
voxel_size: Optional[Tuple[int, int, int]]: Only required if
`create_new = True`. Specifies the voxel dimensions of this new
dataset, in ZYX order.
voxel_unit: Optional[str]: Only required if `create_new = True`.
Specifies the voxel-dimension unit. For example, "nanometers".
downsample_levels (int: 6): The number of downsample levels.
downsample_method (Optional[str]): The type of downsample to use.
If unset, defaults to 'anisotropic'.
coordinate_frame_name (Optional[str]): If set, the name to use for
the newly created coordinate frame. If not set, the name of the
coordinate frame will be chosen automatically.
coordinate_frame_desc (Optional[str]): If set, the description text
to use for the newly created coordinate frame. If not set, the
description will be chosen automatically.
collection_desc (Optional[str]): The description text to use for a
newly created collection. If not set, the description will be
chosen automatically.
experiment_desc (Optional[str]): The description text to use for a
newly created experiment. If not set, the description will be
chosen automatically.
source_channel (Optional[str]): The channel to use as the source
for this new channel, if `create_new` is True and this is
going to be an annotation channel (dtype!=uint8).
boss_config (Optional[dict]): The BossRemote configuration dict to
use in order to authenticate with a BossDB remote. This option
is mutually exclusive with the VolumeProvider configuration. If
the `volume_provider` arg is set, this will be ignored.
"""
self.axis_order = axis_order
# Handle custom Remote:
self.volume_provider = volume_provider
if volume_provider is None:
if boss_config:
self.volume_provider = _InternVolumeProvider(BossRemote(boss_config))
else:
self.volume_provider = _InternVolumeProvider()
if create_new:
# We'll need at least `extents` and `voxel_size`.
description = description or "Created with intern"
dtype = dtype or "uint8"
if extents is None:
raise ValueError(
"If `create_new` is True, you must specify the extents of the new coordinate frame as a [x, y, z]."
)
if voxel_size is None:
raise ValueError(
"If `create_new` is True, you must specify the voxel_size of the new coordinate frame as a [x, y, z]."
)
uri = parse_bossdb_uri(channel)
# create collection if it doesn't exist:
try:
# Try to get an existing collection:
collection = self.volume_provider.get_project(
CollectionResource(uri.collection)
)
except:
# Create the collection:
collection = CollectionResource(
uri.collection, description=collection_desc or description
)
self.volume_provider.create_project(collection)
# create coordframe if it doesn't exist:
try:
# Try to get an existing coordframe:
coordframe = self.volume_provider.get_project(
CoordinateFrameResource(
coordinate_frame_name or f"CF_{uri.collection}_{uri.experiment}"
)
)
except:
# Default to nanometers if a voxel unit isn't provided
voxel_unit = voxel_unit or "nanometers"
# Create the coordframe:
coordframe = CoordinateFrameResource(
coordinate_frame_name or f"CF_{uri.collection}_{uri.experiment}",
description=coordinate_frame_desc or description,
x_start=0,
y_start=0,
z_start=0,
x_stop=extents[2],
y_stop=extents[1],
z_stop=extents[0],
x_voxel_size=voxel_size[2],
y_voxel_size=voxel_size[1],
z_voxel_size=voxel_size[0],
voxel_unit=voxel_unit,
)
self.volume_provider.create_project(coordframe)
# create experiment if it doesn't exist:
try:
# Try to get an existing experiment:
experiment = self.volume_provider.get_project(
ExperimentResource(uri.experiment, uri.collection)
)
except:
# Create the experiment:
experiment = ExperimentResource(
uri.experiment,
uri.collection,
description=experiment_desc or description,
coord_frame=coordframe.name,
num_hierarchy_levels=downsample_levels,
hierarchy_method=downsample_method,
)
self.volume_provider.create_project(experiment)
# create channel if it doesn't exist:
try:
# Try to get an existing channel:
channel = self.volume_provider.get_project(
ChannelResource(uri.channel, uri.collection, uri.experiment)
)
except:
# Create the channel:
channel = ChannelResource(
uri.channel,
uri.collection,
uri.experiment,
description=description,
type="image" if dtype in ["uint8", "uint16"] else "annotation",
datatype=dtype,
sources=[source_channel] if source_channel else [],
)
self.volume_provider.create_project(channel)
self.resolution = resolution
# If the channel is set as a Resource, then use that resource.
if isinstance(channel, ChannelResource):
self._channel = channel
# If it is set as a string, then parse the channel and generate an
# intern.Resource from a bossDB URI.
elif isinstance(channel, str):
uri = parse_bossdb_uri(channel)
self.resolution = (
uri.resolution if not (uri.resolution is None) else self.resolution
)
self._channel = self.volume_provider.get_channel(
uri.channel, uri.collection, uri.experiment
)
else:
raise NotImplementedError(
"You must specify a channel of the form "
"'bossdb://collection/experiment/channel' or you must "
"provide an intern.Remote."
)
# Set empty experiment (will be dict)
self._exp = None
# Set empty coordframe (will be dict)
self._coord_frame = None
# Set col/exp/chan based upon the channel or URI provided.
self.collection_name = self._channel.coll_name
self.experiment_name = self._channel.exp_name
self.channel_name = self._channel.name
# Create a pointer to the metadata for the channel.
self._channel_metadata = _MetadataProvider(self)
@property
def metadata(self):
"""
Returns a pointer to the metadata provider.
"""
return self._channel_metadata
@property
def dtype(self):
"""
Return the datatype of the array.
Will default to the dtype of the channel.
"""
return self._channel.datatype
@property
def url(self):
"""
Get a pointer to this Channel on the BossDB page.
"""
return f"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/v1/mgmt/resources/{self.collection_name}/{self.experiment_name}/{self.channel_name}"
@property
def visualize(self):
"""
Get a Neuroglancer visualization link for this channel.
"""
return "https://neuroglancer.bossdb.io/#!{'layers':{'image':{'source':'boss://__replace_me__'}}}".replace(
"__replace_me__",
f"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/{self.collection_name}/{self.experiment_name}/{self.channel_name}",
)
@property
def shape(self):
"""
Get the dimensions (numpy-flavored) of the array.
Will return (1, 1, 1) if a coordinate frame does not exist (as in cases
of pre-v2 bossphorus instances); this will not restrict indexing.
"""
# Set experiment if unset:
if self._exp is None:
self._populate_exp()
# Set cframe if unset:
if self._coord_frame is None:
self._populate_coord_frame()
# From the coordinate frame, get the x, y, and z sizes. Note that this
# is the SIZE, not the extents; in other words, a cframe that starts at
# x=10 and extends to x=110 will have a size of 100 here.
if self.axis_order == AxisOrder.XYZ:
return (
int(
(self._coord_frame.y_stop - self._coord_frame.y_start)
/ (2 ** self.resolution)
),
int(
(self._coord_frame.x_stop - self._coord_frame.x_start)
/ (2 ** self.resolution)
),
(self._coord_frame.z_stop - self._coord_frame.z_start),
)
elif self.axis_order == AxisOrder.ZYX:
return (
(self._coord_frame.z_stop - self._coord_frame.z_start),
int(
(self._coord_frame.y_stop - self._coord_frame.y_start)
/ (2 ** self.resolution)
),
int(
(self._coord_frame.x_stop - self._coord_frame.x_start)
/ (2 ** self.resolution)
),
)
@property
def voxel_size(self):
"""
Get the voxel size of the array, ordered according to `axis_order`.
Will return (1, 1, 1) if a coordinate frame does not exist (as in cases
of pre-v2 bossphorus instances); this will not restrict indexing.
"""
# Set experiment if unset:
if self._exp is None:
self._populate_exp()
# Set cframe if unset:
if self._coord_frame is None:
self._populate_coord_frame()
if self.axis_order == AxisOrder.XYZ:
vox_size = (
self._coord_frame.x_voxel_size,
self._coord_frame.y_voxel_size,
self._coord_frame.z_voxel_size,
)
elif self.axis_order == AxisOrder.ZYX:
vox_size = (
self._coord_frame.z_voxel_size,
self._coord_frame.y_voxel_size,
self._coord_frame.x_voxel_size,
)
return vox_size
@property
def voxel_unit(self):
if self._coord_frame is None:
self._populate_coord_frame()
return self._coord_frame.voxel_unit
def _populate_exp(self):
"""
Populate the experiment component of this array.
Cache the results for later.
"""
self._exp = self.volume_provider.get_project(
ExperimentResource(self._channel.exp_name, self._channel.coll_name)
)
def _populate_coord_frame(self):
"""
Populate the array coordinate frame.
Cache the results for later.
"""
if self._exp is None:
self._populate_exp()
self._coord_frame = self.volume_provider.get_project(
CoordinateFrameResource(self._exp.coord_frame)
)
@property
def downsample_status(self):
"""
Return the downsample status of the underlying channel.
"""
return self._channel.downsample_status
@property
def available_resolutions(self):
"""
Return a list of available resolutions for this channel.
Arguments:
None
Returns:
List[int]: A list of resolutions at which this dataset can be downloaded
"""
self._populate_exp()
return list(range(self._exp.num_hierarchy_levels))
def __getitem__(self, key: Tuple) -> np.array:
"""
Get a subarray or subvolume.
Uses one of two indexing methods:
1. Start/Stop (`int:int`)
2. Single index (`int`)
Each element of the key can be one of those two options. For example,
myarray[1, 1:100, 2]
"""
# If the user has requested XYZ mode, the first thing to do is reverse
# the array indices. Then you can continue this fn without any
# additional changes.
if self.axis_order == AxisOrder.XYZ:
key = (key[2], key[1], key[0])
# Next, we need to get the shape of the dataset. We do this currently
# by getting the coordinate frame, which means that we need the
# coordframe data and experiment data if we don't have it already. In
# the future, we may also want to allow the user to specify general
# shape information so that we can avoid calling the API.
# Populate the experiment metadata if unset:
if self._exp is None:
self._populate_exp()
# Populate the coordinate frame metadata if not yet set:
if self._coord_frame is None:
self._populate_coord_frame()
# Now we can begin. There is a wide variety of indexing options
# available, including single-integer indexing, tuple-of-slices
# indexing, tuple-of-int indexing...
# First we'll address if the user presents a single integer.
# ```
# my_array[500]
# ```
# In this case, the user is asking for a single Z slice (or single X
# slice if in XYZ order... But that's a far less common use case.)
# We will get the full XY extents and download a single 2D array:
if isinstance(key, int):
# Get the full Z slice:
xs = (0, self.shape[2])
ys = (0, self.shape[1])
zs = (key, key + 1)
else:
# We also support indexing with units. For example, you can ask for
# ```
# my_array[0:10, 0:10, 0:10, "nanometers"]
# ```
# which will download as many pixels as are required in order to
# download 10nm in each dimension. We do this by storing a
# "normalized units" measure which is a rescale factor for each
# dimension (in the same order, e.g. ZYX, as the array).
_normalize_units = (1, 1, 1)
if isinstance(key[-1], str) and len(key) == 4:
if key[-1] != self._coord_frame.voxel_unit:
raise NotImplementedError(
"Can only reference voxels in native size format which is "
f"{self._coord_frame.voxel_unit} for this dataset."
)
_normalize_units = self.voxel_size
# We will now do the following codeblock three times, for X,Y,Z:
# First, we check to see if this index is a single integer. If so,
# the user is requesting a 2D array with zero depth along this
# dimension. For example, if the user asks for
# ```
# my_data[0:120, 0:120, 150]
# ```
# Then "150" suggests that the user just wants one single X slice.
if isinstance(key[2], int):
xs = (key[2], key[2] + 1)
else:
# If the key is a Slice, then it has .start and .stop attrs.
# (The user is requesting an array with more than one slice
# in this dimension.)
start = key[2].start if key[2].start else 0
stop = key[2].stop if key[2].stop else self.shape[2]
start = int(start / _normalize_units[2])
stop = int(stop / _normalize_units[2])
# Cast the coords to integers (since Boss needs int coords)
xs = (int(start), int(stop))
# Do the same thing again for the next dimension: Either a single
# integer, or a slice...
if isinstance(key[1], int):
ys = (key[1], key[1] + 1)
else:
start = key[1].start if key[1].start else 0
stop = key[1].stop if key[1].stop else self.shape[1]
start = start / _normalize_units[1]
stop = stop / _normalize_units[1]
ys = (int(start), int(stop))
# Do the same thing again for the last dimension: Either a single
# integer, or a slice...
if isinstance(key[0], int):
zs = (key[0], key[0] + 1)
else:
start = key[0].start if key[0].start else 0
stop = key[0].stop if key[0].stop else self.shape[0]
start = start / _normalize_units[0]
stop = stop / _normalize_units[0]
zs = (int(start), int(stop))
# Finally, we can perform the cutout itself, using the x, y, and z
# coordinates that we computed in the previous step.
cutout = self.volume_provider.get_cutout(
self._channel, self.resolution, xs, ys, zs
)
# Data are returned in ZYX order:
if self.axis_order == AxisOrder.XYZ:
data = np.rollaxis(np.rollaxis(cutout, 1), 2)
elif self.axis_order == AxisOrder.ZYX:
data = cutout
# If any of the dimensions are of length 1, it's because the user
# requested a single slice in their key; flatten the array in that
# dimension. For example, if you request `[10, 0:10, 0:10]` then the
# result should be 2D (no Z component).
_shape = data.shape
if _shape[0] == 1:
data = data[0, :, :]
if _shape[1] == 1:
data = data[:, 0, :]
if _shape[2] == 1:
data = data[:, :, 0]
return data
def __setitem__(self, key: Tuple, value: np.array) -> np.array:
"""
Set a subarray or subvolume.
Uses one of two indexing methods:
1. Start/Stop (`int:int`)
2. Single index (`int`)
Each element of the key can be one of those two options. For example,
myarray[1, 1:100, 2]
Start-only (`10:`) or stop-only (`:10`) indexing is unsupported.
"""
if self.axis_order == AxisOrder.XYZ:
key = (key[2], key[1], key[0])
# Set experiment if unset:
if self._exp is None:
self._populate_exp()
# Set cframe if unset:
if self._coord_frame is None:
self._populate_coord_frame()
_normalize_units = (1, 1, 1)
if isinstance(key[-1], str) and len(key) == 4:
if key[-1] != self._coord_frame.voxel_unit:
raise NotImplementedError(
"Can only reference voxels in native size format which is "
f"{self._coord_frame.voxel_unit} for this dataset."
)
_normalize_units = self.voxel_size
if isinstance(key[2], int):
xs = (key[2], key[2] + 1)
else:
start = key[2].start if key[2].start else 0
stop = key[2].stop if key[2].stop else self.shape[2]
start = start / _normalize_units[0]
stop = stop / _normalize_units[0]
xs = (int(start), int(stop))
if isinstance(key[1], int):
ys = (key[1], key[1] + 1)
else:
start = key[1].start if key[1].start else 0
stop = key[1].stop if key[1].stop else self.shape[1]
start = start / _normalize_units[1]
stop = stop / _normalize_units[1]
ys = (int(start), int(stop))
if isinstance(key[0], int):
zs = (key[0], key[0] + 1)
else:
start = key[0].start if key[0].start else 0
stop = key[0].stop if key[0].stop else self.shape[0]
start = start / _normalize_units[2]
stop = stop / _normalize_units[2]
zs = (int(start), int(stop))
if len(value.shape) == 2:
# TODO: Support other 2D shapes as well
value = np.array([value])
cutout = self.volume_provider.create_cutout(
self._channel, self.resolution, xs, ys, zs, value
)
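# Illustrative usage of the indexing conventions above (not from the original
# source; `dataset` is assumed to be an instance of this array class with the
# default ZYX axis order and a configured volume provider):
#
#   vol = dataset[0:16, 0:512, 0:512]       # 3D cutout: 16 Z slices
#   img = dataset[10, 0:512, 0:512]         # single Z index -> 2D array
#   dataset[0:16, 0:512, 0:512] = vol       # write the subvolume back
#   # Unit-aware indexing only accepts the dataset's native voxel unit:
#   nm = dataset[0:300, 0:1000, 0:1000, "nanometers"]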
def arrays_from_neuroglancer(url: str):
"""
Construct array(s) from a neuroglancer link.
Arguments:
url (str): The neuroglancer link to parse
Returns:
Dict[str, array]: A dictionary of arrays, where each is keyed by
the name of the channel in neuroglancer.
"""
ngl_state = json.loads(unquote(url).split("#!")[1])
arrays = {}
for source in ngl_state["layers"]:
source_url = ""
if "boss://" in source["source"]:
source_url = source["source"]
elif (
isinstance(source["source"], dict) and "boss://" in source["source"]["url"]
):
source_url = source["source"]["url"]
else:
continue
remote, channel = parse_fquri(source_url)
arrays[source["name"]] = array(
channel=channel, volume_provider=_InternVolumeProvider(remote)
)
return arrays
def volumes_from_neuroglancer(
url: str, radius_zyx: Tuple[int, int, int] = (10, 1024, 1024)
):
"""
Download numpy arrays from BossDB based upon a neuroglancer URL.
Arguments:
url (str): The neuroglancer link to parse
radius_zyx (Tuple[int, int, int]): The amount of data along each axis
to download, centered at the position from the URL.
Returns:
Dict[str, np.ndarray]: A dictionary of np.arrays, where each is keyed
by the name of the channel in neuroglancer.
"""
ngl_state = json.loads(unquote(url).split("#!")[1])
x, y, z = ngl_state["position"]
zr, yr, xr = radius_zyx
arrays = arrays_from_neuroglancer(url)
return {
key: dataset[z - zr : z + zr, y - yr : y + yr, x - xr : x + xr]
for key, dataset in arrays.items()
}
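# Example (illustrative; the fragment after "#!" must be URL-encoded
# neuroglancer state JSON that contains at least one boss:// source):
#
#   url = "https://neuroglancer.example.com/#!%7B...%7D"
#   channels = arrays_from_neuroglancer(url)   # channel name -> array proxy
#   cutouts = volumes_from_neuroglancer(url, radius_zyx=(5, 256, 256))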
``` |
{
"source": "jhuapl-boss/ndingest",
"score": 2
} |
#### File: ndingest/nddynamo/tileindexdb.py
```python
from __future__ import print_function
from __future__ import absolute_import
from ndingest.settings.settings import Settings
settings = Settings.load()
import botocore
import boto3
from boto3.dynamodb.conditions import Key, Attr
from operator import div
from ndctypelib import XYZMorton
from ndingest.util.util import Util
UtilClass = Util.load()
class TileIndexDB:
def __init__(
self, project_name, region_name=settings.REGION_NAME, endpoint_url=None
):
# creating the resource
table_name = TileIndexDB.getTableName()
dynamo = boto3.resource(
"dynamodb",
region_name=region_name,
endpoint_url=endpoint_url,
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
self.table = dynamo.Table(table_name)
self.project_name = project_name
@staticmethod
def createTable(region_name=settings.REGION_NAME, endpoint_url=None):
"""Create the ingest database in dynamodb"""
# creating the resource
table_name = TileIndexDB.getTableName()
dynamo = boto3.resource(
"dynamodb",
region_name=region_name,
endpoint_url=endpoint_url,
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
try:
table = dynamo.create_table(
TableName=table_name,
KeySchema=[{"AttributeName": "supercuboid_key", "KeyType": "HASH"}],
AttributeDefinitions=[
{"AttributeName": "supercuboid_key", "AttributeType": "S"},
{"AttributeName": "task_id", "AttributeType": "N"},
],
GlobalSecondaryIndexes=[
{
"IndexName": "task_index",
"KeySchema": [{"AttributeName": "task_id", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10,
},
},
],
ProvisionedThroughput={
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10,
},
)
except Exception as e:
print(e)
raise
@staticmethod
def deleteTable(region_name=settings.REGION_NAME, endpoint_url=None):
"""Delete the ingest database in dynamodb"""
# creating the resource
table_name = TileIndexDB.getTableName()
dynamo = boto3.resource(
"dynamodb",
region_name=region_name,
endpoint_url=endpoint_url,
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
try:
table = dynamo.Table(table_name)
table.delete()
except Exception as e:
print(e)
raise
@staticmethod
def getTableName():
"""Return table name"""
return settings.DYNAMO_TILEINDEX_TABLE
def generatePrimaryKey(
self, channel_name, resolution, x_index, y_index, z_index, t_index=0
):
"""Generate key for each supercuboid"""
morton_index = XYZMorton(
map(div, [x_index, y_index, z_index], settings.SUPER_CUBOID_SIZE)
)
return UtilClass.generateCuboidKey(
self.project_name, channel_name, resolution, morton_index, t_index
)
def supercuboidReady(self, z_index, zindex_list):
"""Verify if we have all tiles for a given supercuboid"""
return zindex_list == set(
range(
z_index / settings.SUPER_CUBOID_SIZE[2],
settings.SUPER_CUBOID_SIZE[2],
1,
)
)
def putItem(
self, channel_name, resolution, x_index, y_index, z_index, t_index=0, task_id=0
):
"""Updating item for a give slice number"""
supercuboid_key = self.generatePrimaryKey(
channel_name, resolution, x_index, y_index, z_index, t_index
)
try:
response = self.table.update_item(
Key={"supercuboid_key": supercuboid_key},
UpdateExpression="ADD zindex_list :z_index SET task_id = :task_id",
ExpressionAttributeValues={
":z_index": set([z_index]),
":task_id": task_id,
},
ReturnValues="ALL_NEW",
)
return supercuboid_key, self.supercuboidReady(
z_index, response["Attributes"]["zindex_list"]
)
except botocore.exceptions.ClientError as e:
print(e)
raise
def getItem(self, supercuboid_key):
"""Get the item from the ingest table"""
try:
response = self.table.get_item(
Key={"supercuboid_key": supercuboid_key},
ConsistentRead=True,
ReturnConsumedCapacity="INDEXES",
)
# TODO write a yield function to pop one item at a time
return response["Item"] if "Item" in response else None
except Exception as e:
print(e)
raise
def getTaskItems(self, task_id):
"""Get all the items for a given task from the ingest table"""
try:
response = self.table.query(
IndexName="task_index",
KeyConditionExpression="task_id = :task_id",
ExpressionAttributeValues={":task_id": task_id},
)
for item in response["Items"]:
yield item
except Exception as e:
print(e)
raise
def deleteItem(self, supercuboid_key):
"""Delete item from database"""
try:
response = self.table.delete_item(Key={"supercuboid_key": supercuboid_key})
return response
except botocore.exceptions.ClientError as e:
print(e)
raise
```
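A minimal usage sketch for the tile index above (not part of the repository; it assumes the legacy Python 2 environment the module targets, configured ndingest settings, a reachable DynamoDB endpoint, and hypothetical project/channel names):
```python
from ndingest.nddynamo.tileindexdb import TileIndexDB

TileIndexDB.createTable()              # one-time creation of the tile index table
db = TileIndexDB("kasthuri11")         # hypothetical project name

# Record an uploaded tile; putItem returns the supercuboid key and a flag
# indicating whether every tile of that supercuboid has now been seen.
key, ready = db.putItem("image", 0, x_index=0, y_index=0, z_index=0)
if ready:
    print(db.getItem(key))
    db.deleteItem(key)
```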
#### File: ndingest/ndqueue/cleanupqueue.py
```python
from __future__ import absolute_import
from __future__ import print_function
from ndingest.settings.settings import Settings
settings = Settings.load()
import json
import boto3
import botocore
from ndingest.ndqueue.ndqueue import NDQueue
import random
class CleanupQueue(NDQueue):
# Static variable to hold random number added to test queue names.
test_queue_id = -1
def __init__(self, nd_proj, region_name=settings.REGION_NAME, endpoint_url=None):
"""Create resources for the queue"""
self.queue_name = CleanupQueue.generateQueueName(nd_proj)
return super(CleanupQueue, self).__init__(
self.queue_name, region_name, endpoint_url
)
@staticmethod
def generateNeurodataQueueName(nd_proj):
return "&".join(nd_proj.generateProjectInfo() + ["CLEANUP"]).replace("&", "-")
@staticmethod
def generateBossQueueName(nd_proj):
if not settings.TEST_MODE and not NDQueue.test_mode:
return "{}-delete-{}".format(settings.DOMAIN, nd_proj.job_id)
if CleanupQueue.test_queue_id == -1:
CleanupQueue.test_queue_id = random.randint(0, 999)
return "test{}-{}-delete-{}".format(
CleanupQueue.test_queue_id, settings.DOMAIN, nd_proj.job_id
)
@staticmethod
def createQueue(nd_proj, region_name=settings.REGION_NAME, endpoint_url=None):
"""Create the cleanup queue"""
# creating the resource
queue_name = CleanupQueue.generateQueueName(nd_proj)
sqs = boto3.resource(
"sqs",
region_name=region_name,
endpoint_url=endpoint_url,
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
try:
# creating the queue, if the queue already exists catch exception
response = sqs.create_queue(
QueueName=queue_name,
Attributes={
"DelaySeconds": "0",
"MaximumMessageSize": "262144",
"MessageRetentionPeriod": "1209600", # 14 days.
},
)
return queue_name
except Exception as e:
print(e)
raise
@staticmethod
def deleteQueue(
nd_proj,
region_name=settings.REGION_NAME,
endpoint_url=None,
delete_deadletter_queue=False,
):
"""Delete the cleanup queue.
Also delete the dead letter queue if delete_deadletter_queue is true.
Args:
nd_proj (IngestProj): Project settings used to generate queue's name.
region_name (optional[string]): AWS region queue lives in. Extracted from settings.ini if not provided.
endpoint_url (optional[string]): Provide if using a mock or fake Boto3 service.
delete_deadletter_queue (optional[bool]): Also delete the dead letter queue. Defaults to False.
"""
# creating the resource
queue_name = CleanupQueue.generateQueueName(nd_proj)
NDQueue.deleteQueueByName(
queue_name, region_name, endpoint_url, delete_deadletter_queue
)
@staticmethod
def generateQueueName(nd_proj):
"""Generate the queue name based on project information"""
return CleanupQueue.getNameGenerator()(nd_proj)
def sendMessage(self, tile_info):
"""Send a message to upload queue"""
return super(CleanupQueue, self).sendMessage(json.dumps(tile_info))
def sendBatchMessages(self, tile_infos, delay_seconds=0):
"""Send up to 10 messages at once to the cleanup queue.
Returned dict contains two keys: 'Successful' and 'Failed'. Each key is
an array dicts with the common key: 'Id'. The value associated with 'Id'
is the index into the original list of messages passed in. Use this to
determine which messages were successfully enqueued vs failed.
Args:
tile_infos (list[dict]): List of up to 10 message bodies (dicts).
delay_seconds (optional[int]): Optional delay for processing of messages.
Returns:
(dict): Contains keys 'Successful' and 'Failed'.
"""
jsonized = []
for info in tile_infos:
jsonized.append(json.dumps(info))
return super(CleanupQueue, self).sendBatchMessages(jsonized, delay_seconds)
def receiveMessage(self, number_of_messages=1):
"""Receive a message from the cleanup queue"""
message_list = super(CleanupQueue, self).receiveMessage(
number_of_messages=number_of_messages
)
if message_list is None:
# PEP 479: raising StopIteration inside a generator is an error on
# Python 3.7+, so simply return to end the generator.
return
else:
for message in message_list:
yield message.message_id, message.receipt_handle, json.loads(
message.body
)
def deleteMessage(self, message_id, receipt_handle, number_of_messages=1):
"""Delete a message from the cleanup queue"""
return super(CleanupQueue, self).deleteMessage(
message_id, receipt_handle, number_of_messages=number_of_messages
)
```
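The batch-send return format documented in `sendBatchMessages` can be consumed as sketched below (illustrative only; `nd_proj` is assumed to be an existing ingest-project object and the queue is assumed to have been created already):
```python
from ndingest.ndqueue.cleanupqueue import CleanupQueue

queue = CleanupQueue(nd_proj)                                  # nd_proj assumed defined
tile_infos = [{"tile_key": "tile-0"}, {"tile_key": "tile-1"}]  # up to 10 bodies
resp = queue.sendBatchMessages(tile_infos)

# 'Id' in each entry indexes into tile_infos, per the docstring above.
failed = [tile_infos[int(entry["Id"])] for entry in resp.get("Failed", [])]

for message_id, receipt_handle, body in queue.receiveMessage(number_of_messages=10):
    # ... process body ...
    queue.deleteMessage(message_id, receipt_handle)
```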
#### File: ndingest/ndqueue/ndingest.py
```python
import re
import urllib2
import json
import requests
import jsonschema
import django
django.setup()
from django.conf import settings
import ndproj
from ndwsingest import IngestData
from ndschema import PROJECT_SCHEMA, DATASET_SCHEMA, CHANNEL_SCHEMA
from ndtype import READONLY_FALSE, REDIS, S3_TRUE
from nduser.models import Project
from nduser.models import Dataset
from nduser.models import Token
from nduser.models import Channel
from nduser.models import User
from ndwserror import NDWSError
import logging
logger = logging.getLogger("neurodata")
def NDIngest(webargs, post_data):
"""Create a project using a JSON file"""
# setting state values for error handling
TOKEN_CREATED = False
PROJECT_CREATED = False
CHANNEL_CREATED = False
DATASET_CREATED = False
nd_dict = json.loads(post_data)
try:
dataset_dict = nd_dict["dataset"]
project_dict = nd_dict["project"]
channels = nd_dict["channels"]
metadata_dict = nd_dict["metadata"]
except Exception as e:
logger.error("Missing requred fields of dataset,project,channels,metadata.")
return json.dumps(
"Missing required fields of dataset,project,channels,metadata. Please check if one of them is not missing."
)
# try:
# DATASET_SCHEMA.validate(dataset_dict)
# except Exception as e:
# logger.error("Invalid Dataset schema")
# return json.dumps("Invalid Dataset schema")
# try:
# PROJECT_SCHEMA.validate(project_dict)
# except Exception as e:
# logger.error("Invalid Project schema")
# return json.dumps("Invalid Project schema")
# try:
# import pdb; pdb.set_trace()
# CHANNEL_SCHEMA.validate(channels)
# except Exception as e:
# print("Invalid Channel schema")
# return json.dumps("Invalid Channel schema")
ds = extractDatasetDict(dataset_dict)
pr, tk = extractProjectDict(project_dict)
pr.host = "localhost"
# pr.kvengine = REDIS
pr.s3backend = S3_TRUE
if pr.project_name in ["unittest", "unittest2"]:
pr.host = "localhost"
ch_list = []
for channel_name, value in channels.iteritems():
channel_dict = channels[channel_name]
ch_list.append(extractChannelDict(channel_dict))
try:
# Setting the user_ids to brain for now
ds.user_id = 1
pr.user_id = 1
tk.user_id = 1
# Checking if the posted dataset already exists
# Setting the foreign key for dataset
if Dataset.objects.filter(dataset_name=ds.dataset_name).exists():
stored_ds = Dataset.objects.get(dataset_name=ds.dataset_name)
if compareModelObjects(stored_ds, ds):
pr.dataset_id = stored_ds.dataset_name
else:
logger.error(
"Dataset {} already exists and is different then the chosen dataset".format(
ds.dataset_name
)
)
return json.dumps(
"Dataset {} already exists and is different then the chosen dataset. Please choose a different dataset name".format(
ds.dataset_name
)
)
else:
ds.save()
DATASET_CREATED = True
pr.dataset_id = ds.dataset_name
# Checking if the posted project already exists
# Setting the foreign key for project
if Project.objects.filter(project_name=pr.project_name).exists():
stored_pr = Project.objects.get(project_name=pr.project_name)
# Checking if the existing project is same as the posted one
if compareModelObjects(stored_pr, pr):
if Token.objects.filter(token_name=tk.token_name).exists():
stored_tk = Token.objects.get(token_name=tk.token_name)
tk.project_id = stored_pr.project_name
# Checking if the existing token is same as the posted one
if compareModelObjects(stored_tk, tk):
pass
else:
if DATASET_CREATED:
ds.delete()
logger.error("Token {} already exists.".format(tk.token_name))
return json.dumps(
"Token {} already exists. Please choose a different token name.".format(
tk.token_name
)
)
else:
tk.project_id = stored_pr.project_name
tk.save()
TOKEN_CREATED = True
else:
if DATASET_CREATED:
ds.delete()
if TOKEN_CREATED:
tk.delete()
logger.error("Project {} already exists.".format(pr.project_name))
return json.dumps(
"Project {} already exists. Please choose a different project name".format(
pr.project_name
)
)
else:
pr.save()
try:
pd = ndproj.NDProjectsDB.getProjDB(pr)
pd.newNDProject()
PROJECT_CREATED = True
except Exception as e:
if TOKEN_CREATED:
tk.delete()
if PROJECT_CREATED:
pr.delete()
if DATASET_CREATED:
ds.delete()
logger.error(
"There was an error in creating the project {} database".format(
pr.project_name
)
)
return json.dumps(
"There was an error in creating the project {} database".format(
pr.project_name
)
)
tk.project_id = pr.project_name
tk.save()
TOKEN_CREATED = True
channel_object_list = []
# Iterating over channel list to store channels
for (ch, data_url, file_format, file_type) in ch_list:
ch.project_id = pr.project_name
ch.user_id = 1
# Checking if the channel already exists or not
if not Channel.objects.filter(
channel_name=ch.channel_name, project=pr.project_name
).exists():
ch.save()
# Maintain a list of channel objects created during this iteration and delete all even if one fails
channel_object_list.append(ch)
try:
pd = ndproj.NDProjectsDB.getProjDB(pr)
pd.newNDChannel(ch.channel_name)
CHANNEL_CREATED = True
except Exception as e:
if TOKEN_CREATED:
tk.delete()
if CHANNEL_CREATED:
for ch_obj in channel_object_list:
ch_obj.delete()
if PROJECT_CREATED:
pr.delete()
if DATASET_CREATED:
ds.delete()
logger.error(
"There was an error creating in the channel {} table".format(
ch.channel_name
)
)
return json.dumps(
"There was an error in creating the channel {} table.".format(
ch.channel_name
)
)
else:
logger.error("Channel {} already exists.".format(ch.channel_name))
return json.dumps(
"Channel {} already exists. Please choose a different channel name.".format(
ch.channel_name
)
)
# Check whether the posted data_url has a trailing slash; this becomes an issue in auto-ingest
if data_url.endswith("/"):
# removing the trailing slash if there exists one
data_url = data_url[:-1]
# call SQS here not celery
# KL TODO
# from spdb.tasks import ingest
# ingest(tk.token_name, ch.channel_name, ch.resolution, data_url, file_format, file_type)
# ingest.delay(tk.token_name, ch.channel_name, ch.resolution, data_url, file_format, file_type)
# Posting to LIMS system
postMetadataDict(metadata_dict, pr.project_name)
except Exception as e:
# KL TODO Delete data from the LIMS systems
try:
pd
except NameError:
if pr is not None:
pd = ndproj.NDProjectsDB.getProjDB(pr.project_name)
if PROJECT_CREATED:
pd.deleteNDProject()
logger.error(
"Error saving models. There was an error in the information posted"
)
return json.dumps("FAILED. There was an error in the information you posted.")
return json.dumps("SUCCESS. The ingest process has now started.")
def createChannel(webargs, post_data):
"""Create a list of channels using a JSON file"""
# Get the token and load the project
try:
m = re.match("(\w+)/createChannel/$", webargs)
token_name = m.group(1)
except Exception as e:
print("Error in URL format")
raise
nd_dict = json.loads(post_data)
try:
channels = nd_dict["channels"]
except Exception as e:
print("Missing requred fields")
return json.dumps("Missing channels field. Ensure that Channel field exists")
tk = Token.objects.get(token_name=token_name)
ur = User.objects.get(id=tk.user_id)
pr = Project.objects.get(project_name=tk.project_id)
ch_list = []
for channel_name, value in channels.iteritems():
channel_dict = channels[channel_name]
ch_list.append(extractChannelDict(channel_dict, channel_only=True))
try:
# First iterating over the channel list to check if all the channels don't exist
for ch in ch_list:
if Channel.objects.filter(
channel_name=ch.channel_name, project=pr.project_name
).exists():
print("Channel already exists")
return json.dumps(
"Channel {} already exists for this project. Specify a different channel name".format(
ch.channel_name
)
)
# Iterating over channel list to store channels
for ch in ch_list:
ch.project_id = pr.project_name
# Setting the user_ids based on token user_id
ch.user_id = tk.user_id
ch.save()
# Create channel database using the ndproj interface
pd = ndproj.NDProjectsDB.getProjDB(pr.project_name)
pd.newNDChannel(ch.channel_name)
except Exception as e:
print("Error saving models")
# return the JSON file with failed
return json.dumps("Error saving models")
# return the JSON file with success
return json.dumps("SUCCESS. The information in the channel was correct.")
def deleteChannel(webargs, post_data):
"""Delete a list of channels using a JSON file"""
# Get the token and load the project
try:
m = re.match("(\w+)/deleteChannel/$", webargs)
token_name = m.group(1)
except Exception as e:
print("Error in URL format")
raise
nd_dict = json.loads(post_data)
try:
channels = nd_dict["channels"]
except Exception as e:
print("Missing requred fields")
raise
tk = Token.objects.get(token_name=token_name)
ur = User.objects.get(id=tk.user_id)
pr = Project.objects.get(project_name=tk.project_id)
try:
# Iterating over channel list to store channels
for channel_name in channels:
# Checking if the channel already exists or not
if Channel.objects.get(channel_name=channel_name, project=pr.project_name):
ch = Channel.objects.get(
channel_name=channel_name, project=pr.project_name
)
# Checking if channel is readonly or not
if ch.readonly == READONLY_FALSE:
# delete channel table using the ndproj interface
pd = ndproj.NDProjectsDB().getProjDB(pr.project_name)
pd.deleteNDChannel(ch.channel_name)
ch.delete()
return_json = "SUCCESS"
except Exception as e:
print("Error saving models")
return_json = "FAILED"
return json.dumps(return_json)
def postMetadataDict(metadata_dict, project_name):
"""Post metdata to the LIMS system"""
try:
url = "http://{}/metadata/ocp/set/{}/".format(
settings.LIMS_SERVER, project_name
)
req = urllib2.Request(url, json.dumps(metadata_dict))
req.add_header("Content-Type", "application/json")
response = urllib2.urlopen(req)
except urllib2.URLError as e:
print("Failed URL {}".format(url))
pass
def extractDatasetDict(ds_dict):
"""Generate a dataset object from the JSON flle"""
ds = Dataset()
try:
ds.dataset_name = ds_dict["dataset_name"]
imagesize = [ds.ximagesize, ds.yimagesize, ds.zimagesize] = ds_dict["imagesize"]
[ds.xvoxelres, ds.yvoxelres, ds.zvoxelres] = ds_dict["voxelres"]
except Exception as e:
print("Missing required fields")
raise
if "offset" in ds_dict:
[ds.xoffset, ds.yoffset, ds.zoffset] = ds_dict["offset"]
if "timerange" in ds_dict:
[ds.starttime, ds.endtime] = ds_dict["timerange"]
if "scaling" in ds_dict:
ds.scalingoption = ds_dict["scaling"]
if "scalinglevels" in ds_dict:
ds.scalinglevels = ds_dict["scalinglevels"]
else:
ds.scalinglevels = computeScalingLevels(imagesize)
return ds
def computeScalingLevels(imagesize):
"""Dynamically decide the scaling levels"""
ximagesz, yimagesz, zimagesz = imagesize
scalinglevels = 0
# Keep halving while either X or Y is above 1000 and both remain above 500
while (ximagesz > 1000 or yimagesz > 1000) and ximagesz > 500 and yimagesz > 500:
ximagesz = ximagesz // 2
yimagesz = yimagesz // 2
scalinglevels += 1
return scalinglevels
def extractProjectDict(pr_dict):
"""Generate a project object from the JSON flle"""
pr = Project()
tk = Token()
try:
pr.project_name = pr_dict["project_name"]
except Exception as e:
print("Missing required fields")
raise
if "token_name" in pr_dict:
tk.token_name = pr_dict["token_name"]
else:
tk.token_name = pr_dict["project_name"]
if "public" in pr_dict:
tk.public = pr_dict["public"]
return pr, tk
def extractChannelDict(ch_dict, channel_only=False):
"""Generate a channel object from the JSON flle"""
ch = Channel()
try:
ch.channel_name = ch_dict["channel_name"]
ch.channel_datatype = ch_dict["datatype"]
ch.channel_type = ch_dict["channel_type"]
if not channel_only:
data_url = ch_dict["data_url"]
file_format = ch_dict["file_format"]
file_type = ch_dict["file_type"]
except Exception as e:
print("Missing requried fields")
raise
if "exceptions" in ch_dict:
ch.exceptions = ch_dict["exceptions"]
if "resolution" in ch_dict:
ch.resolution = ch_dict["resolution"]
if "windowrange" in ch_dict:
ch.startwindow, ch.endwindow = ch_dict["windowrange"]
if "readonly" in ch_dict:
ch.readonly = ch_dict["readonly"]
if not channel_only:
return (ch, data_url, file_format, file_type)
else:
return ch
def createJson(dataset, project, channel_list, metadata={}, channel_only=False):
"""Genarate ND json object"""
nd_dict = {}
nd_dict["channels"] = {}
if not channel_only:
nd_dict["dataset"] = createDatasetDict(*dataset)
nd_dict["project"] = createProjectDict(*project)
nd_dict["metadata"] = metadata
for channel_name, value in channel_list.iteritems():
value = value + (channel_only,)
nd_dict["channels"][channel_name] = createChannelDict(*value)
return json.dumps(nd_dict, sort_keys=True, indent=4)
def postMetadataDict(metadata_dict, project_name):
"""Post metdata to the LIMS system"""
try:
url = "http://{}/lims/{}/".format(settings.LIMS_SERVER, project_name)
req = urllib2.Request(url, json.dumps(metadata_dict))
req.add_header("Content-Type", "application/json")
response = urllib2.urlopen(req)
except urllib2.URLError as e:
print("Failed URL {}".format(url))
pass
def extractDatasetDict(ds_dict):
"""Generate a dataset object from the JSON flle"""
ds = Dataset()
try:
ds.dataset_name = ds_dict["dataset_name"]
imagesize = [ds.ximagesize, ds.yimagesize, ds.zimagesize] = ds_dict["imagesize"]
[ds.xvoxelres, ds.yvoxelres, ds.zvoxelres] = ds_dict["voxelres"]
except Exception as e:
print("Missing required fields")
raise
if "offset" in ds_dict:
[ds.xoffset, ds.yoffset, ds.zoffset] = ds_dict["offset"]
if "timerange" in ds_dict:
[ds.starttime, ds.endtime] = ds_dict["timerange"]
if "scaling" in ds_dict:
ds.scalingoption = ds_dict["scaling"]
if "scalinglevels" in ds_dict:
ds.scalinglevels = ds_dict["scalinglevels"]
else:
ds.scalinglevels = computeScalingLevels(imagesize)
return ds
def computeScalingLevels(imagesize):
"""Dynamically decide the scaling levels"""
ximagesz, yimagesz, zimagesz = imagesize
scalinglevels = 0
# Keep halving while either X or Y is above 1000 and both remain above 500
while (ximagesz > 1000 or yimagesz > 1000) and ximagesz > 500 and yimagesz > 500:
ximagesz = ximagesz // 2
yimagesz = yimagesz // 2
scalinglevels += 1
return scalinglevels
def createJson(dataset, project, channel_list, metadata={}, channel_only=False):
"""Genarate ND json object"""
nd_dict = {}
nd_dict["channels"] = {}
if not channel_only:
nd_dict["dataset"] = createDatasetDict(*dataset)
nd_dict["project"] = createProjectDict(*project)
nd_dict["metadata"] = metadata
for channel_name, value in channel_list.iteritems():
nd_dict["channels"][channel_name] = createChannelDict(*value)
return json.dumps(nd_dict, sort_keys=True, indent=4)
def createDatasetDict(
dataset_name,
imagesize,
voxelres,
offset=[0, 0, 0],
timerange=[0, 0],
scalinglevels=0,
scaling=0,
):
"""Generate the dataset dictionary"""
# dataset format = (dataset_name, [ximagesz, yimagesz, zimagesz], [xvoxel, yvoxel, zvoxel], [xoffset, yoffset, zoffset], timerange, scalinglevels, scaling)
dataset_dict = {}
dataset_dict["dataset_name"] = dataset_name
dataset_dict["imagesize"] = imagesize
dataset_dict["voxelres"] = voxelres
if offset is not None:
dataset_dict["offset"] = offset
if timerange is not None:
dataset_dict["timerange"] = timerange
if scalinglevels is not None:
dataset_dict["scalinglevels"] = scalinglevels
if scaling is not None:
dataset_dict["scaling"] = scaling
return dataset_dict
def createChannelDict(
channel_name,
datatype,
channel_type,
data_url,
file_format,
file_type,
exceptions=0,
resolution=0,
windowrange=[0, 0],
readonly=0,
channel_only=False,
):
"""Genearte the project dictionary"""
# channel format = (channel_name, datatype, channel_type, data_url, file_type, file_format, exceptions, resolution, windowrange, readonly)
channel_dict = {}
channel_dict["channel_name"] = channel_name
channel_dict["datatype"] = datatype
channel_dict["channel_type"] = channel_type
if exceptions is not None:
channel_dict["exceptions"] = exceptions
if resolution is not None:
channel_dict["resolution"] = resolution
if windowrange is not None:
channel_dict["windowrange"] = windowrange
if readonly is not None:
channel_dict["readonly"] = readonly
if not channel_only:
channel_dict["data_url"] = data_url
channel_dict["file_format"] = file_format
channel_dict["file_type"] = file_type
return channel_dict
def createProjectDict(project_name, token_name="", public=0):
"""Genarate the project dictionary"""
# project format = (project_name, token_name, public)
project_dict = {}
project_dict["project_name"] = project_name
if token_name is not None:
project_dict["token_name"] = project_name if token_name == "" else token_name
if public is not None:
project_dict["public"] = public
return project_dict
def compareModelObjects(obj1, obj2, excluded_keys=["_state"]):
"""Compare two model objects"""
for key, value in obj1.__dict__.items():
if key in excluded_keys:
continue
if obj2.__dict__[key] != value:
return False
return True
```
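The helper functions above also show the shape of the JSON payload `NDIngest` expects; a sketch of assembling one follows (all names and sizes are illustrative, not a real dataset):
```python
# dataset = (dataset_name, imagesize, voxelres, offset, timerange, scalinglevels, scaling)
dataset = ("exampledata", [2048, 2048, 100], [3.0, 3.0, 30.0], [0, 0, 0], [0, 0], 3, 0)
# project = (project_name, token_name, public)
project = ("exampleproj", "exampletoken", 1)
# channel value = the 10-tuple documented in createChannelDict above
channels = {
    "image": ("image", "uint8", "image", "http://example.com/data",
              "SLICE", "tif", 0, 0, [0, 0], 0),
}

post_data = createJson(dataset, project, channels, metadata={})
# NDIngest("", post_data) would then create the dataset, project, token and
# channels, rolling back partially-created objects on failure as shown above.
```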
#### File: ndingest/ndqueue/ndmessage.py
```python
import boto3
from serializer import Serializer
class NDMessage(object):
def __init__(self):
return NotImplemented
def encode(self):
return NotImplemented
def decode(self):
return NotImplemented
```
#### File: ndingest/ndsns/ndsns.py
```python
from __future__ import print_function
from __future__ import absolute_import
from ndingest.settings.settings import Settings
settings = Settings.load()
import boto3
import botocore
class NDSns:
def __init__(self, topic_arn, region_name=settings.REGION_NAME, endpoint_url=None):
"""Initialize a topic"""
# topic_name = NDSns.getTopicName(nd_proj)
sns = boto3.resource("sns", region_name=region_name, endpoint_url=endpoint_url)
# for test in sns.topics.all():
# print sns
try:
self.topic = sns.Topic(topic_arn)
except Exception as e:
print(e)
raise
@staticmethod
def createTopic(nd_proj, region_name=settings.REGION_NAME, endpoint_url=None):
"""Create a topic"""
topic_name = NDSns.getTopicName(nd_proj)
sns = boto3.resource("sns", region_name=region_name, endpoint_url=endpoint_url)
try:
topic = sns.create_topic(Name=topic_name)
return topic.arn
except Exception as e:
print(e)
raise
@staticmethod
def deleteTopic(topic_arn, region_name=settings.REGION_NAME, endpoint_url=None):
"""Delete the topic"""
# topic_name = NDSns.getTopicName(nd_proj)
sns = boto3.resource("sns", region_name=region_name, endpoint_url=endpoint_url)
try:
topic = sns.Topic(topic_arn)
topic.delete()
except Exception as e:
print(e)
raise
@staticmethod
def getTopicName(nd_proj):
"""Generate the topic name based on project information"""
# SNS topic names do not allow '&', so join the project info with '-' instead
return "-".join(nd_proj.generateProjectInfo())
def publish(self, target_arn, message):
"""Publish a message"""
try:
response = self.topic.publish(TargetArn=target_arn, Message=message)
return response
except Exception as e:
print(e)
raise
def subscribe(self, lambda_arn):
"""Subscribe to a topic"""
try:
subscription = self.topic.subscribe(Protocol="lambda", Endpoint=lambda_arn)
except Exception as e:
print(e)
raise
```
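A brief sketch of driving `NDSns` (illustrative only; it assumes AWS SNS is reachable and that `nd_proj` is an existing ingest-project object):
```python
topic_arn = NDSns.createTopic(nd_proj)       # returns the topic ARN
sns = NDSns(topic_arn)
sns.publish(topic_arn, "supercuboid ready")  # message content is arbitrary
NDSns.deleteTopic(topic_arn)
```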
#### File: ndingest/settings/bosssettings.py
```python
from .settings import Settings
try:
from ConfigParser import Error
except:
from configparser import Error
import os
class BossSettings(Settings):
"""Global settings for the Boss version of ndingest.
Attributes:
_domain (string): Domain name that ndingest is running in. Periods will be replaced by dashes for AWS naming compatibility (for queues, etc). Lazily populated.
_test_mode (bool): True if the environment variable NDINGEST_TEST is set.
"""
def __init__(self, file_name, fp=None):
super(BossSettings, self).__init__(file_name, fp)
self._domain = None
self._test_mode = "NDINGEST_TEST" in os.environ
def setPath(self):
"""Add path to other libraries"""
return NotImplemented
@property
def PROJECT_NAME(self):
return "Boss"
@property
def REGION_NAME(self):
"""Defaults to us-east-1."""
try:
return self.parser.get("aws", "region")
except Error:
return "us-east-1"
@property
def AWS_ACCESS_KEY_ID(self):
"""Defaults to None. Normally only set this for testing."""
try:
return self.parser.get("aws", "access_key_id")
except Error:
return None
@property
def AWS_SECRET_ACCESS_KEY(self):
"""Defaults to None. Normally only set this for testing."""
try:
return self.parser.get("aws", "secret_key")
except Error:
return None
@property
def S3_CUBOID_BUCKET(self):
if self._test_mode:
return "test-{}".format(self.parser.get("aws", "cuboid_bucket"))
return self.parser.get("aws", "cuboid_bucket")
@property
def S3_TILE_BUCKET(self):
if self._test_mode:
return "test-{}".format(self.parser.get("aws", "tile_bucket"))
return self.parser.get("aws", "tile_bucket")
@property
def DYNAMO_CUBOIDINDEX_TABLE(self):
if self._test_mode:
return "test_{}".format(self.parser.get("aws", "cuboid_index_table"))
return self.parser.get("aws", "cuboid_index_table")
@property
def DYNAMO_TILEINDEX_TABLE(self):
if self._test_mode:
return "test_{}".format(self.parser.get("aws", "tile_index_table"))
return self.parser.get("aws", "tile_index_table")
@property
def MAX_TASK_ID_SUFFIX(self):
return int(self.parser.get("aws", "max_task_id_suffix"))
@property
def UPLOAD_TASK_QUEUE(self):
return self.parser.get("aws", "upload_task_queue_url")
@property
def UPLOAD_TASK_DEADLETTER_QUEUE(self):
return self.parser.get("aws", "upload_task_deadletter_queue_url")
@property
def INGEST_QUEUE(self):
return self.parser.get("aws", "ingest_queue_url")
@property
def INGEST_DEADLETTER_QUEUE(self):
return self.parser.get("aws", "ingest_deadletter_queue_url")
@property
def SUPER_CUBOID_SIZE(self):
return [int(i) for i in self.parser.get("spdb", "SUPER_CUBOID_SIZE").split(",")]
@property
def UPLOAD_TASK_QUEUE(self):
return self.parser.get("aws", "upload_task_queue_url")
@property
def UPLOAD_TASK_DEADLETTER_QUEUE(self):
return self.parser.get("aws", "upload_task_deadletter_queue_url")
@property
def IAM_POLICY_PATH(self):
"""Path to use when creating an IAM policy.
Must return '/' or a string beginning and ending with '/'.
"""
return "/ingest/"
@property
def DYNAMO_TEST_ENDPOINT(self):
"""URL of local DynamoDB instance for testing."""
try:
return self.parser.get("testing", "dynamo_endpoint")
except Error:
return None
@property
def DYNAMO_ENDPOINT(self):
"""Alias to match what Neurodata uses in case of developer confusion."""
return self.DYNAMO_TEST_ENDPOINT
@property
def DOMAIN(self):
"""Domain ndingest library is running in.
For example: 'api.boss.io'
In the above example, 'api-boss-io' will be returned for
compatibility with AWS naming restrictions.
Returns:
(string): Periods will be replaced with dashes for compatibility with AWS naming restrictions.
"""
if self._domain is None:
self._domain = self.parser.get("boss", "domain").replace(".", "-")
return self._domain
@property
def TEST_MODE(self):
"""If true, then ndingest tests are running. This will affect the names of queues.
Returns:
(bool)
"""
return self._test_mode
```
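How these Boss settings might be consumed (a sketch, assuming a settings.ini whose [aws], [boss] and optional [testing] sections define the keys read by the properties above):
```python
from ndingest.settings.settings import Settings

settings = Settings.load()          # resolves to BossSettings for the Boss project
print(settings.REGION_NAME)         # falls back to "us-east-1" when unset
print(settings.DOMAIN)              # e.g. "api-boss-io" (periods become dashes)
print(settings.S3_TILE_BUCKET)      # "test-" prefixed when NDINGEST_TEST is set
```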
#### File: ndingest/settings/ndsettings.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .settings import Settings
import os
import sys
class NDSettings(Settings):
def __init__(self, file_name):
super(NDSettings, self).__init__(file_name)
self.setPath()
def setPath(self):
"""Add path to other libraries"""
# sys.path.append('..')
BASE_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__), self.parser.get("path", "BASE_PATH")
)
)
NDLIB_PATH = os.path.join(BASE_PATH, self.parser.get("path", "NDLIB_PATH"))
SPDB_PATH = os.path.join(BASE_PATH, self.parser.get("path", "SPDB_PATH"))
sys.path += [NDLIB_PATH, SPDB_PATH]
@property
def PROJECT_NAME(self):
return self.parser.get("proj", "PROJECT_NAME")
@property
def REGION_NAME(self):
return self.parser.get("aws", "REGION_NAME")
@property
def AWS_ACCESS_KEY_ID(self):
return self.parser.get("aws", "AWS_ACCESS_KEY_ID")
@property
def AWS_SECRET_ACCESS_KEY(self):
return self.parser.get("aws", "AWS_SECRET_ACCESS_KEY")
@property
def S3_CUBOID_BUCKET(self):
return self.parser.get("s3", "S3_CUBOID_BUCKET")
@property
def S3_TILE_BUCKET(self):
return self.parser.get("s3", "S3_TILE_BUCKET")
@property
def DYNAMO_CUBOIDINDEX_TABLE(self):
return self.parser.get("dynamo", "DYNAMO_CUBOIDINDEX_TABLE")
@property
def DYNAMO_TILEINDEX_TABLE(self):
return self.parser.get("dynamo", "DYNAMO_TILEINDEX_TABLE")
@property
def SUPER_CUBOID_SIZE(self):
return [
int(i) for i in self.parser.get("cuboid", "SUPER_CUBOID_SIZE").split(",")
]
@property
def DEV_MODE(self):
return self.parser.getboolean("proj", "DEV_MODE")
@property
def S3_ENDPOINT(self):
if self.DEV_MODE:
return self.parser.get("s3", "S3_DEV_ENDPOINT")
else:
return None
@property
def DYNAMO_ENDPOINT(self):
if self.DEV_MODE:
return self.parser.get("dynamo", "DYNAMO_DEV_ENDPOINT")
else:
return None
@property
def SQS_ENDPOINT(self):
if self.DEV_MODE:
return self.parser.get("sqs", "SQS_DEV_ENDPOINT")
else:
return None
@property
def SNS_ENDPOINT(self):
if self.DEV_MODE:
return self.parser.get("sns", "SNS_DEV_ENDPOINT")
else:
return None
@property
def LAMBDA_FUNCTION_LIST(self):
return [i for i in self.parser.get("lambda", "LAMBDA_FUNCTION_LIST").split(",")]
```
#### File: ndingest/util/ndutil.py
```python
from __future__ import print_function
from __future__ import absolute_import
import hashlib
from util.util import Util
class NDUtil(Util):
@staticmethod
def generateCuboidKey(
project_name, channel_name, resolution, morton_index, time_index=0
):
"""Generate the key for the supercube"""
hashm = hashlib.md5()
hashm.update(
"{}&{}&{}&{}&{}".format(
project_name, channel_name, resolution, morton_index, time_index
)
)
return "{}&{}&{}&{}&{}&{}".format(
hashm.hexdigest(),
project_name,
channel_name,
resolution,
morton_index,
time_index,
)
```
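The resulting key layout, for reference (illustrative values; note the module is Python 2 era, since `hashlib.update()` is given a `str`):
```python
key = NDUtil.generateCuboidKey("kasthuri11", "image", 0, 12345)
# -> "<md5 of 'kasthuri11&image&0&12345&0'>&kasthuri11&image&0&12345&0"
```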
#### File: ndingest/test/conftest.py
```python
import boto3
from moto import mock_s3, mock_iam, mock_sqs
import os
import pytest
from ndingest.settings.settings import Settings
# Enable test mode by setting the expected env variable that
# settings/bosssettings.py looks for.
os.environ['NDINGEST_TEST'] = '1'
settings = Settings.load()
from io import BytesIO
from ndingest.ndingestproj.ingestproj import IngestProj
ProjClass = IngestProj.load()
if settings.PROJECT_NAME == "Boss":
nd_proj = ProjClass("testCol", "kasthuri11", "image", 0, 124)
else:
nd_proj = ProjClass("kasthuri11", "image", "0")
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
@pytest.fixture(scope="function")
def iam(aws_credentials):
with mock_iam():
yield boto3.client("iam", region_name="us-east-1")
@pytest.fixture(scope="function")
def sqs(aws_credentials):
with mock_sqs():
# yield boto3.client('sqs', region_name='us-east-1')
yield boto3.resource("sqs", region_name="us-east-1")
@pytest.fixture(scope="function")
def tile_bucket(s3, iam):
from ndingest.ndbucket.tilebucket import TileBucket
TileBucket.createBucket()
yield TileBucket(nd_proj.project_name)
TileBucket.deleteBucket()
```
#### File: ndingest/test/test_bossutil.py
```python
import pytest
import unittest
from ndingest.util.bossutil import BossUtil, TILE_INGEST, VOLUMETRIC_INGEST
from ndingest.ndingestproj.bossingestproj import BossIngestProj
from ndingest.settings.settings import Settings
settings = Settings.load()
import warnings
@pytest.fixture(scope="function")
def boss_util_fixtures(tile_bucket, sqs):
job_id = 123
nd_proj = BossIngestProj("testCol", "kasthuri11", "image", 0, job_id)
from ndingest.ndqueue.uploadqueue import UploadQueue
UploadQueue.createQueue(nd_proj)
upload_queue = UploadQueue(nd_proj)
from ndingest.ndqueue.tileindexqueue import TileIndexQueue
TileIndexQueue.createQueue(nd_proj)
tile_index_queue = TileIndexQueue(nd_proj)
def get_test_data():
return (nd_proj, upload_queue, tile_index_queue, tile_bucket)
yield get_test_data
UploadQueue.deleteQueue(nd_proj)
TileIndexQueue.deleteQueue(nd_proj)
class TestBossUtil:
def _setup(self, boss_util_fixtures):
"""
Create all member variables. This was originally derived from
unittest.TestCase. Put in every test method.
"""
test_data = boss_util_fixtures()
self.job_id = test_data[0].job_id
self.upload_queue = test_data[1]
self.tile_index_queue = test_data[2]
self.tile_bucket = test_data[3]
def test_create_ingest_policy_tile(self, boss_util_fixtures):
self._setup(boss_util_fixtures)
policy = BossUtil.generate_ingest_policy(
self.job_id,
self.upload_queue,
self.tile_index_queue,
self.tile_bucket.bucket.name,
ingest_type=TILE_INGEST,
)
from ndingest.ndbucket.tilebucket import TileBucket
try:
assert settings.IAM_POLICY_PATH == policy.path
assert policy.default_version is not None
statements = policy.default_version.document["Statement"]
assert 3 == len(statements)
for stmt in statements:
if stmt["Sid"] == "ClientUploadQueuePolicy":
for perm in [
"sqs:ReceiveMessage",
"sqs:GetQueueAttributes",
"sqs:DeleteMessage",
]:
assert perm in stmt["Action"]
assert len(stmt["Action"]) == 3
assert self.upload_queue.arn == stmt["Resource"]
elif stmt["Sid"] == "ClientTileBucketPolicy":
assert "s3:PutObject" in stmt["Action"]
assert len(stmt["Action"]) == 1
assert (
TileBucket.buildArn(self.tile_bucket.bucket.name)
== stmt["Resource"]
)
elif stmt["Sid"] == "ClientIndexQueuePolicy":
assert "sqs:SendMessage" in stmt["Action"]
assert len(stmt["Action"]) == 1
assert self.tile_index_queue.arn == stmt["Resource"]
finally:
policy.delete()
def test_create_ingest_policy_volumetric(self, boss_util_fixtures):
self._setup(boss_util_fixtures)
policy = BossUtil.generate_ingest_policy(
self.job_id,
self.upload_queue,
self.tile_index_queue,
self.tile_bucket.bucket.name,
ingest_type=VOLUMETRIC_INGEST,
)
from ndingest.ndbucket.tilebucket import TileBucket
try:
assert settings.IAM_POLICY_PATH == policy.path
assert policy.default_version is not None
statements = policy.default_version.document["Statement"]
assert 2 == len(statements)
for stmt in statements:
if stmt["Sid"] == "ClientUploadQueuePolicy":
for perm in [
"sqs:ReceiveMessage",
"sqs:GetQueueAttributes",
"sqs:DeleteMessage",
]:
assert perm in stmt["Action"]
assert 3 == len(stmt["Action"])
assert self.upload_queue.arn == stmt["Resource"]
elif stmt["Sid"] == "ClientTileBucketPolicy":
assert "s3:PutObject" in stmt["Action"]
assert len(stmt["Action"]) == 1
assert (
TileBucket.buildArn(self.tile_bucket.bucket.name)
== stmt["Resource"]
)
finally:
policy.delete()
def test_delete_ingest_policy(self, boss_util_fixtures):
self._setup(boss_util_fixtures)
BossUtil.generate_ingest_policy(
self.job_id,
self.upload_queue,
self.tile_index_queue,
self.tile_bucket.bucket.name,
)
assert BossUtil.delete_ingest_policy(self.job_id)
``` |
{
"source": "jhuapl-boss/spdb",
"score": 2
} |
#### File: spatialdb/test/setup.py
```python
from pkg_resources import resource_filename
import json
import os
from moto import mock_s3
from moto import mock_dynamodb2
from moto import mock_sqs
import boto3
from botocore.exceptions import ClientError
import time
from spdb.project.test.resource_setup import get_image_dict, get_anno_dict
from spdb.project import BossResourceBasic
from spdb.spatialdb.object import get_region
import random
import os
def get_account_id():
"""Method to get the AWS account ID
Returns:
(str)
"""
return boto3.client('sts').get_caller_identity()['Account']
def load_test_config_file():
"""Load the ini file with the integration test environment
If SPDB_TEST_CONFIG is not defined, loading the default Boss
configuration file is attempted.
Returns:
(ConfigParser)
"""
config_file = os.environ.get('SPDB_TEST_CONFIG', '/etc/boss/boss.config')
if not os.path.exists(config_file):
raise RuntimeError("SPDB_TEST_CONFIG '{}' doesn't exist".format(config_file))
import configparser
config = configparser.ConfigParser()
config.optionxform = str # preserves the case of the keys
config.read(config_file)
return config
def get_test_configuration():
"""Method to get the integration test configuration info for spdb
Returns:
(dict, dict, dict, str): A tuple of (kvio_config, state_config, object_store_config, s3_flush_queue_name)
"""
config = load_test_config_file()
# Get domain info
parts = config['aws']['cache'].split('.')
domain = "{}.{}".format(parts[1], parts[2])
# kvio settings
kvio_config = {"cache_host": config['aws']['cache'],
"cache_db": 1,
"read_timeout": 86400}
# state settings
state_config = {"cache_state_host": config['aws']['cache-state'], "cache_state_db": 1}
_, domain = config['aws']['cuboid_bucket'].split('.', 1)
s3_flush_queue_name = "intTest.S3FlushQueue.{}".format(domain).replace('.', '-')
account_id = "{}".format(get_account_id())
account_id = account_id[:5]
object_store_config = {"s3_flush_queue": "https://queue.amazonaws.com/{}/{}".format(get_account_id(),
s3_flush_queue_name),
"cuboid_bucket": "inttest.{}.{}".format(account_id, config['aws']['cuboid_bucket']),
"page_in_lambda_function": config['lambda']['page_in_function'],
"page_out_lambda_function": config['lambda']['flush_function'],
"s3_index_table": "intTest.{}".format(config['aws']['s3-index-table']),
"id_index_table": "intTest.{}".format(config['aws']['id-index-table']),
"id_count_table": "intTest.{}".format(config['aws']['id-count-table'])}
return kvio_config, state_config, object_store_config, s3_flush_queue_name
class SetupTests(object):
""" Class to handle setting up tests, including support for mocking
"""
def __init__(self):
self.mock = True
self.mock_s3 = None
self.mock_dynamodb = None
self.mock_sqs = None
self.DYNAMODB_SCHEMA = resource_filename('spdb', 'spatialdb/dynamo/s3_index_table.json')
self.ID_INDEX_SCHEMA = resource_filename('spdb', 'spatialdb/dynamo/id_index_schema.json')
self.ID_COUNT_SCHEMA = resource_filename('spdb', 'spatialdb/dynamo/id_count_schema.json')
def start_mocking(self):
"""Method to start mocking"""
self.mock = True
self.mock_s3 = mock_s3()
self.mock_dynamodb = mock_dynamodb2()
self.mock_sqs = mock_sqs()
self.mock_s3.start()
self.mock_dynamodb.start()
self.mock_sqs.start()
def stop_mocking(self):
"""Method to stop mocking"""
self.mock_s3.stop()
self.mock_dynamodb.stop()
self.mock_sqs.stop()
# ***** Cuboid Index Table *****
def _create_index_table(self, table_name, schema_file):
"""Method to create the S3 index table"""
# Load json spec
with open(schema_file) as handle:
json_str = handle.read()
table_params = json.loads(json_str)
endpoint_url = None
if 'LOCAL_DYNAMODB_URL' in os.environ:
endpoint_url = os.environ['LOCAL_DYNAMODB_URL']
# Create table
client = boto3.client('dynamodb', region_name=get_region(), endpoint_url=endpoint_url)
_ = client.create_table(TableName=table_name, **table_params)
return client.get_waiter('table_exists')
def create_index_table(self, table_name, schema_file):
"""Method to create DynamoDB index table"""
if self.mock:
mock_dynamodb2(self._create_index_table(table_name, schema_file))
else:
waiter = self._create_index_table(table_name, schema_file)
# Wait for actual table to be ready.
self.wait_table_create(table_name)
def _delete_index_table(self, table_name):
"""Method to delete the S3 index table"""
endpoint_url = None
if 'LOCAL_DYNAMODB_URL' in os.environ:
endpoint_url = os.environ['LOCAL_DYNAMODB_URL']
client = boto3.client('dynamodb', region_name=get_region(), endpoint_url=endpoint_url)
client.delete_table(TableName=table_name)
def delete_index_table(self, table_name):
"""Method to create the S3 index table"""
if self.mock:
mock_dynamodb2(self._delete_index_table(table_name))
else:
self._delete_index_table(table_name)
# Wait for table to be deleted (since this is real)
self.wait_table_delete(table_name)
def wait_table_create(self, table_name):
"""Poll dynamodb at a 2s interval until the table creates."""
endpoint_url = None
if 'LOCAL_DYNAMODB_URL' in os.environ:
endpoint_url = os.environ['LOCAL_DYNAMODB_URL']
client = boto3.client('dynamodb', region_name=get_region(), endpoint_url=endpoint_url)
cnt = 0
while True:
time.sleep(2)
cnt += 1
if cnt > 50:
# Give up waiting.
return
try:
print('-', end='', flush=True)
resp = client.describe_table(TableName=table_name)
if resp['Table']['TableStatus'] == 'ACTIVE':
return
except:
# May get an exception if table doesn't currently exist.
pass
def wait_table_delete(self, table_name):
"""Poll dynamodb at a 2s interval until the table deletes."""
endpoint_url = None
if 'LOCAL_DYNAMODB_URL' in os.environ:
endpoint_url = os.environ['LOCAL_DYNAMODB_URL']
client = boto3.client('dynamodb', region_name=get_region(), endpoint_url=endpoint_url)
cnt = 0
while True:
time.sleep(2)
cnt += 1
if cnt > 50:
# Give up waiting.
return
try:
print('-', end='', flush=True)
resp = client.describe_table(TableName=table_name)
except:
# Exception thrown when table doesn't exist.
return
# ***** END Cuboid Index Table END *****
# ***** Cuboid Bucket *****
def _create_cuboid_bucket(self, bucket_name):
"""Method to create the S3 bucket for cuboid storage"""
client = boto3.client('s3', region_name=get_region())
_ = client.create_bucket(
ACL='private',
Bucket=bucket_name
)
return client.get_waiter('bucket_exists')
def create_cuboid_bucket(self, bucket_name):
"""Method to create the S3 bucket for cuboid storage"""
if self.mock:
mock_s3(self._create_cuboid_bucket(bucket_name))
else:
waiter = self._create_cuboid_bucket(bucket_name)
# Wait for bucket to exist
waiter.wait(Bucket=bucket_name)
def _delete_cuboid_bucket(self, bucket_name):
"""Method to delete the S3 bucket for cuboid storage"""
s3 = boto3.resource('s3', region_name=get_region())
bucket = s3.Bucket(bucket_name)
for obj in bucket.objects.all():
obj.delete()
# Delete bucket
bucket.delete()
return bucket
def delete_cuboid_bucket(self, bucket_name):
"""Method to create the S3 bucket for cuboid storage"""
if self.mock:
mock_s3(self._delete_cuboid_bucket(bucket_name))
else:
bucket = self._delete_cuboid_bucket(bucket_name)
# Wait for table to be deleted (since this is real)
bucket.wait_until_not_exists()
# ***** END Cuboid Bucket *****
# ***** Flush SQS Queue *****
def _create_flush_queue(self, queue_name):
"""Method to create a test sqs for flushing cubes"""
client = boto3.client('sqs', region_name=get_region())
response = client.create_queue(QueueName=queue_name)
url = response['QueueUrl']
return url
def create_flush_queue(self, queue_name):
"""Method to create a test sqs for flushing cubes"""
if self.mock:
url = mock_sqs(self._create_flush_queue(queue_name))
else:
url = self._create_flush_queue(queue_name)
time.sleep(10)
return url
def _delete_flush_queue(self, queue_url):
"""Method to delete a test sqs for flushing cubes"""
client = boto3.client('sqs', region_name=get_region())
client.delete_queue(QueueUrl=queue_url)
def delete_flush_queue(self, queue_name):
"""Method to delete a test sqs for flushing cubes"""
if self.mock:
mock_sqs(self._delete_flush_queue(queue_name))
else:
self._delete_flush_queue(queue_name)
# ***** END Flush SQS Queue *****
def get_image8_dict(self):
"""Method to get the config dictionary for an image8 resource"""
data = get_image_dict()
return data
def get_image16_dict(self):
"""Method to get the config dictionary for an image16 resource"""
data = self.get_image8_dict()
data['channel']['datatype'] = 'uint16'
data['boss_key'] = 'col1&exp1&ch5'
data['lookup_key'] = '4&3&23'
return data
def get_anno64_dict(self):
"""Method to get the config dictionary for an image16 resource"""
data = get_anno_dict()
return data
class AWSSetupLayer(object):
"""A nose2 layer for setting up temporary AWS resources for testing ONCE per run"""
setup_helper = SetupTests()
kvio_config = None
state_config = None
object_store_config = None
@classmethod
def setUp(cls):
# Turn off mocking (this layer is used only during integration tests)
cls.setup_helper.mock = False
# Get SPDB config
cls.kvio_config, cls.state_config, cls.object_store_config, cls.s3_flush_queue_name = get_test_configuration()
# Setup AWS
print('Creating Temporary AWS Resources', end='', flush=True)
try:
cls.setup_helper.create_index_table(cls.object_store_config["s3_index_table"],
cls.setup_helper.DYNAMODB_SCHEMA)
except ClientError:
cls.setup_helper.delete_index_table(cls.object_store_config["s3_index_table"])
cls.setup_helper.create_index_table(cls.object_store_config["s3_index_table"],
cls.setup_helper.DYNAMODB_SCHEMA)
try:
cls.setup_helper.create_index_table(cls.object_store_config["id_index_table"],
cls.setup_helper.ID_INDEX_SCHEMA)
except ClientError:
cls.setup_helper.delete_index_table(cls.object_store_config["id_index_table"])
cls.setup_helper.create_index_table(cls.object_store_config["id_index_table"],
cls.setup_helper.ID_INDEX_SCHEMA)
try:
cls.setup_helper.create_index_table(cls.object_store_config["id_count_table"],
cls.setup_helper.ID_COUNT_SCHEMA)
except ClientError:
cls.setup_helper.delete_index_table(cls.object_store_config["id_count_table"])
cls.setup_helper.create_index_table(cls.object_store_config["id_count_table"],
cls.setup_helper.ID_COUNT_SCHEMA)
try:
cls.setup_helper.create_cuboid_bucket(cls.object_store_config["cuboid_bucket"])
except ClientError:
cls.setup_helper.delete_cuboid_bucket(cls.object_store_config["cuboid_bucket"])
cls.setup_helper.create_cuboid_bucket(cls.object_store_config["cuboid_bucket"])
try:
cls.object_store_config["s3_flush_queue"] = cls.setup_helper.create_flush_queue(cls.s3_flush_queue_name)
except ClientError:
try:
cls.setup_helper.delete_flush_queue(cls.object_store_config["s3_flush_queue"])
except:
pass
time.sleep(61)
cls.object_store_config["s3_flush_queue"] = cls.setup_helper.create_flush_queue(cls.s3_flush_queue_name)
print('Done', flush=True)
@classmethod
def tearDown(cls):
print('\nDeleting Temporary AWS Resources', end='', flush=True)
try:
cls.setup_helper.delete_index_table(cls.object_store_config["s3_index_table"])
except:
pass
try:
cls.setup_helper.delete_index_table(cls.object_store_config["id_index_table"])
except:
pass
try:
cls.setup_helper.delete_index_table(cls.object_store_config["id_count_table"])
except:
pass
try:
cls.setup_helper.delete_cuboid_bucket(cls.object_store_config["cuboid_bucket"])
except:
pass
try:
cls.setup_helper.delete_flush_queue(cls.object_store_config["s3_flush_queue"])
except:
pass
print('Done', flush=True)
@classmethod
def clear_flush_queue(cls):
"""
Empty the S3 flush queue. Should be used during tearDown() of tests that
use the cutout service to avoid a failing test from affecting other
tests.
"""
sqs = boto3.client('sqs', region_name=get_region())
url = cls.object_store_config['s3_flush_queue']
while True:
resp = sqs.receive_message(QueueUrl=url, MaxNumberOfMessages=10)
if 'Messages' not in resp:
break
msgs = [{'Id': msg['MessageId'], 'ReceiptHandle': msg['ReceiptHandle']} for msg in resp['Messages']]
sqs.delete_message_batch(QueueUrl=url, Entries=msgs)
```
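A sketch of how an integration test might attach itself to `AWSSetupLayer` (assuming nose2-style layers, which is how spdb's integration tests are organized; the test body here is hypothetical):
```python
import unittest

from spdb.spatialdb.test.setup import AWSSetupLayer


class CutoutIntegrationTest(unittest.TestCase):
    layer = AWSSetupLayer            # AWS resources are created once per run

    def test_flush_queue_available(self):
        cfg = self.layer.object_store_config
        self.assertIn("s3_flush_queue", cfg)

    def tearDown(self):
        # Avoid leaking messages between tests that exercise the cutout path.
        self.layer.clear_flush_queue()
```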
#### File: spatialdb/test/test_object_indices.py
```python
from spdb.spatialdb.object_indices import (ObjectIndices, LAST_PARTITION_KEY,
REV_ID)
import botocore
import numpy as np
import os
from spdb.c_lib.ndlib import XYZMorton
from spdb.c_lib.ndtype import CUBOIDSIZE
from spdb.project import BossResourceBasic
from spdb.project.test.resource_setup import get_anno_dict
from spdb.spatialdb.object import AWSObjectStore
from spdb.spatialdb import SpatialDB
from spdb.spatialdb.cube import Cube
import unittest
from unittest.mock import patch, DEFAULT, ANY
import random
from spdb.project import BossResourceBasic
from spdb.spatialdb.test.setup import SetupTests
from spdb.spatialdb.error import SpdbError
class ObjectIndicesTestMixin(object):
def setUp(self):
# Randomize the look-up key so tests don't mess with each other
self.resource._lookup_key = "1&2&{}".format(random.randint(4, 1000))
def test_make_ids_strings_ignore_zeros(self):
zeros = np.zeros(4, dtype='uint64')
expected = []
actual = self.obj_ind._make_ids_strings(zeros)
self.assertEqual(expected, actual)
def test_make_ids_strings_mix(self):
arr = np.zeros(4, dtype='uint64')
arr[0] = 12345
arr[2] = 9876
expected = ['12345', '9876']
actual = self.obj_ind._make_ids_strings(arr)
self.assertEqual(expected, actual)
@unittest.skip('Skipping - currently indexing disabled')
def test_update_id_indices_ignores_zeros(self):
"""
Never send id 0 to the DynamoDB id index or cuboid index! Since
0 is the default value before an id is assigned to a voxel, this
would blow way past DynamoDB limits.
"""
resolution = 0
version = 0
_id = 300
id_str_list = ['{}'.format(_id)]
cube_data = np.zeros(5, dtype='uint64')
cube_data[2] = _id
key = 'some_obj_key'
exp_channel_key = self.obj_ind.generate_channel_id_key(self.resource, resolution, _id)
with patch.object(self.obj_ind.dynamodb, 'update_item') as mock_update_item:
mock_update_item.return_value = {
'ResponseMetadata': { 'HTTPStatusCode': 200 }
}
# Method under test.
self.obj_ind.update_id_indices(self.resource, resolution, [key], [cube_data], version)
# Expect only 2 calls because there's only 1 non-zero id.
self.assertEqual(2, mock_update_item.call_count)
# First call should update s3 cuboid index.
kall0 = mock_update_item.mock_calls[0]
_, _, kwargs0 = kall0
self.assertEqual(id_str_list, kwargs0['ExpressionAttributeValues'][':ids']['NS'])
# Second call should update id index.
kall1 = mock_update_item.mock_calls[1]
_, _, kwargs1 = kall1
self.assertEqual(exp_channel_key, kwargs1['Key']['channel-id-key']['S'])
def test_get_cuboids_single_chunk(self):
"""
Test behavior when there is only one chunk of cuboids associated with
an object id.
"""
res = 0
obj_id = 2555
version = 0
morton_id = '23'
with patch.object(self.obj_ind.dynamodb, 'get_item') as mock_get_item:
mock_get_item.return_value = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'Item': {
'cuboid-set': {'SS': [morton_id]}
}
}
exp_key = AWSObjectStore.generate_object_key(
self.resource, res, 0, morton_id)
# Method under test.
actual = self.obj_ind.get_cuboids(
self.resource, res, obj_id, version)
self.assertEqual([exp_key], actual)
def test_get_cuboids_multiple_chunks(self):
"""
Test behavior when morton ids associated with an id span more than one
chunk in Dynamo.
"""
res = 0
obj_id = 2555
version = 0
morton_id1 = '23'
morton_id2 = '58'
with patch.object(self.obj_ind.dynamodb, 'get_item') as mock_get_item:
mock_get_item.side_effect = [
{
'ResponseMetadata': {'HTTPStatusCode': 200},
'Item': {
'cuboid-set': {'SS': [morton_id1]},
LAST_PARTITION_KEY: {'N': '1'}
}
},
{
'ResponseMetadata': {'HTTPStatusCode': 200},
'Item': {
'cuboid-set': {'SS': [morton_id2]}
}
}
]
exp_key1 = AWSObjectStore.generate_object_key(
self.resource, res, 0, morton_id1)
exp_key2 = AWSObjectStore.generate_object_key(
self.resource, res, 0, morton_id2)
# Method under test.
actual = self.obj_ind.get_cuboids(
self.resource, res, obj_id, version)
self.assertCountEqual([exp_key1, exp_key2], actual)
def test_get_loose_bounding_box(self):
resolution = 0
time_sample = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
pos0 = [4, 4, 4]
pos1 = [2, 1, 3]
pos2 = [6, 7, 5]
mort0 = XYZMorton(pos0)
mort1 = XYZMorton(pos1)
mort2 = XYZMorton(pos2)
key0 = AWSObjectStore.generate_object_key(self.resource, resolution, time_sample, mort0)
key1 = AWSObjectStore.generate_object_key(self.resource, resolution, time_sample, mort1)
key2 = AWSObjectStore.generate_object_key(self.resource, resolution, time_sample, mort2)
id = 2234
with patch.object(self.obj_ind, 'get_cuboids') as fake_get_cuboids:
fake_get_cuboids.return_value = [key0, key1, key2]
# Method under test.
actual = self.obj_ind.get_loose_bounding_box(self.resource, resolution, id)
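            # The loose bounding box is the cuboid-aligned hull of the returned
            # cuboids: x positions span 2..6, y spans 1..7, z spans 3..5, each
            # scaled by the cuboid dimensions, with the upper bound extended by
            # one cuboid so the range is exclusive.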
expected = {
'x_range': [2*x_cube_dim, (6+1)*x_cube_dim],
'y_range': [1*y_cube_dim, (7+1)*y_cube_dim],
'z_range': [3*z_cube_dim, (5+1)*z_cube_dim],
't_range': [0, 1]
}
self.assertEqual(expected, actual)
def test_get_loose_bounding_box_not_found(self):
"""Make sure None returned if id is not in channel."""
resolution = 0
time_sample = 0
id = 2234
with patch.object(self.obj_ind, 'get_cuboids') as fake_get_cuboids:
fake_get_cuboids.return_value = []
actual = self.obj_ind.get_loose_bounding_box(
self.resource, resolution, id)
expected = None
self.assertEqual(expected, actual)
@patch('spdb.spatialdb.SpatialDB', autospec=True)
def test_tight_bounding_box_x_axis_single_cuboid(self, mock_spdb):
"""Loose bounding box only spans a single cuboid."""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, x_cube_dim]
y_rng = [0, y_cube_dim]
z_rng = [0, z_cube_dim]
t_rng = [0, 1]
cube = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube.data[0][7][128][10] = id
cube.data[0][7][128][11] = id
cube.data[0][7][128][12] = id
mock_spdb.cutout.return_value = cube
expected = (10, 12)
# Method under test.
actual = self.obj_ind._get_tight_bounding_box_x_axis(
mock_spdb.cutout, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertEqual(expected, actual)
self.assertEqual(1, mock_spdb.cutout.call_count)
@patch('spdb.spatialdb.SpatialDB', autospec=True)
def test_tight_bounding_box_x_axis_multiple_cuboids(self, mock_spdb):
"""Loose bounding box spans multiple cuboids."""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, 2*x_cube_dim]
y_rng = [0, y_cube_dim]
z_rng = [0, z_cube_dim]
t_rng = [0, 1]
cube = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube.data[0][7][128][10] = id
cube.data[0][7][128][11] = id
cube.data[0][7][128][12] = id
cube2 = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube2.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube2.data[0][7][128][3] = id
cube2.data[0][7][128][4] = id
# Return cube on the 1st call to cutout and cube2 on the 2nd call.
mock_spdb.cutout.side_effect = [cube, cube2]
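            # cube2 holds the id at local x indices 3 and 4; that cuboid sits one
            # cuboid to the right, so the max is x_cube_dim + 4 = 516 (which
            # implies x_cube_dim is 512 at resolution 0 here).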
expected = (10, 516)
# Method under test.
actual = self.obj_ind._get_tight_bounding_box_x_axis(
mock_spdb.cutout, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_spdb.cutout.call_count)
@patch('spdb.spatialdb.SpatialDB', autospec=True)
def test_tight_bounding_box_y_axis_single_cuboid(self, mock_spdb):
"""Loose bounding box only spans a single cuboid."""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, x_cube_dim]
y_rng = [0, y_cube_dim]
z_rng = [0, z_cube_dim]
t_rng = [0, 1]
cube = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube.data[0][7][200][10] = id
cube.data[0][7][201][10] = id
cube.data[0][7][202][10] = id
mock_spdb.cutout.return_value = cube
expected = (200, 202)
# Method under test.
actual = self.obj_ind._get_tight_bounding_box_y_axis(
mock_spdb.cutout, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertEqual(expected, actual)
self.assertEqual(1, mock_spdb.cutout.call_count)
@patch('spdb.spatialdb.SpatialDB', autospec=True)
def test_tight_bounding_box_y_axis_multiple_cuboids(self, mock_spdb):
"""Loose bounding box spans multiple cuboids."""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, x_cube_dim]
y_rng = [0, 2*y_cube_dim]
z_rng = [0, z_cube_dim]
t_rng = [0, 1]
cube = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube.data[0][7][509][11] = id
cube.data[0][7][510][11] = id
cube.data[0][7][511][11] = id
cube2 = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube2.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube2.data[0][7][0][11] = id
cube2.data[0][7][1][11] = id
# Return cube on the 1st call to cutout and cube2 on the 2nd call.
mock_spdb.cutout.side_effect = [cube, cube2]
expected = (509, 513)
# Method under test.
actual = self.obj_ind._get_tight_bounding_box_y_axis(
mock_spdb.cutout, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_spdb.cutout.call_count)
@patch('spdb.spatialdb.SpatialDB', autospec=True)
def test_tight_bounding_box_z_axis_single_cuboid(self, mock_spdb):
"""Loose bounding box only spans a single cuboid."""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, x_cube_dim]
y_rng = [0, y_cube_dim]
z_rng = [0, z_cube_dim]
t_rng = [0, 1]
cube = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube.data[0][12][200][10] = id
cube.data[0][13][200][10] = id
cube.data[0][14][200][10] = id
mock_spdb.cutout.return_value = cube
expected = (12, 14)
# Method under test.
actual = self.obj_ind._get_tight_bounding_box_z_axis(
mock_spdb.cutout, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertEqual(expected, actual)
self.assertEqual(1, mock_spdb.cutout.call_count)
def test_create_id_counter_key(self):
self.resource._lookup_key = "1&2&3"
key = self.obj_ind.generate_reserve_id_key(self.resource)
self.assertEqual(key, '14a343245e1adb6297e43c12e22770ad&1&2&3')
def test_reserve_id_wrong_type(self):
img_data = self.setup_helper.get_image8_dict()
img_resource = BossResourceBasic(img_data)
with self.assertRaises(SpdbError):
start_id = self.obj_ind.reserve_ids(img_resource, 10)
@patch('spdb.spatialdb.SpatialDB', autospec=True)
def test_tight_bounding_box_z_axis_multiple_cuboids(self, mock_spdb):
"""Loose bounding box spans multiple cuboids."""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, x_cube_dim]
y_rng = [0, y_cube_dim]
z_rng = [0, 2*z_cube_dim]
t_rng = [0, 1]
cube = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube.data[0][13][509][11] = id
cube.data[0][14][509][11] = id
cube.data[0][15][509][11] = id
cube2 = Cube.create_cube(
self.resource, (x_cube_dim, y_cube_dim, z_cube_dim))
cube2.data = np.zeros((1, z_cube_dim, y_cube_dim, x_cube_dim))
cube2.data[0][0][509][11] = id
cube2.data[0][1][509][11] = id
# Return cube on the 1st call to cutout and cube2 on the 2nd call.
mock_spdb.cutout.side_effect = [cube, cube2]
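            # cube2 holds the id at local z indices 0 and 1; that cuboid sits one
            # cuboid deeper, so the max is z_cube_dim + 1 = 17 (which implies
            # z_cube_dim is 16 at resolution 0 here).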
expected = (13, 17)
# Method under test.
actual = self.obj_ind._get_tight_bounding_box_z_axis(
mock_spdb.cutout, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_spdb.cutout.call_count)
def test_get_tight_bounding_box_ranges(self):
"""Ensure that ranges are Python style ranges: [x, y).
In other words, make sure the max indices are incremented by 1.
"""
resolution = 0
[x_cube_dim, y_cube_dim, z_cube_dim] = CUBOIDSIZE[resolution]
id = 12345
x_rng = [0, x_cube_dim]
y_rng = [0, y_cube_dim]
z_rng = [0, 2*z_cube_dim]
t_rng = [0, 1]
# Don't need real one because will provide fake
# _get_tight_bounding_box_*_axis().
cutout_fcn = None
with patch.object(self.obj_ind, '_get_tight_bounding_box_x_axis') as fake_get_x_axis:
with patch.object(self.obj_ind, '_get_tight_bounding_box_y_axis') as fake_get_y_axis:
with patch.object(self.obj_ind, '_get_tight_bounding_box_z_axis') as fake_get_z_axis:
x_min_max = (35, 40)
y_min_max = (100, 105)
z_min_max = (22, 26)
fake_get_x_axis.return_value = x_min_max
fake_get_y_axis.return_value = y_min_max
fake_get_z_axis.return_value = z_min_max
# Method under test.
actual = self.obj_ind.get_tight_bounding_box(
cutout_fcn, self.resource, resolution, id,
x_rng, y_rng, z_rng, t_rng)
self.assertIn('x_range', actual)
self.assertIn('y_range', actual)
self.assertIn('z_range', actual)
self.assertIn('t_range', actual)
self.assertEqual(x_min_max[0], actual['x_range'][0])
self.assertEqual(1+x_min_max[1], actual['x_range'][1])
self.assertEqual(y_min_max[0], actual['y_range'][0])
self.assertEqual(1+y_min_max[1], actual['y_range'][1])
self.assertEqual(z_min_max[0], actual['z_range'][0])
self.assertEqual(1+z_min_max[1], actual['z_range'][1])
self.assertEqual(t_rng, actual['t_range'])
def test_write_cuboid_chunk_0(self):
"""
When chunk number is 0, the key passed to write_cuboid_dynamo() should
be unmodified.
"""
with patch.object(self.obj_ind, 'write_cuboid_dynamo') as fake_write_cuboid_dynamo:
res = 0
id = 5555
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
chunk_num = 0
version = 0
morton = 3
rev_id = 10
lookup_key = '1&4&2&0'
max_capacity = 100
# Method under test.
actual = self.obj_ind.write_cuboid(
max_capacity, morton, key, chunk_num, rev_id, lookup_key, version)
fake_write_cuboid_dynamo.assert_called_with(morton, key, rev_id, lookup_key, version)
self.assertEqual(chunk_num, actual)
def test_write_cuboid_chunk_n(self):
"""
Key sent to write_cuboid_dynamo() should have the chunk number appended
to it when it is non-zero.
"""
with patch.object(self.obj_ind, 'write_cuboid_dynamo') as fake_write_cuboid_dynamo:
res = 0
id = 5555
chunk_num = 2
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
exp_key = '{}&{}'.format(key, chunk_num)
version = 0
morton = 3
rev_id = 10
lookup_key = '1&4&2&0'
max_capacity = 100
# Method under test.
actual = self.obj_ind.write_cuboid(
max_capacity, morton, key, chunk_num, rev_id, lookup_key, version)
fake_write_cuboid_dynamo.assert_called_with(
morton, exp_key, rev_id, lookup_key, version)
self.assertEqual(chunk_num, actual)
def test_write_cuboid_partition_full_413(self):
"""
        The partition specified by key is full, so chunk_num should be
        incremented and the id written to a new partition. This test
        uses error code 413.
"""
with patch.object(self.obj_ind, 'write_cuboid_dynamo') as fake_write_cuboid_dynamo:
# Raise exception on first call to simulate a full partition.
resp = { 'Error': { 'Code': '413' } }
fake_write_cuboid_dynamo.side_effect = [
botocore.exceptions.ClientError(resp, 'update_item'),
{}
]
res = 0
id = 5555
chunk_num = 2
new_chunk_num = chunk_num + 1
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
exp_key1 = '{}&{}'.format(key, chunk_num)
exp_key2 = '{}&{}'.format(key, new_chunk_num)
version = 0
morton = 8
rev_id = 10
lookup_key = '1&4&2&0'
max_capacity = 100
# Method under test.
actual = self.obj_ind.write_cuboid(
max_capacity, morton, key, chunk_num, rev_id, lookup_key, version)
# Should try to write to new partition after first try raises.
exp_calls = [
unittest.mock.call(morton, exp_key1, rev_id, lookup_key, version),
unittest.mock.call(morton, exp_key2, None, lookup_key, version)
]
fake_write_cuboid_dynamo.assert_has_calls(exp_calls)
# Should return chunk number of new partition.
self.assertEqual(new_chunk_num, actual)
def test_write_cuboid_partition_full_validation_exception(self):
"""
        The partition specified by key is full, so chunk_num should be
        incremented and the id written to a new partition. This test
        uses error code ValidationException.
"""
with patch.object(self.obj_ind, 'write_cuboid_dynamo') as fake_write_cuboid_dynamo:
# Raise exception on first call to simulate a full partition.
resp = { 'Error': { 'Code': 'ValidationException' } }
fake_write_cuboid_dynamo.side_effect = [
botocore.exceptions.ClientError(resp, 'update_item'),
{}
]
res = 0
id = 5555
chunk_num = 2
new_chunk_num = chunk_num + 1
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
exp_key1 = '{}&{}'.format(key, chunk_num)
exp_key2 = '{}&{}'.format(key, new_chunk_num)
version = 0
morton = 8
rev_id = 10
lookup_key = '1&4&2&0'
max_capacity = 100
# Method under test.
actual = self.obj_ind.write_cuboid(
max_capacity, morton, key, chunk_num, rev_id, lookup_key, version)
# Should try to write to new partition after first try raises.
exp_calls = [
unittest.mock.call(morton, exp_key1, rev_id, lookup_key, version),
unittest.mock.call(morton, exp_key2, None, lookup_key, version)
]
fake_write_cuboid_dynamo.assert_has_calls(exp_calls)
# Should return chunk number of new partition.
self.assertEqual(new_chunk_num, actual)
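    # Hedged sketch of the retry behavior the two "partition full" tests above pin
    # down (the real logic lives in ObjectIndices.write_cuboid; key_for() and the
    # surrounding structure are assumptions for illustration only):
    #
    #     try:
    #         self.write_cuboid_dynamo(morton, key_for(chunk_num), rev_id,
    #                                  lookup_key, version)
    #     except botocore.exceptions.ClientError as err:
    #         if err.response['Error']['Code'] in ('413', 'ValidationException'):
    #             chunk_num += 1
    #             self.write_cuboid_dynamo(morton, key_for(chunk_num), None,
    #                                      lookup_key, version)
    #     return chunk_num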
def test_write_cuboid_max_capacity_exceeded(self):
"""
        The write of the id to the partition specified by key succeeds, but the
        configured max capacity for that chunk is exceeded. In this case, the returned chunk_num
should be incremented.
"""
with patch.object(self.obj_ind, 'write_cuboid_dynamo') as fake_write_cuboid_dynamo:
fake_write_cuboid_dynamo.return_value = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'ConsumedCapacity': {'CapacityUnits': 105.0}
}
res = 0
id = 5555
chunk_num = 2
new_chunk_num = chunk_num + 1
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
exp_key1 = '{}&{}'.format(key, chunk_num)
version = 0
morton = 8
rev_id = 10
lookup_key = '1&4&2&0'
max_capacity = 100
# Method under test.
actual = self.obj_ind.write_cuboid(
max_capacity, morton, key, chunk_num, rev_id, lookup_key, version)
exp_calls = [
unittest.mock.call(morton, exp_key1, rev_id, lookup_key, version)
]
fake_write_cuboid_dynamo.assert_has_calls(exp_calls)
# Should return chunk number of new partition.
self.assertEqual(new_chunk_num, actual)
def test_lookup_found(self):
with patch.object(self.obj_ind.dynamodb, 'query') as fake_dynamodb_query:
fake_dynamodb_query.side_effect = [{'Count': 0}, {'Count': 1}]
res = 0
id = 5555
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
morton = 8
last_chunk_num = 1
version = 0
actual = self.obj_ind.lookup(morton, key, last_chunk_num, version)
self.assertTrue(actual[0])
self.assertEqual(1, actual[1])
def test_lookup_not_found(self):
with patch.object(self.obj_ind.dynamodb, 'query') as fake_dynamodb_query:
fake_dynamodb_query.side_effect = [{'Count': 0}, {'Count': 0}]
res = 0
id = 5555
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
morton = 8
last_chunk_num = 1
version = 0
actual = self.obj_ind.lookup(morton, key, last_chunk_num, version)
self.assertFalse(actual[0])
self.assertEqual(-1, actual[1])
# moto not parsing KeyConditionExpression properly - last tried v1.1.25.
@unittest.skip('Waiting for moto to be fixed')
def test_lookup_with_dynamo(self):
res = 0
id = 5555
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
morton = 8
last_chunk_num = 1
version = 0
actual = self.obj_ind.lookup(morton, key, last_chunk_num, version)
self.assertFalse(actual[0])
self.assertEqual(-1, actual[1])
def test_get_last_partition_key_and_rev_id(self):
"""
Test when there is only one chunk for the entire object id.
"""
with patch.object(self.obj_ind.dynamodb, 'get_item') as fake_dynamodb_get_item:
expected_chunk = 0
expected_rev_id = 25
fake_dynamodb_get_item.return_value = {
'Item': {
LAST_PARTITION_KEY: { 'N': str(expected_chunk) },
REV_ID: { 'N': str(expected_rev_id) }
},
'ResponseMetadata': { 'HTTPStatusCode': 200 }
}
res = 0
id = 5555
version = 0
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
# Method under test.
actual = self.obj_ind.get_last_partition_key_and_rev_id(key, version)
self.assertEqual(expected_chunk, actual[0])
self.assertEqual(expected_rev_id, actual[1])
def test_get_last_partition_key_and_rev_id_multiple_chunks(self):
"""
        When there are multiple chunks, the revision id must come from the last
chunk.
"""
with patch.object(self.obj_ind.dynamodb, 'get_item') as fake_dynamodb_get_item:
expected_chunk = 2
first_chunk_rev_id = 229
expected_rev_id = 25
fake_dynamodb_get_item.side_effect = [
{
# Data from chunk 0.
'Item': {
LAST_PARTITION_KEY: { 'N': str(expected_chunk) },
REV_ID: { 'N': str(first_chunk_rev_id) }
},
'ResponseMetadata': { 'HTTPStatusCode': 200 }
},
{
# Data from chunk 2 (the last chunk).
'Item': {
REV_ID: { 'N': str(expected_rev_id) }
},
'ResponseMetadata': { 'HTTPStatusCode': 200 }
}
]
res = 0
id = 5555
version = 0
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
# Method under test.
actual = self.obj_ind.get_last_partition_key_and_rev_id(key, version)
expected_last_chunk_key = '{}&{}'.format(key, expected_chunk)
self.assertEqual(2, fake_dynamodb_get_item.call_count)
(_, _, kwargs) = fake_dynamodb_get_item.mock_calls[1]
self.assertEqual(expected_last_chunk_key, kwargs['Key']['channel-id-key']['S'])
self.assertEqual(expected_chunk, actual[0])
self.assertEqual(expected_rev_id, actual[1])
def test_get_last_partition_key_and_rev_id_no_last_partition_key_or_rev_id(self):
"""
        If there is no lastPartitionKey or revId, the method should return (0, None).
"""
with patch.object(self.obj_ind.dynamodb, 'get_item') as fake_dynamodb_get_item:
expected_chunk = 0
expected_rev_id = None
fake_dynamodb_get_item.return_value = {
'ResponseMetadata': { 'HTTPStatusCode': 200 },
'Item': {}
}
res = 0
id = 5555
version = 0
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
# Method under test.
actual = self.obj_ind.get_last_partition_key_and_rev_id(key, version)
self.assertEqual(expected_chunk, actual[0])
self.assertEqual(expected_rev_id, actual[1])
def test_get_last_partition_key_and_rev_id_item_does_not_exist(self):
"""
        If the key does not exist at all, a KeyError should be raised.
"""
with patch.object(self.obj_ind.dynamodb, 'get_item') as fake_dynamodb_get_item:
fake_dynamodb_get_item.return_value = {
'ResponseMetadata': { 'HTTPStatusCode': 200 }
}
res = 0
id = 5555
version = 0
key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
with self.assertRaises(KeyError):
self.obj_ind.get_last_partition_key_and_rev_id(key, version)
def test_write_id_index(self):
"""
Standard case where a new Dynamo key does not need to be created.
"""
res = 0
time_sample = 0
morton = 11
id = 4
version = 0
last_partition_key = 2
rev_id = 521
max_capacity = 100
obj_key = AWSObjectStore.generate_object_key(
self.resource, res, time_sample, morton)
chan_key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
key_parts = AWSObjectStore.get_object_key_parts(obj_key)
with patch.multiple(
self.obj_ind,
get_last_partition_key_and_rev_id=DEFAULT,
lookup=DEFAULT,
write_cuboid=DEFAULT,
update_last_partition_key=DEFAULT
) as mocks:
mocks['get_last_partition_key_and_rev_id'].return_value = (
last_partition_key, rev_id
)
mocks['write_cuboid'].return_value = last_partition_key
mocks['lookup'].return_value = (False, -1)
# Method under test.
self.obj_ind.write_id_index(max_capacity, obj_key, id, version)
mocks['write_cuboid'].assert_called_with(
max_capacity, str(morton), chan_key, last_partition_key,
rev_id, ANY, version)
mocks['update_last_partition_key'].assert_not_called()
def test_write_id_index_new_id(self):
"""
Case where id is written to Dynamo for the first time.
"""
res = 0
time_sample = 0
morton = 11
id = 4
version = 0
last_partition_key = 0
rev_id = None
max_capacity = 100
obj_key = AWSObjectStore.generate_object_key(
self.resource, res, time_sample, morton)
chan_key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
key_parts = AWSObjectStore.get_object_key_parts(obj_key)
with patch.multiple(
self.obj_ind,
get_last_partition_key_and_rev_id=DEFAULT,
lookup=DEFAULT,
write_cuboid=DEFAULT,
update_last_partition_key=DEFAULT
) as mocks:
# Id doesn't exist in Dynamo table, yet.
mocks['get_last_partition_key_and_rev_id'].side_effect = (
KeyError()
)
mocks['write_cuboid'].return_value = last_partition_key
mocks['lookup'].return_value = (False, -1)
# Method under test.
self.obj_ind.write_id_index(max_capacity, obj_key, id, version)
mocks['write_cuboid'].assert_called_with(
max_capacity, str(morton), chan_key, last_partition_key,
rev_id, ANY, version)
mocks['update_last_partition_key'].assert_not_called()
def test_write_id_index_overflow(self):
"""
Case where a new Dynamo key needs to be created because the
current key is full. The LAST_PARTITION_KEY should be updated.
"""
res = 0
time_sample = 0
morton = 11
id = 4
version = 0
last_partition_key = 2
rev_id = 224
no_rev_id = None
max_capacity = 100
obj_key = AWSObjectStore.generate_object_key(
self.resource, res, time_sample, morton)
chan_key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
key_parts = AWSObjectStore.get_object_key_parts(obj_key)
with patch.multiple(
self.obj_ind,
get_last_partition_key_and_rev_id=DEFAULT,
lookup=DEFAULT,
write_cuboid=DEFAULT,
update_last_partition_key=DEFAULT
) as mocks:
mocks['get_last_partition_key_and_rev_id'].return_value = (
last_partition_key, rev_id
)
mocks['write_cuboid'].return_value = last_partition_key + 1
mocks['lookup'].return_value = (False, -1)
# Method under test.
self.obj_ind.write_id_index(max_capacity, obj_key, id, version)
mocks['write_cuboid'].assert_called_with(
max_capacity, str(morton), chan_key, last_partition_key,
rev_id, ANY, version)
mocks['update_last_partition_key'].assert_called_with(
chan_key, last_partition_key + 1, version)
def test_update_last_partition_key(self):
"""
Just exercise the Dynamo update_item call.
"""
res = 0
time_sample = 0
morton = 11
id = 4
version = 0
chunk_num = 2
chan_key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
self.obj_ind.update_last_partition_key(chan_key, chunk_num, version)
@unittest.skip('Moto 1.2 fails now that if_not_exists added to UpdateExpression')
def test_write_cuboid_dynamo_no_revision_id(self):
"""
Just exercise the Dynamo update_item call with no revision id.
"""
res = 0
time_sample = 0
morton = 11
id = 4
version = 0
rev_id = None
lookup_key = '1&4&2&0'
chan_key = self.obj_ind.generate_channel_id_key(self.resource, res, id)
self.obj_ind.write_cuboid_dynamo(
morton, chan_key, rev_id, lookup_key, version)
class TestObjectIndices(ObjectIndicesTestMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Create a diction of configuration values for the test resource. """
# Create resource
cls.setup_helper = SetupTests()
cls.data = cls.setup_helper.get_anno64_dict()
cls.resource = BossResourceBasic(cls.data)
# Load config
cls.object_store_config = {"s3_flush_queue": 'https://mytestqueue.com',
"cuboid_bucket": "test_bucket",
"page_in_lambda_function": "page_in.test.boss",
"page_out_lambda_function": "page_out.test.boss",
"s3_index_table": "test_s3_table",
"id_index_table": "test_id_table",
"id_count_table": "test_count_table",
}
# Create AWS Resources needed for tests while mocking
cls.setup_helper.start_mocking()
with patch('spdb.spatialdb.test.setup.get_region') as fake_get_region:
fake_get_region.return_value = 'us-east-1'
cls.setup_helper.create_index_table(cls.object_store_config["id_count_table"], cls.setup_helper.ID_COUNT_SCHEMA)
cls.setup_helper.create_index_table(cls.object_store_config["id_index_table"], cls.setup_helper.ID_INDEX_SCHEMA)
cls.obj_ind = ObjectIndices(cls.object_store_config["s3_index_table"],
cls.object_store_config["id_index_table"],
cls.object_store_config["id_count_table"],
cls.object_store_config["cuboid_bucket"],
'us-east-1')
@classmethod
def tearDownClass(cls):
cls.setup_helper.stop_mocking()
if __name__ == '__main__':
unittest.main()
```
#### File: spatialdb/test/test_spatialdb.py
```python
import unittest
from unittest.mock import patch
from fakeredis import FakeStrictRedis
import redis
import collections
from spdb.project import BossResourceBasic
from spdb.spatialdb import Cube, SpatialDB, SpdbError
from spdb.c_lib.ndtype import CUBOIDSIZE
import numpy as np
from spdb.spatialdb.test.setup import SetupTests
import spdb.spatialdb.object
@patch('spdb.spatialdb.object.get_region', autospec=True, return_value='us-east-1')
class SpatialDBImageDataTestMixin(object):
cuboid_size = CUBOIDSIZE[0]
x_dim = cuboid_size[0]
y_dim = cuboid_size[1]
z_dim = cuboid_size[2]
def write_test_cube(self, sp, resource, res, cube, cache=True, s3=False):
"""
Method to write data to test read operations
Args:
sp (spdb.spatialdb.SpatialDB): spdb instance
resource (spdb.project.BossResource): Data model info based on the request or target resource
res (int): resolution
            cube (spdb.spatialdb.Cube): the cube of data to write (its time_range and morton_id determine the keys)
cache (bool): boolean indicating if cubes should be written to cache
s3 (bool): boolean indicating if cubes should be written to S3
Returns:
(list(str)): a list of the cached-cuboid keys written
"""
# Get cache key
t = []
cube_bytes = []
for time_point in range(cube.time_range[0], cube.time_range[1]):
t.append(time_point)
cube_bytes.append(cube.to_blosc_by_time_index(time_point))
keys = sp.kvio.generate_cached_cuboid_keys(resource, res, t, [cube.morton_id])
# Write cuboid to cache
if cache:
sp.kvio.put_cubes(keys, cube_bytes)
# Write cuboid to S3
if s3:
obj_keys = sp.objectio.cached_cuboid_to_object_keys(keys)
sp.objectio.put_objects(obj_keys, cube_bytes)
# Add to S3 Index
for key in obj_keys:
sp.objectio.add_cuboid_to_index(key)
return keys
def test_resource_locked(self, fake_get_region):
"""Method to test if the resource is locked"""
sp = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
assert not sp.resource_locked(self.resource.get_lookup_key())
# Fake locking a project
sp.cache_state.set_project_lock(self.resource.get_lookup_key(), True)
assert sp.resource_locked(self.resource.get_lookup_key())
# Fake unlocking a project
sp.cache_state.set_project_lock(self.resource.get_lookup_key(), False)
assert not sp.resource_locked(self.resource.get_lookup_key())
def test_get_cubes_no_time_single(self, fake_get_region):
"""Test the get_cubes method - no time - single"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube1.random()
cube1.morton_id = 32
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
keys = self.write_test_cube(db, self.resource, 0, cube1, cache=True, s3=False)
cube2 = db.get_cubes(self.resource, keys)
np.testing.assert_array_equal(cube1.data, cube2[0].data)
def test_get_cubes_no_time_multiple(self, fake_get_region):
"""Test the get_cubes method - no time - multiple cubes"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube1.random()
cube1.morton_id = 32
cube2 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube2.random()
cube2.morton_id = 33
cube3 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube3.random()
cube3.morton_id = 36
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
keys = self.write_test_cube(db, self.resource, 0, cube1, cache=True, s3=False)
keys.extend(self.write_test_cube(db, self.resource, 0, cube2, cache=True, s3=False))
keys.extend(self.write_test_cube(db, self.resource, 0, cube3, cache=True, s3=False))
cube_read = db.get_cubes(self.resource, keys)
np.testing.assert_array_equal(cube1.data, cube_read[0].data)
np.testing.assert_array_equal(cube2.data, cube_read[1].data)
np.testing.assert_array_equal(cube3.data, cube_read[2].data)
def test_get_cubes_time_single(self, fake_get_region):
"""Test the get_cubes method - time - single"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim], [0, 2])
cube1.random()
cube1.morton_id = 76
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
keys = self.write_test_cube(db, self.resource, 0, cube1, cache=True, s3=False)
cube2 = db.get_cubes(self.resource, keys)
np.testing.assert_array_equal(cube1.data, cube2[0].data)
def test_get_cubes_time_multiple(self, fake_get_region):
"""Test the get_cubes method - time - multiple"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim], [0, 4])
cube1.random()
cube1.morton_id = 32
cube2 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim], [0, 4])
cube2.random()
cube2.morton_id = 33
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
keys = self.write_test_cube(db, self.resource, 0, cube1, cache=True, s3=False)
keys.extend(self.write_test_cube(db, self.resource, 0, cube2, cache=True, s3=False))
cube_read = db.get_cubes(self.resource, keys)
np.testing.assert_array_equal(cube1.data, cube_read[0].data)
np.testing.assert_array_equal(cube2.data, cube_read[1].data)
def test_get_cubes_missing_time_step(self, fake_get_region):
"""Test get_cubes() when not supplying keys for all time steps in a
time range.
"""
EXTENTS = [self.x_dim, self.y_dim, self.z_dim]
FIRST_T_RNG = (0, 4)
cube1 = Cube.create_cube(self.resource, EXTENTS, FIRST_T_RNG)
cube1.random()
cube1.morton_id = 70
# Note, no data for time steps 4 and 5 provided.
SECOND_T_RNG = (6, 9)
cube2 = Cube.create_cube(self.resource, EXTENTS, SECOND_T_RNG)
cube2.random()
cube2.morton_id = 70
TOTAL_T_RNG = (0, 9)
exp_cube = Cube.create_cube(self.resource, EXTENTS, TOTAL_T_RNG)
exp_cube.zeros()
exp_cube.morton_id = 70
exp_cube.overwrite(cube1.data, FIRST_T_RNG)
exp_cube.overwrite(cube2.data, SECOND_T_RNG)
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
keys = self.write_test_cube(db, self.resource, 0, cube1, cache=True, s3=False)
keys.extend(self.write_test_cube(db, self.resource, 0, cube2, cache=True, s3=False))
# Method under test.
cube_read = db.get_cubes(self.resource, keys)
np.testing.assert_array_equal(exp_cube.data, cube_read[0].data)
def test_cutout_no_time_single_aligned_zero(self, fake_get_region):
"""Test the get_cubes method - no time - single"""
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
cube = db.cutout(self.resource, (7, 88, 243), (self.x_dim, self.y_dim, self.z_dim), 0)
np.testing.assert_array_equal(np.sum(cube.data), 0)
def test_cutout_no_time_single_aligned_zero_access_mode_no_cache(self, fake_get_region):
"""Test the get_cubes method - no time - single - bypass cache"""
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
cube = db.cutout(self.resource, (7, 88, 243), (self.x_dim, self.y_dim, self.z_dim), 0, access_mode="no_cache")
np.testing.assert_array_equal(np.sum(cube.data), 0)
def test_cutout_no_time_single_aligned_zero_access_mode_raw(self, fake_get_region):
"""Test the get_cubes method - no time - single - bypass cache and bypass dirty key check"""
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
cube = db.cutout(self.resource, (7, 88, 243), (self.x_dim, self.y_dim, self.z_dim), 0, access_mode="raw")
np.testing.assert_array_equal(np.sum(cube.data), 0)
def test_cutout_no_time_single_aligned_zero_access_mode_cache(self, fake_get_region):
"""Test the get_cubes method - no time - single - DO NOT bypass cache"""
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
cube = db.cutout(self.resource, (7, 88, 243), (self.x_dim, self.y_dim, self.z_dim), 0, access_mode="cache")
np.testing.assert_array_equal(np.sum(cube.data), 0)
def test_cutout_no_time_single_aligned_zero_access_mode_invalid(self, fake_get_region):
"""Test the get_cubes method - no time - single - Raise error due to invalid access_mode"""
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
with self.assertRaises(SpdbError):
db.cutout(self.resource, (7, 88, 243), (self.x_dim, self.y_dim, self.z_dim), 0, access_mode="wrong")
def test_cutout_no_time_single_aligned_hit(self, fake_get_region):
"""Test the get_cubes method - no time - single"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube1.random()
cube1.morton_id = 0
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
self.write_test_cube(db, self.resource, 0, cube1, cache=True, s3=False)
cube2 = db.cutout(self.resource, (0, 0, 0), (self.x_dim, self.y_dim, self.z_dim), 0)
np.testing.assert_array_equal(cube1.data, cube2.data)
def test_cutout_no_time_single_aligned_miss(self, fake_get_region):
"""Test the get_cubes method - no time - single"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube1.random()
cube1.morton_id = 0
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
self.write_test_cube(db, self.resource, 0, cube1, cache=False, s3=True)
cube2 = db.cutout(self.resource, (0, 0, 0), (self.x_dim, self.y_dim, self.z_dim), 0)
np.testing.assert_array_equal(cube1.data, cube2.data)
def test_write_cuboid_off_base_res(self, fake_get_region):
"""Test writing a cuboid to not the base resolution"""
# Generate random data
cube1 = Cube.create_cube(self.resource, [self.x_dim, self.y_dim, self.z_dim])
cube1.random()
cube1.morton_id = 0
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
# populate dummy data
with self.assertRaises(SpdbError):
db.write_cuboid(self.resource, (0, 0, 0), 5, cube1.data, time_sample_start=0)
def test_mark_missing_time_steps_none(self, fake_get_region):
samples = [0, 1, 2, 3, 4, 5, 6]
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
actual = db.mark_missing_time_steps(samples, 2, 5)
self.assertEqual([], actual)
def test_mark_missing_time_steps(self, fake_get_region):
samples = [0, 1, 3, 5, 6, 7]
db = SpatialDB(self.kvio_config, self.state_config, self.object_store_config)
actual = db.mark_missing_time_steps(samples, 1, 4)
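        # Steps 2 and 4 are absent from `samples` within the requested window
        # 1..4 (inclusive), so those are the ones reported as missing.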
self.assertEqual([2, 4], actual)
@patch('redis.StrictRedis', FakeStrictRedis)
class TestSpatialDBImage8Data(SpatialDBImageDataTestMixin, unittest.TestCase):
@patch('spdb.spatialdb.test.setup.get_region')
@patch('redis.StrictRedis', FakeStrictRedis)
def setUp(self, fake_get_region):
""" Set everything up for testing """
# setup resources
fake_get_region.return_value = 'us-east-1'
self.setup_helper = SetupTests()
self.setup_helper.mock = True
self.data = self.setup_helper.get_image8_dict()
self.resource = BossResourceBasic(self.data)
# kvio settings
self.cache_client = redis.StrictRedis(host='https://mytestcache.com', port=6379,
db=1,
decode_responses=False)
self.kvio_config = {"cache_client": self.cache_client, "read_timeout": 86400}
# state settings
self.state_client = redis.StrictRedis(host='https://mytestcache2.com',
port=6379, db=1,
decode_responses=False)
self.state_config = {"state_client": self.state_client}
# object store settings
self.object_store_config = {"s3_flush_queue": 'https://mytestqueue.com',
"cuboid_bucket": "test_bucket",
"page_in_lambda_function": "page_in.test.boss",
"page_out_lambda_function": "page_out.test.boss",
"s3_index_table": "test_table",
"id_index_table": "test_id_table",
"id_count_table": "test_count_table",
}
# Create AWS Resources needed for tests
self.setup_helper.start_mocking()
with patch('spdb.spatialdb.test.setup.get_region') as fake_get_region:
fake_get_region.return_value = 'us-east-1'
self.setup_helper.create_index_table(self.object_store_config["s3_index_table"], self.setup_helper.DYNAMODB_SCHEMA)
self.setup_helper.create_cuboid_bucket(self.object_store_config["cuboid_bucket"])
def tearDown(self):
# Stop mocking
self.setup_helper.stop_mocking()
@patch('redis.StrictRedis', FakeStrictRedis)
class TestSpatialDBImage16Data(SpatialDBImageDataTestMixin, unittest.TestCase):
@patch('spdb.spatialdb.test.setup.get_region')
@patch('redis.StrictRedis', FakeStrictRedis)
def setUp(self, fake_get_region):
""" Set everything up for testing """
# setup resources
fake_get_region.return_value = 'us-east-1'
self.setup_helper = SetupTests()
self.setup_helper.mock = True
self.data = self.setup_helper.get_image16_dict()
self.resource = BossResourceBasic(self.data)
# kvio settings
self.cache_client = redis.StrictRedis(host='https://mytestcache.com', port=6379,
db=1,
decode_responses=False)
self.kvio_config = {"cache_client": self.cache_client, "read_timeout": 86400}
# state settings
self.state_client = redis.StrictRedis(host='https://mytestcache2.com',
port=6379, db=1,
decode_responses=False)
self.state_config = {"state_client": self.state_client}
# object store settings
self.object_store_config = {"s3_flush_queue": 'https://mytestqueue.com',
"cuboid_bucket": "test_bucket",
"page_in_lambda_function": "page_in.test.boss",
"page_out_lambda_function": "page_out.test.boss",
"s3_index_table": "test_table",
"id_index_table": "test_id_table",
"id_count_table": "test_count_table",
}
# Create AWS Resources needed for tests
self.setup_helper.start_mocking()
with patch('spdb.spatialdb.test.setup.get_region') as fake_get_region:
fake_get_region.return_value = 'us-east-1'
self.setup_helper.create_index_table(self.object_store_config["s3_index_table"], self.setup_helper.DYNAMODB_SCHEMA)
self.setup_helper.create_cuboid_bucket(self.object_store_config["cuboid_bucket"])
def tearDown(self):
# Stop mocking
self.setup_helper.stop_mocking()
``` |
{
"source": "JHUAPL/CodeCut",
"score": 2
} |
#### File: JHUAPL/CodeCut/snap_cg.py
```python
import snap
import sys
import idc
import struct
import idautils
import basicutils_7x as basicutils
MAX_DIST = 0
UGraph = []
def add_edge(f, t):
global UGraph
n = basicutils.GetFunctionName(f)
if n != "":
#since we're only doing one edge for each xref, we'll do weight based on distance from the middle of the caller to the callee
f_start = idc.get_func_attr(f, idc.FUNCATTR_START)
if (not UGraph.IsNode(f_start)):
print("Error: had to add node (to): %08x" % f_start)
UGraph.AddNode(f_start)
print("%08x -> %08x" % (f_start, t))
UGraph.AddEdge(t,f_start)
#print "s_%#x -> s_%#x" % (f_start,t)," [len = ",get_weight(func_mid, t), "]"
def add_node(f):
basicutils.ForEveryXrefToD(f, add_edge)
def create_snap_cg():
global UGraph
UGraph= snap.PNGraph.New()
#Add every function linearly, this makes sure the nodes are in order
basicutils.ForEveryFuncInSeg(".text",UGraph.AddNode)
basicutils.ForEveryFuncInSeg(".text",add_node)
for NI in UGraph.Nodes():
print("node id 0x%x with out-degree %d and in-degree %d" %(
NI.GetId(), NI.GetOutDeg(), NI.GetInDeg()))
return UGraph
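# Hedged usage sketch (run from IDA's script console with the snap module on the
# path; the output filename is made up for illustration):
#
#     cg = create_snap_cg()
#     snap.SaveEdgeList(cg, "callgraph_edges.txt", "call graph edge list")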
``` |
{
"source": "JHUAPL/meta-system",
"score": 2
} |
#### File: system/api/results.py
```python
import glob
import json
import os
import re
import pandas as pd
import pydash
from bson import ObjectId
from flask import Blueprint, send_from_directory
from shared.config import config
from shared.log import logger
from system import FlaskExtensions
from system.controllers import classification_job, simulation_job, user_job
from system.models.metrics import SimulationMetrics, ClassificationMetrics
from system.utils.biology import TaxonomicHierarchy
from system.utils.zip import send_to_zip
results_bp = Blueprint("results", __name__, url_prefix=config.SERVER_API_CHROOT)
mongodb = FlaskExtensions.mongodb
@results_bp.route("/results/orig_abundance_profile/<string:user_job_id>")
def get_original_abundance_profile(user_job_id):
# Taxid Abundance Organization files: data/jobs/<user_job_id>/*.tsv
job = user_job.find_by_id(user_job_id=ObjectId(user_job_id))
if job is None:
return "{} does not exist!".format(user_job_id), 501
tsv_name = pydash.get(job, "abundance_tsv", None)
if tsv_name is not None:
path = os.path.join(config.JOBS_DIR, user_job_id, tsv_name)
abundance_df = get_result_dataframe(path, ["taxid", "abundance", "val"])
if abundance_df is None:
return "No abundance profile tsv file for {}!".format(user_job_id), 501
parsed_abundance_json = abundance_df.to_dict("records")
return json.dumps(parsed_abundance_json), 200
else:
logger.error("No abundance TSV found for job {}".format(user_job_id))
return None, 501
@results_bp.route(
"/results/computation/simulation/<string:metric>/<string:user_job_id>/<string:read_type>",
methods=["GET"])
def get_cpu_time_simulation(metric, user_job_id, read_type):
try:
SimulationMetrics(metric)
except ValueError:
return None, 501
data = simulation_job.find_specific_job(user_job_id=ObjectId(user_job_id), read_type=read_type)
res = {metric: pydash.get(data, metric, None)}
return json.dumps(res), 200
@results_bp.route(
"/results/computation/classification/<string:metric>/<string:user_job_id>/<string:read_type>/<string:classifier>",
methods=["GET"])
def get_computational_performance_simulated(metric, user_job_id, read_type, classifier):
try:
ClassificationMetrics(metric)
except ValueError:
return None, 501
data = classification_job.find_specific_job(user_job_id=ObjectId(user_job_id), read_type=read_type,
classifier=classifier)
res = {metric: pydash.get(data, metric, None)}
return json.dumps(res), 200
@results_bp.route(
"/results/computation/classification/<string:metric>/<string:user_job_id>/<string:classifier>",
methods=["GET"])
def get_computational_performance_real(metric, user_job_id, classifier):
try:
ClassificationMetrics(metric)
except ValueError:
return None, 501
data = classification_job.find_specific_job(user_job_id=ObjectId(user_job_id), classifier=classifier)
res = {metric: pydash.get(data, metric, None)}
return json.dumps(res), 200
@results_bp.route("/results/<string:user_job_id>/<string:read_type>/compare", methods=["GET"])
def get_results_for_user_job_and_read_type(user_job_id, read_type):
# Eval tsv: data/jobs/<user_job_id>/<read_type>/compare
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "eval.tsv")
eval_df = get_result_dataframe(path)
if eval_df is None:
return "No evaluation TSV file found!", 501
eval_json = eval_df.to_dict("records")
return json.dumps(eval_json), 200
@results_bp.route("/results/<string:user_job_id>/inclusion", methods=["GET"])
def get_classifier_rank_abu_taxid_org_inclusion_real(user_job_id):
# classifier_rank_abu_taxid_org_inclusion tsv:
# /data/jobs/<user_job_id>/eval/classifier_rank_abu_taxid_org_inclusion.tsv
path = os.path.join(config.JOBS_DIR, user_job_id, "eval", "classifier_rank_abu_taxid_org_inclusion.tsv")
eval_df = get_result_dataframe(path, ['classifier', 'rank', 'abundance', 'taxid', 'name', 'classifier_inclusion'])
    if eval_df is None:
        return "No evaluation TSV file found!", 501
    eval_df['classifier_inclusion'] = eval_df['classifier_inclusion'].str.split(',')
    eval_df['classifier_count'] = eval_df['classifier_inclusion'].str.len()
eval_json = eval_df.to_dict("records")
return json.dumps(eval_json), 200
@results_bp.route("/results/<string:user_job_id>/<string:read_type>/inclusion", methods=["GET"])
def get_classifier_rank_abu_taxid_org_inclusion_simulated(user_job_id, read_type):
# classifier_rank_abu_taxid_org_inclusion tsv:
# /data/jobs/<user_job_id>/<read_type>/eval/classifier_rank_abu_taxid_org_inclusion.tsv
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "classifier_rank_abu_taxid_org_inclusion.tsv")
eval_df = get_result_dataframe(path, ['classifier', 'rank', 'abundance', 'taxid', 'name', 'classifier_inclusion'])
    if eval_df is None:
        return "No evaluation TSV file found!", 501
    eval_df['classifier_inclusion'] = eval_df['classifier_inclusion'].str.split(',')
    eval_df['classifier_count'] = eval_df['classifier_inclusion'].str.len()
eval_json = eval_df.to_dict("records")
return json.dumps(eval_json), 200
@results_bp.route("/results/<string:user_job_id>/<string:read_type>/<string:classifier>", methods=["GET"])
def get_results_for_user_job_and_read_type_and_classifier(user_job_id, read_type, classifier):
# Report files: data/jobs/<user_job_id>/<read_type>/results/*.parsed_<classifier>
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "results", "*.parsed_{}".format(classifier))
parsed_report_df = get_result_dataframe(path, ["taxid", "abundance"])
if parsed_report_df is None:
return "No report file for {} {}!".format(classifier, user_job_id), 501
parsed_report_json = parsed_report_df.to_dict("records")
return json.dumps(parsed_report_json), 200
@results_bp.route("/results/taxid_abu_org/<string:user_job_id>/<string:read_type>/<string:classifier>/<string:rank>",
methods=["GET"])
def get_results_for_taxid_abu_org_by_rank(user_job_id, read_type, classifier, rank):
# Taxid Abundance Organization files: data/jobs/<user_job_id>/<read_type>/eval/tmp/parsed_<classifier>/taxid_abu_org-<rank>.tsv
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid_abu_org-{}.tsv".format(rank))
taxid_abu_org_df = get_result_dataframe(path, ["abundance", "taxid", "name"])
if taxid_abu_org_df is None:
return "No Tax ID Abundance Organization file for {} {} {}!".format(rank, read_type, user_job_id), 501
taxid_abu_org_json = taxid_abu_org_df.to_dict("records")
return json.dumps(taxid_abu_org_json), 200
@results_bp.route("/results/taxid_abu_org/<string:user_job_id>/<string:classifier>",
methods=["GET"])
def get_hierarchical_taxid_real(user_job_id, classifier):
# -------------------------------- Get result taxid abundance hierarchy --------------------------------
path = os.path.join(config.JOBS_DIR, user_job_id, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts.padded")
if not os.path.exists(path):
logger.warning("taxid.abu.ts.padded not found! Using taxid.abu.ts")
path = os.path.join(config.JOBS_DIR, user_job_id, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts")
taxid_abu_ts_df = get_result_dataframe(path, ["taxid", "abundance", "hierarchy"])
if taxid_abu_ts_df is None:
return "No taxid.abu.ts file for {} {} {}!".format(user_job_id, classifier), 501
# -------------------------------- Build hierarchy --------------------------------
hierarchy_col = taxid_abu_ts_df["hierarchy"].tolist()
abundance_col = taxid_abu_ts_df["abundance"].tolist()
tree = dict()
if len(hierarchy_col) > 0:
logger.info("BUILDING HIERARCHY FOR {} TAXONOMIC IDs".format(len(hierarchy_col)))
tree = build_hierarchy(hierarchy_list=hierarchy_col, abundance_list=abundance_col)
else:
logger.warning("taxid.abu.ts IS EMPTY!")
return json.dumps(tree), 200
@results_bp.route("/results/taxid_abu_org/<string:user_job_id>/<string:read_type>/<string:classifier>",
methods=["GET"])
def get_hierarchical_taxid_simulated(user_job_id, read_type, classifier):
# -------------------------------- Get result taxid abundance hierarchy --------------------------------
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts.padded")
if not os.path.exists(path):
logger.warning("taxid.abu.ts.padded not found! Using taxid.abu.ts")
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "parsed_{}_dir".format(classifier),
"taxid.abu.ts")
taxid_abu_ts_df = get_result_dataframe(path, ["taxid", "abundance", "hierarchy"])
if taxid_abu_ts_df is None:
return "No taxid.abu.ts file for {} {} {}!".format(user_job_id, read_type, classifier), 501
# -------------------------------- Get baseline taxid abundance hierarchy --------------------------------
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "BASELINE1.tsv_dir",
"taxid.abu.ts.padded")
if not os.path.exists(path):
logger.warning("taxid.abu.ts.padded not found for Baseline! Using taxid.abu.ts")
path = os.path.join(config.JOBS_DIR, user_job_id, read_type, "eval", "tmp", "BASELINE1.tsv_dir", 'taxid.abu.ts')
taxid_abu_baseline_ts_df = get_result_dataframe(path, ["taxid", "abundance", "hierarchy"])
taxid_abu_baseline_ts_df["abundance"] = 0
# ---------------------------- Merge the baseline and classifier abundance ts files ----------------------------
taxid_abu_ts_df = pd.concat([taxid_abu_ts_df, taxid_abu_baseline_ts_df]).reset_index().drop_duplicates(
subset=["taxid"], keep="first")
pd.options.display.max_colwidth = 10000
# -------------------------------- Build hierarchy --------------------------------
hierarchy_col = taxid_abu_ts_df["hierarchy"].tolist()
abundance_col = taxid_abu_ts_df["abundance"].tolist()
tree = dict()
if len(hierarchy_col) > 0:
logger.info("BUILDING HIERARCHY FOR {} TAXONOMIC IDs".format(len(hierarchy_col)))
tree = build_hierarchy(hierarchy_list=hierarchy_col, abundance_list=abundance_col)
else:
logger.warning("taxid.abu.ts IS EMPTY!")
return json.dumps(tree), 200
def build_hierarchy(hierarchy_list: list, abundance_list: list):
pattern = re.compile(r"(\d+);([/\-A-Z\w\s.\[\]=]+)\(?([\w\s]+)\)?")
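    # Each hierarchy segment is expected to look roughly like
    # "562;Escherichia coli(species)", i.e. taxid;name(rank); this example value
    # is illustrative, not taken from real job output.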
hier_per_taxid = []
for i, h in enumerate(hierarchy_list):
try:
res = re.findall(pattern, h)
hier_per_taxid.append(res)
except TypeError as e:
logger.warning("Cannot parse {} in hierarchical taxid. Line {}.".format(h, i))
proper_taxid = False
while not proper_taxid: # find root node
try:
taxid, name, rank = hier_per_taxid[0][0]
proper_taxid = True
except IndexError:
hier_per_taxid.pop(0)
root_node = TaxonomicHierarchy.Node(taxid=taxid, name=name, rank=rank, abundance=None)
th = build_tree(root_node=root_node, hier_per_taxid=hier_per_taxid, abundance_list=abundance_list)
return th.to_dict()
def build_tree(root_node: TaxonomicHierarchy.Node, hier_per_taxid: list, abundance_list: list):
th = TaxonomicHierarchy(root=root_node)
# Build entire tree
for i, hier in enumerate(hier_per_taxid):
hier.pop(0) # remove root node, assumes root node is first in list
parent_node = root_node
for n, h in enumerate(hier):
taxid, name, rank = h
if rank == "subspecies":
rank = "strain"
if rank in ["superkingdom", "phylum", "class", "order", "family", "genus", "species",
"strain"]: # ignore no rank for now so that all ranks will consistent across nesting levels
if h == hier[-1]: # if last element in list
node = TaxonomicHierarchy.Node(taxid=taxid, name=name, rank=rank, abundance=abundance_list[i])
th.add_child_to_tree(parent=parent_node, child=node)
else:
node = TaxonomicHierarchy.Node(taxid=taxid, name=name, rank=rank, abundance=None)
th.add_child_to_tree(parent=parent_node, child=node)
parent_node = node
return th
def get_result_dataframe(path: str, columns: list or None = None):
try:
filepath = glob.glob(path)[0]
if not filepath:
logger.error("No {} found!".format(path))
return None
except IndexError:
return None
if columns is not None:
return pd.read_csv(filepath, sep="\t", encoding="utf-8", names=columns)
else:
return pd.read_csv(filepath, sep="\t", encoding="utf-8")
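# Hedged usage sketch (the path and column names are illustrative; this mirrors
# how the route handlers above call this helper):
#
#     df = get_result_dataframe(os.path.join(config.JOBS_DIR, "<job_id>", "eval", "eval.tsv"))
#     if df is not None:
#         records = df.to_dict("records")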
@results_bp.route("/results/download/<string:user_job_id>/<string:filename>",
methods=["GET"]) # download relevant job files
def download(user_job_id, filename):
"""
    Accept a user job id and bundle the result files related to it into a zip for download.
    :param user_job_id: the job to download files for
    :param filename: name of the zip file to create and send back
    :return: the zip file as an attachment, or an error message and status code
"""
try:
logger.info("DOWNLOAD JOBS REQUEST SUBMITTED FOR USER JOB {}".format(user_job_id))
user_job_data = user_job.find_by_id(user_job_id=ObjectId(user_job_id))
read_types = [""] # list with empty quotes is needed to ensure the classify only jobs work
if "read_types" in user_job_data: # classify only jobs will not have a specified read_type
read_types = pydash.get(user_job_data, "read_types")
classifiers = pydash.get(user_job_data, "classifiers")
# Make zip
zip_loc = os.path.join(config.DATA_DIR, "jobs", user_job_id)
list_of_files = list_files_for_download(read_types=read_types, classifiers=classifiers, job_path=zip_loc)
send_to_zip(base_path=zip_loc, list_of_files=list_of_files, outfile=filename)
if not os.path.exists(os.path.join(zip_loc, filename)):
return "File does not exist", 400
logger.info("SUCCESSFULLY MOVED JOB {} TO ZIP FOR DOWNLOAD".format(user_job_id))
logger.info("ABOUT TO ATTEMPT SEND RESULTS.ZIP TO FRONT END...")
return send_from_directory(directory=zip_loc, filename=filename, as_attachment=True)
except Exception as e:
logger.warning("DOWNLOAD JOB {} REQUEST FAILED".format(user_job_id))
return "DOWNLOAD JOB {} REQUEST FAILED".format(user_job_id), 501
def list_files_for_download(read_types: list, classifiers: list, job_path: str) -> list:
"""
Download the existing UserJob by placing the relevant files in a zip folder.
:param read_types:
:param classifiers:
:param job_path:
:return: List of files to include in download
"""
# list files in zip folder
list_of_files = [] # list of files to add to zip, with relative paths to user_job_path
try:
for r in read_types:
for c in classifiers:
list_of_files.append(os.path.join(r, c, c + ".report")) # get .report file
list_of_files.append(os.path.join(r, c, c + ".result")) # get .results file
# inclusion and eval tsvs are only outputted per read_type
if not ("" in read_types): # Classify only jobs do not have eval.tsv
list_of_files.append(os.path.join(r, "eval", "eval.tsv")) # get eval.tsv file
list_of_files.append(
os.path.join(r, "eval", "classifier_rank_abu_taxid_org_inclusion.tsv")) # get inclusion.tsv file
# results folder contains parsed classifier results
parsed_results = os.listdir(os.path.join(job_path, r, "results"))
for parsed in parsed_results: # add file structure to each parsed results file
list_of_files.append(os.path.join(r, "results", parsed))
except Exception as e:
logger.warning("ERROR COMPILING RESULTS IN ZIP FOLDER")
return list_of_files
return list_of_files
```
#### File: system/controllers/classification_job.py
```python
from bson import ObjectId
from pymodm import connect
from shared.config import config
from system.controllers import controllers
from system.models.job_manager import JobStatus
from system.models.schemas_loader import SchemaLoader
connect(config.MONGO_URI)
def insert(user_job_id: ObjectId, classifier: str, fastq_path: str, read_type: str or None = None) -> ObjectId:
"""
Insert a new ClassificationJob into the collection.
:param user_job_id: Which UserJob is associated with this ClassificationJob
:param classifier: The classifier to use
:param fastq_path: The input fastq file to read from
:return: The ObjectId of the ClassificationJob added
"""
queue_position = -1
if read_type is None:
to_insert = dict(user_job_id=user_job_id, classifier=classifier, fastq_path=fastq_path,
queue_position=queue_position, status=JobStatus.QUEUED)
return controllers.insert_one(collection=SchemaLoader.CLASSIFICATION_JOB, data=to_insert)
else:
to_insert = dict(user_job_id=user_job_id, classifier=classifier, fastq_path=fastq_path, read_type=read_type,
queue_position=queue_position, status=JobStatus.QUEUED)
return controllers.insert_one(collection=SchemaLoader.CLASSIFICATION_JOB, data=to_insert)
def find_by_id(class_job_id: ObjectId, as_json: bool = False) -> str or dict:
"""
Find Classification job using ObjectId
:param class_job_id: The Classification job id to search for
:param as_json: Whether or not to return the results as a json
:return: a str or dict of the Classification Job object
"""
return controllers.find_by_id(collection=SchemaLoader.CLASSIFICATION_JOB, obj_id=class_job_id, as_json=as_json)
def find_specific_job(user_job_id: ObjectId, classifier: str, read_type: str or None = None,
as_json: bool = False) -> str or dict:
"""
Find classification job by user_job_id, read_type, and classifier
:param user_job_id: The User Job id that triggered this classification job
:param read_type: The read type you are searching for
:param classifier: The classifier you are searching for
:param as_json: Whether or not to return the results as a json
    :return: a str or dict of the matching ClassificationJob object(s)
"""
if read_type is not None:
filter_map = dict(user_job_id=user_job_id, read_type=read_type, classifier=classifier)
else:
filter_map = dict(user_job_id=user_job_id, classifier=classifier)
return controllers.find_by_multi_key_value(collection=SchemaLoader.CLASSIFICATION_JOB, filter_map=filter_map,
as_json=as_json)
def update_wall_clock_time(obj_id: ObjectId, time: float):
"""
Takes an input float (in seconds) duration and updates the wall_clock_time of the document.
    :param time: the duration, in seconds, to update with
:param obj_id: the document to update
:return:
"""
controllers.update_by_id(SchemaLoader.CLASSIFICATION_JOB, obj_id, "wall_clock_time", time)
def update_max_memory_MBs(obj_id: ObjectId, max_mem: float):
"""
Update what the max memory of the classification job is.
:param obj_id: the classification job to update
:param max_mem: the max memory the classification job takes during the job
:return: None
"""
controllers.update_by_id(SchemaLoader.CLASSIFICATION_JOB, obj_id, "max_memory_MBs", max_mem)
def update_cpu_time(obj_id: ObjectId, time: float):
"""
How long (in seconds) the specified classification took in cpu time.
:param obj_id: the classification job to update
:param time: how long the classification job took in cpu time
:return: None
"""
controllers.update_by_id(SchemaLoader.CLASSIFICATION_JOB, obj_id, "cpu_time", time)
def update_container_id(obj_id: ObjectId, container_id: str or None):
"""
Track the id of the container that is currently being run via Docker.
:param obj_id: the classification job to update
:param container_id: the container id
:return: None
"""
controllers.update_by_id(SchemaLoader.CLASSIFICATION_JOB, obj_id, "container_id", container_id)
def update_status(obj_id: ObjectId, new_status: str):
"""
Update the status of the specified ClassificationJob.
:param obj_id: the classification job to update
:param new_status: the status to update the classification job with
:return: None
"""
controllers.update_by_id(SchemaLoader.CLASSIFICATION_JOB, obj_id, "status", new_status)
```
#### File: meta-system/system/evaluate.py
```python
import glob
import os
import shlex
import subprocess
import time
from datetime import datetime
from enum import Enum
import pydash
from bson import ObjectId
from shared.config import config
from shared.log import logger
from system.controllers import evaluation_job, user_job
from system.models.job_manager import JobMode, JobStatus, JobType
from system.utils.job_failure import handle_fail
metrics_root_dir = os.path.join(config.ROOT_DIR, "system", "metrics", "evaluation")
def run_evaluation_job(job: dict):
# -------------------- Parse input --------------------
user_job_id = pydash.get(job, "user_job_id")
eval_job_id = pydash.get(job, "_id")
if eval_job_id is None:
logger.warning("JOB ID NOT INCLUDED!")
return
read_type = pydash.get(job, "read_type", None)
evaluation_job.update_status(obj_id=eval_job_id, new_status=str(JobStatus.PROCESSING))
user_job_data = user_job.find_by_id(user_job_id=user_job_id)
if user_job_data is None:
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id, message="User Job Data not provided!")
return
job_mode = pydash.get(user_job_data, "mode", None)
if job_mode is None:
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id, message="Job mode not provided!")
try:
job_mode_enum = JobMode(job_mode)
except ValueError as e:
job_mode_enum = JobMode.SIMULATED_READS # use Simulated mode as default
        handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id, message=str(e))
if job_mode_enum == JobMode.SIMULATED_READS:
# eval_job_dir: data/jobs/<user_job_id>/<read_type>
eval_job_dir = os.path.join(config.JOBS_DIR, str(user_job_id), read_type)
elif job_mode_enum == JobMode.REAL_READS:
# eval_job_dir: data/jobs/<user_job_id>
eval_job_dir = os.path.join(config.JOBS_DIR, str(user_job_id))
else:
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id, message="Invalid Job Mode!")
return
classifiers = pydash.get(user_job_data, "classifiers", [])
logger.info("RUNNING EVALUATION FOR USER JOB {} ({})".format(str(user_job_id), classifiers))
t_dur, t_dur_cpu = evaluate(job_dir=eval_job_dir, classifiers=classifiers, eval_job_id=eval_job_id,
user_job_id=user_job_id, job_mode_enum=job_mode_enum)
logger.info("EVALUATION FINISHED FOR USER JOB {} IN {} ({} CPU TIME)".format(
str(user_job_id), str(t_dur), str(t_dur_cpu)))
return
def evaluate(job_dir: str, classifiers: list, eval_job_id: ObjectId, user_job_id: ObjectId, job_mode_enum: Enum) -> (
datetime or None, datetime or None):
t_start = datetime.now()
t_start_cpu = datetime.fromtimestamp(time.process_time())
eval_dir = os.path.join(job_dir, "eval")
if not os.path.exists(eval_dir):
os.mkdir(eval_dir)
# -------------------- Parse reports --------------------
parsed_path = parse_reports(classifiers=classifiers, job_dir=job_dir, eval_job_id=eval_job_id)
# -------------------- Compare --------------------
# Construct command
if job_mode_enum == JobMode.SIMULATED_READS:
# Truth file path (-b): data/jobs/<user_job_id>/*.tsv
# Assume first file in glob is the baseline abundance tsv file
        truth_files = glob.glob(os.path.join(job_dir, os.pardir, "*.tsv"))
        if not truth_files:
            handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id,
                        message="NO ABUNDANCE PROFILE TSV IN {}!".format(os.path.join(job_dir, os.pardir)))
            return None, None
        truth_file = truth_files[0]
metacompare_bin = os.path.join(metrics_root_dir, "compare", "metacompare.sh")
if not os.path.exists(metacompare_bin):
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id,
message="NO COMPARE SCRIPT {}!".format(metacompare_bin))
return
# Command: metacompare.sh -b {} -i {} -o {} -t {}
# Parsed path (-i): data/jobs/<user_job_id>/<read_type>/results
# Output dir (-o): data/jobs/<user_job_id>/<read_type>/eval
cmd = "{} -b {} -i {} -o {} -t {}".format(metacompare_bin, truth_file, parsed_path, eval_dir,
config.NUM_EVAL_THREADS)
elif job_mode_enum == JobMode.REAL_READS:
metacompare_bin = os.path.join(metrics_root_dir, "compare", "metacompare_realdata.sh")
if not os.path.exists(metacompare_bin):
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id,
message="NO COMPARE SCRIPT {}!".format(metacompare_bin))
return None, None
# Command: metacompare_realdata.sh -i {} -o {} -t {}
# Parsed path (-i): data/jobs/<user_job_id>/<read_type>/results
# Output dir (-o): data/jobs/<user_job_id>/eval
cmd = "{} -i {} -o {} -t {}".format(metacompare_bin, parsed_path, eval_dir,
config.NUM_EVAL_THREADS)
else:
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id, message="Invalid Job Mode!")
return None, None
split_cmd = shlex.split(cmd)
    logger.info("RUNNING METACOMPARE.SH")
    subprocess.run(split_cmd)  # Make sure it is chmod +x
t_end_cpu = datetime.fromtimestamp(time.process_time())
t_dur_cpu = t_end_cpu - t_start_cpu
t_end = datetime.now()
t_dur = t_end - t_start
# Update performance metrics for job.
evaluation_job.update_cpu_time(obj_id=eval_job_id, time=t_dur_cpu.total_seconds())
evaluation_job.update_wall_clock_time(obj_id=eval_job_id, time=t_dur.total_seconds())
evaluation_job.update_status(obj_id=eval_job_id, new_status=str(JobStatus.COMPLETED))
user_job.update_completion_time(obj_id=user_job_id, time=t_end)
return t_dur, t_dur_cpu
def parse_reports(classifiers: list, job_dir: str, eval_job_id: ObjectId) -> str:
# Parsed paths (-o): <eval_job_dir>/results
parsed_path = os.path.join(job_dir, "results")
if not os.path.exists(parsed_path):
os.mkdir(parsed_path)
logger.info("Attempting to download taxdump")
taxdump_bin = os.path.join(metrics_root_dir, "parsers", "download_taxdump.sh")
try:
subprocess.run(taxdump_bin, capture_output=False, check=True)
except (subprocess.TimeoutExpired, subprocess.CalledProcessError) as e:
logger.error("Failed to download taxdump", exc_info=e)
return parsed_path
for c in classifiers:
# Report paths (-i): <eval_job_dir>/<classifier>/<classifier>.report
report_path = os.path.join(job_dir, c, "{}.report".format(c))
if not os.path.exists(report_path):
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id,
message="NO REPORT FILE {}!".format(report_path))
continue
parser_bin = os.path.join(metrics_root_dir, "parsers", "parse_{}.sh".format(c))
if not os.path.exists(parser_bin):
handle_fail(job_type=JobType.EVALUATION, job_id=eval_job_id,
message="NO PARSING SCRIPT {}!".format(parser_bin))
continue
# Command: parse_kraken.sh -i {} -o {}
cmd = "{} -i {} -o {}".format(parser_bin, report_path, parsed_path)
split_cmd = shlex.split(cmd)
logger.info("PARSING {} REPORT".format(c))
# find meta_system/system/metrics/evaluation -type f -iname "*.sh" -exec chmod +x {} \;
subprocess.run(split_cmd) # Make sure it is chmod +x
return parsed_path
```
#### File: system/utils/errors.py
```python
class ValidationError(Exception):
pass
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
```
#### File: system/utils/readtypes.py
```python
from collections import namedtuple
import pydash
import yaml
from shared.config import config
from system.utils.security import str_normalize_attr
def get_read_types() -> (list, dict):
"""
    Read the read types from read_types.yaml and create namedtuple objects for each entry
"""
    with open(config.READTYPES_PATH, "r") as yaml_file:
        read_type_names, read_types_info = get_read_types_parser(yaml_file)
    return read_type_names, read_types_info
def get_read_types_parser(yaml_file):
info = yaml.load(yaml_file, Loader=yaml.FullLoader)
read_types_info = dict()
read_types = namedtuple("read_types", info[next(iter(info))].keys())
read_type_names = list(info.keys())
for name, values in info.items():
values = {k: str_normalize_attr(v) for k, v in values.items()}
v_read_types = read_types(**values)
pydash.set_(read_types_info, name, v_read_types)
return read_type_names, read_types_info
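# Illustrative sketch of the YAML layout this parser expects; the entry names ("iseq") and the
# fields ("name", "simulator") below are assumptions for demonstration only -- the real keys live
# in the file pointed to by config.READTYPES_PATH:
#
#   iseq:
#     name: iSeq
#     simulator: insilicoseq
#   r9:
#     name: Nanopore R9
#     simulator: nanosim
#
# With that input, get_read_types_parser() would return something like:
#   (["iseq", "r9"], {"iseq": read_types(name="iSeq", simulator="insilicoseq"), ...})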
```
#### File: system/controllers/test_simulation_job.py
```python
import unittest
import pydash
import pymongo
from pymodm import connect
from shared.config import config
from system.controllers import simulation_job, user_job, user
from system.models.job_manager import JobMode, JobStatus
class TestSimulationJobController(unittest.TestCase):
def setUp(self) -> None:
self.mongo_uri = config.MONGO_URI
self.my_client = pymongo.MongoClient(self.mongo_uri)
self.db_name = "TestMETA"
self.db = self.my_client[self.db_name]
connect(self.mongo_uri + self.db_name) # Connect to MongoDB
self.user_id = user.insert("<NAME>", "<EMAIL>", [])
self.user_job_id = user_job.insert(user_id=self.user_id, title="my job title", read_types=["Z"],
classifiers=["A", "B"], mode=JobMode.SIMULATED_READS)
self.simulation_job_id = simulation_job.insert(user_job_id=self.user_job_id, read_type="iseq",
abundance_tsv="foo.tsv", number_of_reads=5000)
def test_insert(self):
simulation_job_id = simulation_job.insert(user_job_id=self.user_job_id, read_type="iseq",
abundance_tsv="foo.tsv", number_of_reads=5000)
self.assertIsNotNone(simulation_job_id)
def test_find_by_id(self):
data = simulation_job.find_by_id(sim_job_id=self.simulation_job_id)
self.assertIsNotNone(data)
def test_find_specific_job(self):
data = simulation_job.find_specific_job(user_job_id=self.user_job_id, read_type="foobar")
self.assertIsNone(data)
data = simulation_job.find_specific_job(user_job_id=self.user_job_id, read_type="iseq")
self.assertIsNotNone(data)
def test_update_cpu_time(self):
simulation_job.update_cpu_time(obj_id=self.simulation_job_id, time=0.12789)
data = simulation_job.find_by_id(sim_job_id=self.simulation_job_id)
res = pydash.get(data, "cpu_time")
self.assertEqual(res, 0.12789)
def test_update_wall_clock_time(self):
simulation_job.update_wall_clock_time(obj_id=self.simulation_job_id, time=0.0056)
data = simulation_job.find_by_id(sim_job_id=self.simulation_job_id)
res = pydash.get(data, "wall_clock_time")
self.assertEqual(res, 0.0056)
def test_update_container_id(self):
simulation_job.update_container_id(obj_id=self.simulation_job_id, container_id="8cd216ad9b38")
data = simulation_job.find_by_id(sim_job_id=self.simulation_job_id)
res = pydash.get(data, "container_id")
self.assertEqual(res, "8cd216ad9b38")
def test_update_status(self):
simulation_job.update_status(obj_id=self.simulation_job_id, new_status=str(JobStatus.QUEUED))
data = simulation_job.find_by_id(sim_job_id=self.simulation_job_id)
res = pydash.get(data, "status")
self.assertEqual(res, str(JobStatus.QUEUED))
def tearDown(self) -> None:
col_list = self.db.list_collection_names()
for col in col_list:
self.db.drop_collection(col)
``` |
{
"source": "JHUAPL/scatterbrained",
"score": 3
} |
#### File: scatterbrained/examples/discovery_engine.py
```python
import asyncio
import dataclasses
import json
from loguru import logger
import scatterbrained as sb
async def on_appear(v):
await asyncio.sleep(0.1)
logger.info(f"Appear: {v}")
async def on_disappear(v):
await asyncio.sleep(0.1)
logger.info(f"Disappear: {v}")
async def on_error(e):
await asyncio.sleep(0.1)
logger.opt(exception=e).error("local error")
async def on_remote_recv(v):
logger.info(f"Remote: {v}")
async def on_remote_error(e):
logger.opt(exception=e).error("remote error")
async def main():
# NOTE: in a real deployment you'd want everything to use the same port, but because we're running on the
# same system here, we need to bind to different ports.
local_pub = sb.discovery.udp.UDPBroadcaster("127.0.0.1", port=9002)
local_sub = sb.discovery.udp.UDPReceiver("127.0.0.1", port=9001)
# Fake a remote node.
remote_pub = sb.discovery.udp.UDPBroadcaster("127.0.0.1", port=9001)
remote_sub = sb.discovery.udp.UDPReceiver("127.0.0.1", port=9002)
await asyncio.wait([local_pub.open(), local_sub.open(), remote_pub.open(), remote_sub.open()])
engine = sb.discovery.DiscoveryEngine(
local_pub,
local_sub,
identities=[sb.types.Identity(id="baz", namespace="bar", host="omg", port=3223)],
heartbeat=2,
)
await engine.start(on_appear=on_appear, on_disappear=on_disappear, on_error=on_error)
peer = sb.types.Identity(id="foo", namespace="bar", host="meme", port=32233)
remote_sub.subscribe(on_recv=on_remote_recv, on_error=on_error)
await remote_pub.publish(json.dumps(dataclasses.asdict(peer)).encode())
await asyncio.sleep(15)
await engine.stop()
await asyncio.wait([local_pub.close(), local_sub.close(), remote_pub.close(), remote_sub.close()])
if __name__ == "__main__":
asyncio.run(main())
```
#### File: scatterbrained/discovery/types.py
```python
from typing import Awaitable, Callable, Optional, Protocol
class Publisher(Protocol):
"""
A Publisher is a class that can publish a message to a set of peers.
Note that this is a protocol and shouldn't be used directly!
"""
async def publish(self, data: bytes) -> None:
"""
Publish the given payload to a set of peers.
"""
...
async def open(self) -> None:
"""
Open the underlying connection mechanism, enabling this instance to send messages.
"""
...
async def close(self) -> None:
"""
Close the underlying connection mechanism, stopping this instance from sending messages.
"""
...
class Subscriber(Protocol):
"""
A Subscriber is a class that can subscribe to messages from a set of peers.
Note that this is a protocol and shouldn't be used directly!
"""
def subscribe(
self,
on_recv: Callable[[bytes], Awaitable[None]],
on_error: Optional[Callable[[Exception], Awaitable[None]]] = None,
) -> None:
"""
Subscribe to messages from a set of peers, and attach async callbacks.
Arguments:
on_recv (Callable[[bytes], Awaitable[None]]): The callback to call when a message is received.
on_error (Optional[Callable[[Exception], Awaitable[None]]]): The callback to run when an error occurs.
Returns:
None
"""
...
async def open(self) -> None:
"""
Open the underlying connection mechanism, enabling this instance to receive messages.
"""
...
async def close(self) -> None:
"""
Close the underlying connection mechanism, stopping this instance from receiving messages.
"""
...
__all__ = ["Publisher", "Subscriber"]
``` |
{
"source": "JHUAPL/SIMoN",
"score": 3
} |
#### File: SIMoN/graphs/build.py
```python
from datetime import datetime
start = datetime.now()
print(start)
from geopandas import read_file, sjoin
import networkx as nx
from networkx.readwrite import json_graph
import geojson
import itertools
from collections import defaultdict
import uuid
import os
import json
with open(os.path.join(os.path.dirname(__file__), "config.json")) as f:
config = json.load(f)
# where to save the graphs
save_dir = os.path.join(os.path.dirname(__file__), "out")
# where the shapefiles are stored
shapefile_dir = os.path.join(os.path.dirname(__file__), "shapefiles")
# the coordinate reference system the shapefiles are defined on
projection = config["projection"]
# factor used to convert the areas calculated for shapefile geometries from square meters to square kilometers
scale_factor = config["scale_factor"]
# the minimum area of a meet node (a node formed by the intersection of two disparate geographic granularities), in square kilometers
minimum_intersection_area = config["minimum_intersection_area"]
# Define the abstract graph with a list of tuples of the form (source, destination), where source is a lower resolution granularity that encompasses destination, a higher resolution granularity.
abstract_edges = config["abstract_edges"]
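# For reference, with the granularities used below a plausible edge list would look like the
# following (this exact list is an illustrative assumption -- the real one comes from config.json):
#   "abstract_edges": [["usa48", "state"], ["usa48", "nerc"], ["usa48", "huc8"],
#                      ["usa48", "latlon"], ["state", "county"]]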
# Save the instance graph with its geometries included. This will create a very large graph.
save_shapes = config["save_shapes"]
# open the shapefiles for each granularity
states = read_file(f"{shapefile_dir}/state.shp").to_crs(epsg=projection)
counties = read_file(f"{shapefile_dir}/county.shp").to_crs(epsg=projection)
nercs = read_file(f"{shapefile_dir}/nerc.shp").to_crs(epsg=projection)
huc8s = read_file(f"{shapefile_dir}/huc8.shp").to_crs(epsg=projection)
latlons = read_file(f"{shapefile_dir}/latlon.shp").to_crs(epsg=projection)
# nerc regions
nercs["country"] = nercs.apply(lambda x: 1, axis=1)
nercs["area"] = nercs.apply(lambda row: row["geometry"].area, axis=1)
# nerc shapefile has the smallest scope, so it is used to define the scope of the USA
country = nercs.dissolve(by="country", aggfunc={"area": "sum"})
country["NAME"] = "usa48"
# counties
counties["county_polygons"] = counties.geometry.copy()
counties.geometry = counties.geometry.representative_point()
counties = sjoin(counties, states, how="inner", op="within")
counties.geometry = counties["county_polygons"]
counties.drop("index_right", axis=1, inplace=True)
counties.rename(columns={"ID_left": "ID"}, inplace=True)
counties.rename(columns={"NAME_left": "NAME"}, inplace=True)
counties.rename(columns={"ID_right": "parent_ID"}, inplace=True)
# latitude-longitude grid squares
latlons["parent_ID"] = "usa48"
huc8s["parent_ID"] = "usa48"
nercs["parent_ID"] = "usa48"
states["parent_ID"] = "usa48"
# each shapefile should have 4 attributes: ID, NAME, parent_ID, geometry
shapefiles = {}
shapefiles["state"] = states
shapefiles["county"] = counties
shapefiles["nerc"] = nercs
shapefiles["huc8"] = huc8s
shapefiles["latlon"] = latlons
for granularity, shapefile in shapefiles.items():
for column in ["ID", "NAME", "geometry", "parent_ID"]:
if column not in shapefile.columns:
print(
f"WARNING: required column {column} not in shapefile {granularity}"
)
# display the graph
def draw_graph(graph, display=False):
if display:
nx.draw(graph, labels={node: node for node in graph.nodes()})
print("{} nodes:".format(len(graph.nodes())))
print("{} edges:".format(len(graph.edges())))
# construct a graph from a list of edges
def build_graph(original_edges, is_abstract=False):
graph = nx.DiGraph()
graph.is_abstract = is_abstract
    for edge in original_edges:
        graph.add_edge(*edge)
    return graph
# returns the name of a wedge node, based on its parents
def meet(a, b):
sort = sorted((a, b))
return "{}^{}".format(sort[0], sort[1])
# add wedge nodes to an abstact graph
def add_abstract_wedges(abstract_graph, original_nodes):
for combo in list(itertools.combinations(original_nodes, 2)):
if not (
nx.has_path(abstract_graph, combo[0], combo[1])
or nx.has_path(abstract_graph, combo[1], combo[0])
):
new_node = meet(combo[0], combo[1])
abstract_graph.add_edge(combo[0], new_node)
abstract_graph.add_edge(combo[1], new_node)
# add wedge nodes to an instance graph
def add_instance_wedges(graph, combos, instance_graph_types):
for combo in combos:
check_intersection = graph.nodes[combo[0]]["shape"].intersects(
graph.nodes[combo[1]]["shape"]
) and not graph.nodes[combo[0]]["shape"].touches(
graph.nodes[combo[1]]["shape"]
)
if not check_intersection:
continue
try:
shape = graph.nodes[combo[1]]["shape"].intersection(
graph.nodes[combo[0]]["shape"]
)
area = shape.area / scale_factor
new_node = meet(combo[0], combo[1])
if area >= minimum_intersection_area:
graph.add_edge(combo[0], new_node)
graph.add_edge(combo[1], new_node)
instance_graph_types[
meet(
graph.nodes[combo[0]]["type"],
graph.nodes[combo[1]]["type"],
)
].append(new_node)
graph.nodes[new_node]["shape"] = shape
graph.nodes[new_node]["area"] = area
graph.nodes[new_node]["type"] = meet(
graph.nodes[combo[0]]["type"],
graph.nodes[combo[1]]["type"],
)
else:
pass
# print(f"{new_node} is too small to be added. area = {area}")
except Exception as e:
print(
"ERROR: could not calculate intersection of {} with {}: {}".format(
combo[0], combo[1], e
)
)
if not graph.nodes[combo[0]]["shape"].is_valid:
print(
f"WARNING: {combo[0]} has invalid geometry, area = {graph.nodes[combo[0]]['shape'].area/scale_factor}"
)
# graph.remove_node(combo[0])
# print(f"removed {combo[0]} from graph")
if not graph.nodes[combo[1]]["shape"].is_valid:
print(
f"WARNING: {combo[1]} has invalid geometry, area = {graph.nodes[combo[1]]['shape'].area/scale_factor}"
)
# graph.remove_node(combo[1])
# print(f"removed {combo[1]} from graph")
return instance_graph_types
# construct an instance graph
def build_instance_graph(root):
instance_graph_types = defaultdict(list)
instance_graph = build_graph([])
instance_graph.add_node(root, name=root, type=root, shape=None, area=None)
abstract_nodes_bfs = [root] + [
v for u, v in nx.bfs_edges(abstract_graph, root)
]
for node in abstract_nodes_bfs:
for child in abstract_graph.successors(node):
shapefile = shapefiles[child]
for index, row in shapefile.iterrows():
ID = str(row["ID"])
shape = row["geometry"]
area = row["geometry"].area / scale_factor
name = row["NAME"]
if not shape.is_valid:
print(
f"WARNING: instance node {name} has invalid geometry. area = {area}"
)
if area < minimum_intersection_area:
print(
f"WARNING: instance node {name} is smaller than minimum intersection area. area = {area}"
)
instance_graph.add_node(
ID, name=name, type=child, shape=shape, area=area
)
instance_graph_types[child].append(ID)
instance_graph.add_edge(str(row["parent_ID"]), ID)
instance_graph.add_node(
root,
name=root,
type=root,
shape=None,
area=float(country.area) / scale_factor,
)
return instance_graph, instance_graph_types
# build the abstract graph
abstract_graph = build_graph(abstract_edges, is_abstract=True)
root = list(nx.topological_sort(abstract_graph))[0]
abstract_nodes = [root] + [v for u, v in nx.bfs_edges(abstract_graph, root)]
# build the instance graph
instance_graph, instance_graph_types = build_instance_graph(root)
# add wedges to the abstract graph
add_abstract_wedges(abstract_graph, abstract_nodes)
# iterate through the abstract graph wedges, in BFS order
abstract_graph_wedges = [
v for u, v in nx.bfs_edges(abstract_graph, root) if v not in abstract_nodes
]
for wedge in abstract_graph_wedges:
l, r = wedge.split("^")
parents = [
parent
for parent in abstract_graph.predecessors(wedge)
if parent in abstract_graph_wedges
]
if parents:
parent = sorted(
parents, key=lambda node: len(instance_graph_types[node])
)[0]
ll, rr = parent.split("^")
combos = []
for instance in instance_graph_types[parent]:
instance_l, instance_r = instance.split("^")
if l == instance_graph.nodes[instance_l].get("type"):
for element in instance_graph.successors(instance_r):
if instance_graph.nodes[element].get("type") == r:
combos.append((instance_l, element))
elif r == instance_graph.nodes[instance_r].get("type"):
for element in instance_graph.successors(instance_l):
if instance_graph.nodes[element].get("type") == l:
combos.append((element, instance_r))
elif l == instance_graph.nodes[instance_r].get("type"):
for element in instance_graph.successors(instance_l):
if instance_graph.nodes[element].get("type") == r:
combos.append((element, instance_r))
elif r == instance_graph.nodes[instance_l].get("type"):
for element in instance_graph.successors(instance_r):
if instance_graph.nodes[element].get("type") == l:
combos.append((instance_l, element))
else:
print(f"ERROR: no match for instance {instance}")
else:
combos = list(
itertools.product(
*[instance_graph_types[l], instance_graph_types[r]]
)
)
instance_graph_types = add_instance_wedges(
instance_graph, combos, instance_graph_types
)
# remove nodes without neighbors
no_neighbors = set(
[
node[0]
for node in instance_graph.nodes(data=True)
if node[1]["type"] in abstract_nodes
and not list(instance_graph.neighbors(node[0]))
]
)
if no_neighbors:
print(f"removing non-wedge nodes without children: {no_neighbors}")
for node in no_neighbors:
print(f"removing {node}")
instance_graph.remove_node(node)
# add metadata to the graphs
meets = [
"^".join(sorted(combo))
for combo in list(itertools.combinations(abstract_nodes, 2))
]
granularities = abstract_nodes + meets
counts = {
granularity: len(
[
node[1]["area"]
for node in instance_graph.nodes(data=True)
if node[1]["type"] == granularity
]
)
for granularity in granularities
}
areas = {
granularity: sum(
[
node[1]["area"]
for node in instance_graph.nodes(data=True)
if node[1]["type"] == granularity
]
)
for granularity in granularities
}
metadata = {
"id": str(uuid.uuid4()),
"projection": projection,
"granularities": abstract_nodes,
"minimum_intersect_area": minimum_intersection_area,
"nodes": len(instance_graph.nodes()),
"links": len(instance_graph.edges()),
"counts": counts,
"areas": areas,
}
abstract_graph.graph = metadata
instance_graph.graph = metadata
print(metadata)
# save the instance graph with its geometries (very large)
if save_shapes:
with open(
"{}/instance-graph_{}_{}_{}_{}_shapes.geojson".format(
save_dir,
"-".join(abstract_nodes),
projection,
minimum_intersection_area,
config["tag"],
),
mode="w",
) as outfile:
geojson.dump(json_graph.node_link_data(instance_graph), outfile)
# remove geometries from the instance graph (much smaller)
instance_graph_noshapes = json_graph.node_link_data(instance_graph)
for node in instance_graph_noshapes["nodes"]:
if "shape" in node:
del node["shape"]
# save graphs to JSON files
with open(
"{}/abstract-graph_{}_{}_{}_{}.geojson".format(
save_dir,
"-".join(abstract_nodes),
projection,
minimum_intersection_area,
config["tag"],
),
mode="w",
) as outfile:
geojson.dump(json_graph.node_link_data(abstract_graph), outfile)
with open(
"{}/instance-graph_{}_{}_{}_{}.geojson".format(
save_dir,
"-".join(abstract_nodes),
projection,
minimum_intersection_area,
config["tag"],
),
mode="w",
) as outfile:
geojson.dump(instance_graph_noshapes, outfile)
print("done building graphs")
end = datetime.now()
print(end)
print(end - start)
```
#### File: population/src/inner_wrapper.py
```python
import glob
import sys
import logging
sys.path.append("/")
from outer_wrapper import OuterWrapper
from PopulationSimulation import pop_sim, get_data
class InnerWrapper(OuterWrapper):
def __init__(self):
num_input_schemas = len(glob.glob("/opt/schemas/input/*.json"))
super().__init__(
model_id="population", num_expected_inputs=num_input_schemas
)
# should match the max_incstep in broker/config.json
self.max_incstep = 50
def configure(self, **kwargs):
if "county_populations" in kwargs.keys():
self.data = pop_sim(kwargs["county_populations"], self.max_incstep)
else:
logging.warning(
f"incstep {self.incstep}: county_populations not found"
)
def increment(self, **kwargs):
data = get_data(self.data, self.initial_year + self.incstep)
results = {
"population": {
"population": {"data": data, "granularity": "county"}
}
}
return results
def main():
wrapper = InnerWrapper()
wrapper.run()
if __name__ == "__main__":
main()
```
#### File: water_demand/src/inner_wrapper.py
```python
import glob
import sys
import logging
sys.path.append("/")
from outer_wrapper import OuterWrapper
from Water_Demand_Model import Water_Demand_Simulation
class InnerWrapper(OuterWrapper):
def __init__(self):
num_input_schemas = len(glob.glob("/opt/schemas/input/*.json"))
super().__init__(
model_id="water_demand", num_expected_inputs=num_input_schemas
)
def configure(self, **kwargs):
if "rates" in kwargs.keys():
self.rate = kwargs["rates"]
else:
logging.warning(f"incstep {self.incstep}: rates not found")
if "2016_thermo_water" in kwargs.keys():
self.thermo_water = kwargs["2016_thermo_water"]
else:
logging.warning(f"incstep {self.incstep}: thermo_water not found")
self.thermo_water = {}
if "2016_populations" in kwargs.keys():
self.countypop = kwargs["2016_populations"]
else:
logging.warning(
f"incstep {self.incstep}: 2016_populations not found"
)
def increment(self, **kwargs):
if "population" in kwargs.keys():
self.countypop = kwargs["population"]["population"]["data"]
elif self.incstep > 1:
logging.warning(f"incstep {self.incstep}: population not found")
if "power_supply" in kwargs.keys():
self.thermo_water = kwargs["power_supply"]["thermo_water"]["data"]
elif self.incstep > 1:
logging.warning(f"incstep {self.incstep}: thermo_water not found")
demand = Water_Demand_Simulation(
self.countypop, self.rate, self.thermo_water
)
results = {
"water_demand": {
"water_demand": {"data": demand, "granularity": "county"}
}
}
return results
def main():
wrapper = InnerWrapper()
wrapper.run()
if __name__ == "__main__":
main()
```
#### File: SIMoN/viz/plot.py
```python
import click
import sys
import os
import numpy as np
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from geopandas import read_file
import pandas as pd
import json
from bokeh.io import show
from bokeh.models import LogColorMapper
from bokeh.palettes import Blues256 as palette
palette.reverse()
from bokeh.plotting import figure, output_file, save
# geopandas functions for getting coordinates
# references:
# https://automating-gis-processes.github.io/2016/Lesson5-interactive-map-bokeh.html
# https://discourse.bokeh.org/t/mapping-europe-with-bokeh-using-geopandas-and-handling-multipolygons/2571
def get_xy_coords(geometry, coord_type):
"""
Returns either x or y coordinates from geometry coordinate sequence. Used with Polygon geometries.
"""
if coord_type == "x":
return list(geometry.coords.xy[0])
elif coord_type == "y":
return list(geometry.coords.xy[1])
def get_poly_coords(geometry, coord_type):
"""
    Returns the coordinates of a Polygon using the exterior ring of the Polygon
"""
return get_xy_coords(geometry.exterior, coord_type)
def multi_geom_handler(multi_geometry, coord_type):
"""
Function for handling MultiPolygon geometries.
Returns a list of coordinates where all parts of Multi-geometries are merged into a single list.
Individual geometries are separated with np.nan which is how Bokeh wants them.
Bokeh documentation regarding the Multi-geometry issues can be found here (it is an open issue).
https://github.com/bokeh/bokeh/issues/2321
"""
all_poly_coords = [
np.append(get_poly_coords(part, coord_type), np.nan)
for part in multi_geometry
]
coord_arrays = np.concatenate(all_poly_coords)
return coord_arrays
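# For example, for a (hypothetical) MultiPolygon with two parts, multi_geom_handler(geom, "x")
# returns one flat array of the form
#   [x0, x1, ..., xN, nan, x0', x1', ..., xM', nan]
# and Bokeh's patches() renderer treats each nan as a break between the two polygon outlines.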
def get_coords(row, coord_type):
"""
    Returns the 'x' or 'y' coordinates of the exterior of a Polygon or MultiPolygon geometry
"""
poly_type = type(row["geometry"])
# get coords from a single polygon
if poly_type == Polygon:
return get_poly_coords(row["geometry"], coord_type)
# get coords from multiple polygons
elif poly_type == MultiPolygon:
return multi_geom_handler(row["geometry"], coord_type)
# plot data on the shapefile
# references:
# https://docs.bokeh.org/en/latest/docs/gallery/texas.html
def plot_mongo_doc(
data,
shapefile_dir=".",
projection=4326,
plot_width=1200,
plot_height=800,
show_fig=False,
save_fig=True,
):
df = {}
geographies = {}
datasets = data["payload"].keys()
for dataset in datasets:
granularity = data["payload"][dataset]["granularity"]
if not granularity:
print(
f"skipping {dataset} (does not have a granularity specified)"
)
continue
else:
print(f"plotting {dataset} (granularity: {granularity})")
instance_col_name = "ID"
year = data["year"]
unit = data["payload"][dataset]["unit"]
df[dataset] = pd.DataFrame.from_dict(
data["payload"][dataset]["data"],
orient="index",
columns=[f"{dataset}_value"],
)
df[dataset][instance_col_name] = df[dataset].index
shapefile_path = f"{shapefile_dir}/{granularity}.shp"
if os.path.exists(shapefile_path):
geographies[dataset] = read_file(shapefile_path).to_crs(
epsg=projection
)
else:
print(f"{shapefile_path} not found, skipping")
continue
geographies[dataset] = geographies[dataset].merge(
df[dataset], on=instance_col_name
)
# reset the color palette
color_mapper = LogColorMapper(palette=palette)
geographies[dataset]["x"] = geographies[dataset].apply(
get_coords, coord_type="x", axis=1
)
geographies[dataset]["y"] = geographies[dataset].apply(
get_coords, coord_type="y", axis=1
)
plot_data = dict(
x=geographies[dataset]["x"].tolist(),
y=geographies[dataset]["y"].tolist(),
name=geographies[dataset]["NAME"].tolist(),
identifier=geographies[dataset]["ID"].tolist(),
value=geographies[dataset][f"{dataset}_value"].tolist(),
)
TOOLS = "pan,wheel_zoom,reset,hover,save,box_zoom"
coords_tuple = (
("(Lat, Lon)", "($y, $x)")
if projection == 4326
else ("(x, y)", "($x, $y)")
)
title = f"{dataset} ({unit}, {year})" if unit else f"{dataset} ({year})"
fig = figure(
title=title,
tools=TOOLS,
plot_width=plot_width,
plot_height=plot_height,
x_axis_location=None,
y_axis_location=None,
tooltips=[
("Name", "@name"),
("ID", "@identifier"),
("Value", "@value{(0.000 a)}"),
coords_tuple,
],
)
fig.grid.grid_line_color = None
fig.hover.point_policy = "follow_mouse"
fig.patches(
"x",
"y",
source=plot_data,
fill_color={"field": "value", "transform": color_mapper},
fill_alpha=0.7,
line_color="white",
line_width=0.5,
)
if save_fig:
output_file(f"{year}_{dataset}.html")
save(fig)
if show_fig:
show(fig)
@click.command()
@click.option(
"--data",
type=click.Path(),
required=True,
help="path to the JSON file created by export.sh",
)
@click.option(
"--shapefile_dir",
type=click.Path(),
default=os.path.join(
os.path.dirname(__file__), os.pardir, "graphs", "shapefiles"
),
help="path to the directory of shapefiles",
)
@click.option(
"--projection",
default=4326,
help="coordinate reference system to use for plotting",
)
@click.option("--width", default=1200, help="pixel width of the plot")
@click.option("--height", default=800, help="pixel height of the plot")
@click.option(
"--show", type=click.BOOL, default=False, help="display the plot"
)
@click.option(
"--save", type=click.BOOL, default=True, help="write the plot to a file"
)
def main(data, shapefile_dir, projection, width, height, show, save):
# load the data
with open(data) as f:
data_dict = json.load(f)
# plot the data
plot_mongo_doc(
data_dict,
shapefile_dir=shapefile_dir,
projection=projection,
plot_width=width,
plot_height=height,
show_fig=show,
save_fig=save,
)
if __name__ == "__main__":
main()
``` |
{
"source": "jhu-arch/jupyterlab-slurm",
"score": 2
} |
#### File: jupyterlab-slurm/jupyterlab_slurm/handlers.py
```python
import asyncio
import html
import json
import logging
import os
import re
import shlex
import tempfile
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
import tornado.web
logger = logging.Logger(__file__)
jobIDMatcher = re.compile("^[0-9]+$")
class MissingSlurmJobID(Exception):
def __init__(self, message):
self.message = message
class InvalidSlurmJobID(Exception):
def __init__(self, jobid, message):
self.jobid = jobid
self.message = message
class MissingBatchScript(Exception):
def __init__(self, message):
self.message = message
class InvalidCommand(Exception):
def __init__(self, command, message):
self.command = command
self.message = message
# Here mainly as a sanity check that the extension is installed and running
class ExampleHandler(APIHandler):
def initialize(self, log=logger):
super().initialize()
self._serverlog = log
self._serverlog.info("ExampleHandler.initialize()")
@tornado.web.authenticated
def get(self):
try:
self._serverlog.info("ExampleHandler.get()")
self.finish(json.dumps({
"data": "This is the /jupyterlab_slurm/get_example endpoint!"
}))
except Exception as e:
self.finish(json.dumps({
"message": "ExampleHandler error", "exception": str(e)
}))
# A simple request handler for retrieving the username
class UserFetchHandler(APIHandler):
def initialize(self, log=logger):
super().initialize()
self._serverlog = log
self._serverlog.info("UserFetchHandler.initialize()")
@tornado.web.authenticated
def get(self):
try:
username = os.environ.get('USER')
self._serverlog.info("UserFetchHandler.get() {}".format(username))
self.finish(json.dumps({
"user": username
}))
except Exception as e:
self._serverlog.exception(e)
            self.finish(json.dumps({"error": str(e)}))
# common utility methods for running slurm commands, and defaults to the run_command() for scancel and scontrol
# sbatch and squeue need special handling of the command and override run_command()
class SlurmCommandHandler(APIHandler):
def initialize(self, command: str = None, log=logger):
super().initialize()
self._slurm_command = command
self._serverlog = log
self._serverlog.info("SlurmCommandHandler.initialize(): {} {}".format(self._slurm_command, self._serverlog))
def get_jobid(self):
if self.request.headers['Content-Type'] == 'application/json':
body = json.loads(self.request.body)
if "jobID" not in body:
raise MissingSlurmJobID("")
jobID = body["jobID"]
else:
jobID = self.get_body_arguments('jobID')[0]
if not jobIDMatcher.search(jobID):
raise InvalidSlurmJobID(jobID, "jobID {} is invalid".format(jobID))
return jobID
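    # Illustrative request bodies accepted by get_jobid() (the job id value is a placeholder):
    #   JSON:  {"jobID": "12345"}   sent with Content-Type: application/json
    #   form:  jobID=12345          sent with any other Content-Type
    # Anything that is not purely digits fails the jobIDMatcher check and raises InvalidSlurmJobID.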
async def _run_command(self, command: str = None, stdin=None, cwd=None):
self._serverlog.info('SlurmCommandHandler._run_command(): {} {} {}'.format(command, stdin, cwd))
commands = shlex.split(command)
self._serverlog.info('SlurmCommandHandler._run_command(): {}'.format(commands))
process = await asyncio.create_subprocess_exec(*commands,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=stdin,
cwd=cwd)
stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=60.0)
# decode stdout and from bytes to str, and return stdout, stderr, and returncode
return {
"stdout": stdout.decode().strip(),
"stderr": stderr.decode().strip(),
"returncode": process.returncode
}
async def run_command(self, args: list = None):
responseMessage = ""
errorMessage = "{} did not run!".format(self._slurm_command)
returncode = -1
try:
jobID = self.get_jobid()
if args is None:
args = []
out = await self._run_command("{} {} {}".format(self._slurm_command, " ".join(args), jobID))
returncode = out["returncode"]
cmd_stdout = ""
if "stdout" in out and len(out["stdout"].strip()) > 0:
cmd_stdout = out["stdout"]
cmd_stderr = ""
if "stderr" in out and len(out["stderr"].strip()) > 0:
cmd_stderr = out["stderr"]
if returncode != 0:
responseMessage = "Failure: {} {} {}".format(self._slurm_command, jobID, cmd_stdout)
errorMessage = cmd_stderr
else:
responseMessage = "Success: {} {}".format(self._slurm_command, jobID)
errorMessage = ""
except KeyError as ke:
self._serverlog.exception(ke)
try:
jobID is not None
except NameError:
jobID = "No jobID parsed"
responseMessage = "Failure: {} {}".format(self._slurm_command, jobID)
errorMessage = "Missing key before running command: {}".format(str(ke))
returncode = -1
except MissingSlurmJobID as emj:
self._serverlog.exception(emj)
responseMessage = "Failure: {} missing jobID".format(self._slurm_command)
errorMessage = emj.message
returncode = -1
except InvalidSlurmJobID as eij:
self._serverlog.exception(eij)
responseMessage = "Failure: {} invalid jobID {}".format(self._slurm_command, eij.jobid)
errorMessage = eij.message
returncode = -1
except Exception as e:
self._serverlog.exception(e)
try:
jobID is not None
except NameError:
jobID = "No jobID parsed"
responseMessage = "Failure: {} {}".format(self._slurm_command, jobID)
errorMessage = "Unhandled Exception: {}".format(str(e))
returncode = -1
finally:
return {
"responseMessage": responseMessage,
"returncode": returncode,
"errorMessage": errorMessage
}
# Conventions: Query arguments: always settings for how to use or options provided by a SLURM command. Body
# arguments: always job designators, e.g. job ID, paths to SLURM scripts, input streams of SLURM script contents,
# etc. Path arguments: always commands (including commands sent to `scontrol`, e.g. `scontrol hold`/`scontrol resume`)
# Unsurprisingly, the job ID's are always (for scancel and scontrol) the body argument named 'jobID'
# Since this is idempotent, hypothetically one could also use PUT instead of DELETE here.
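# Illustrative requests following these conventions (the routes match those registered in
# setup_handlers() below, ignoring any base_url prefix; host, port, token and job ID values are
# placeholders):
#   curl -X GET    "http://localhost:8888/jupyterlab_slurm/squeue?userOnly=true" -H "Authorization: token <token>"
#   curl -X DELETE "http://localhost:8888/jupyterlab_slurm/scancel" -H "Authorization: token <token>" -d "jobID=12345"
#   curl -X PATCH  "http://localhost:8888/jupyterlab_slurm/scontrol/hold" -H "Authorization: token <token>" -d "jobID=12345"
#   curl -X POST   "http://localhost:8888/jupyterlab_slurm/sbatch?inputType=path" -H "Authorization: token <token>" -d "input=/path/to/job.sbatch"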
class ScancelHandler(SlurmCommandHandler):
def initialize(self, scancel: str = "scancel", log=logger):
super().initialize(scancel, log)
self._serverlog.info("ScancelHandler.initialize(): {} {}".format(self._slurm_command, self._serverlog))
# Add `-H "Authorization: token <token>"` to the curl command for any DELETE request
@tornado.web.authenticated
async def delete(self):
self._serverlog.info('ScancelHandler.delete() - request: {}, command: {}'.format(
self.request, self._slurm_command))
results = {
"responseMessage": "{} has not run yet!".format(self._slurm_command),
"errorMessage": "",
"returncode": -1
}
try:
results = await self.run_command()
except Exception as e:
results = {
"responseMessage": "Failure {}".format(self._slurm_command),
"errorMessage": str(e),
"returncode": -1
}
finally:
await self.finish(json.dumps(results))
# scontrol isn't idempotent, so PUT isn't appropriate, and in general scontrol only modifies a subset of properties,
# so POST also is not ideal
class ScontrolHandler(SlurmCommandHandler):
def initialize(self, scontrol: str = "scontrol", log=logger):
super().initialize(scontrol, log)
self._serverlog.info("ScontrolHandler.initialize()")
# Add `-H "Authorization: token <token>"` to the curl command for any PATCH request
@tornado.web.authenticated
async def patch(self, action):
self._serverlog.info("ScontrolHandler.patch(): {} {}".format(self._slurm_command, action))
results = {
"responseMessage": "{} has not run yet!".format(self._slurm_command),
"errorMessage": "",
"returncode": -1
}
try:
results = await self.run_command([action])
except Exception as e:
results = {
"responseMessage": "Failure {} {}".format(self._slurm_command, action),
"errorMessage": str(e),
"returncode": -1
}
finally:
await self.finish(json.dumps(results))
# sbatch clearly isn't idempotent, and resource ID (i.e. job ID) isn't known when running it, so only POST works for
# the C in CRUD here, not PUT
class SbatchHandler(SlurmCommandHandler):
def initialize(self, sbatch: str = "sbatch", temporary_directory: str = None, log=logger):
super().initialize(sbatch, log)
self.temp_dir = temporary_directory
self._serverlog.debug("SbatchHandler.initialize()")
def get_batch_script(self):
script_data = None
try:
if self.request.headers['Content-Type'] == 'application/json':
body = json.loads(self.request.body)
if "input" in body:
script_data = json.loads(self.request.body)["input"]
else:
raise MissingBatchScript("'input' argument was not found for a batch script!")
else:
script_data = self.get_body_argument('input')
finally:
return script_data
async def run_command(self, script_data: str = None, inputType: str = None, outputDir: str = None):
responseMessage = ""
errorMessage = "{} has not run yet!".format(self._slurm_command)
returncode = -1
try:
if inputType == 'path':
try:
self._serverlog.info("SbatchHandler.post() - sbatch call - {} {} {}".format(
self._slurm_command, script_data, outputDir))
out = await self._run_command("{} {}".format(
self._slurm_command, script_data), cwd=outputDir)
out["errorMessage"] = ""
except Exception as e:
out = {
"stdout": "",
"stderr": "Attempted to run: " +
"command - {}, path - {}, dir - {}. Check console for more details.".format(
self._slurm_command,
script_data,
outputDir
),
"returncode": 1,
"errorMessage": str(e)
}
self._serverlog.error("Error running sbatch: {}".format(out["stderr"]))
self._serverlog.exception(e)
elif inputType == 'contents':
self._serverlog.info("Writing script data to temp file for sbatch: {}".format(script_data))
with tempfile.TemporaryFile(mode='w+b', dir=self.temp_dir) as temp:
buffer = str.encode(script_data)
temp.write(buffer)
temp.flush()
temp.seek(0)
try:
self._serverlog.info("sbatch call - {} {} {}".format(
self._slurm_command, buffer, outputDir))
out = await self._run_command(self._slurm_command, stdin=temp.fileno(), cwd=outputDir)
out["errorMessage"] = ""
except Exception as e:
out = {
"stdout": "",
"stderr": "Attempted to run: " +
"command - {}, script - {}, dir - {}. Check console for more details.".format(
self._slurm_command,
script_data,
outputDir
),
"returncode": 1,
"errorMessage": str(e)
}
self._serverlog.error("Error running sbatch: {}".format(out["stderr"]))
self._serverlog.exception(e)
else:
raise Exception(
'The query argument inputType needs to be either \'path\' or \'contents\', received {}.'.format(
inputType))
returncode = out["returncode"]
cmd_stdout = ""
if "stdout" in out and len(out["stdout"].strip()) > 0:
cmd_stdout = out["stdout"]
cmd_stderr = ""
if "stderr" in out and len(out["stderr"].strip()) > 0:
cmd_stderr = out["stderr"]
if returncode != 0:
responseMessage = "Failure: {} {}".format(self._slurm_command, cmd_stdout)
errorMessage = cmd_stderr
else:
responseMessage = "Success: {}".format(self._slurm_command)
errorMessage = ""
except KeyError as ke:
self._serverlog.exception(ke)
responseMessage = "Failure: {}".format(self._slurm_command)
errorMessage = "Missing key before running command: {}".format(str(ke))
returncode = -1
except Exception as e:
self._serverlog.exception(e)
responseMessage = "Failure: {}".format(self._slurm_command)
errorMessage = "Unhandled Exception: {}".format(str(e))
returncode = -1
finally:
return {
"responseMessage": responseMessage,
"returncode": returncode,
"errorMessage": errorMessage
}
# Add `-H "Authorization: token <token>"` to the curl command for any POST request
@tornado.web.authenticated
async def post(self):
self._serverlog.debug('SbatchHandler.post()')
inputType = self.get_query_argument('inputType')
outputDir = self.get_query_argument('outputDir', default='')
self._serverlog.info('SbatchHandler.post() - sbatch request: {} {}, inputType: {}, outputDir: {}'.format(
self.request, self.request.body, inputType, outputDir))
responseMessage = "{} has not run yet!".format(self._slurm_command)
errorMessage = ""
returncode = -1
try:
out = {}
# Have two options to specify SLURM script in the request body: either with a path to the script, or with the
# script's text contents
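            # Illustrative JSON request bodies (values are placeholders):
            #   ?inputType=path      with body {"input": "/home/user/job.sbatch"}      -> run sbatch on an existing script
            #   ?inputType=contents  with body {"input": "#!/bin/bash\nsrun hostname"} -> script text written to a temp file first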
script_data = self.get_batch_script()
if inputType:
out = await self.run_command(script_data, inputType, outputDir)
else:
raise tornado.web.MissingArgumentError('inputType')
self._serverlog.info("out: {}".format(out))
responseMessage = out["responseMessage"]
errorMessage = out["errorMessage"]
returncode = out["returncode"]
except Exception as e:
self._serverlog.exception(e)
responseMessage = "Failure: {}".format(self._slurm_command)
errorMessage = "Unhandled Exception: {}".format(str(e))
returncode = -1
finally:
await self.finish(json.dumps({
"responseMessage": responseMessage,
"errorMessage": errorMessage,
"returncode": returncode
}))
# all squeue does is request information from SLURM scheduler, which is idempotent (for the "server-side"),
# so clearly GET request is appropriate here
class SqueueHandler(SlurmCommandHandler):
def initialize(self, squeue: str = None, log=logger):
super().initialize(squeue, log)
self._serverlog.debug("SqueueHandler.initialize()")
        # squeue -h automatically removes the header row; -o <format string> ensures that the output is in a
        # format expected by the extension. Hard-coding this is not great -- ideally we would allow the user
        # to customize this, or have the default output be the user's own, e.g.:
        #   stdout, stderr, _ = await run_command('squeue -o "%.18i %.9P %.8j %.8u %.2t %.10M %.6D %R" -h')
self.output_formatting = '-o "%.18i %.9P %.8j %.8u %.2t %.10M %.6D %R" -h'
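        # With this format string a typical (made-up) output row looks like:
        #             123456     debug some_job    alice  R      12:34      1 node001
        # which run_command() below splits (maxsplit=7) into the 8 fields
        # [JOBID, PARTITION, NAME, USER, ST, TIME, NODES, NODELIST(REASON)].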
def get_command(self):
userOnly = self.get_query_argument('userOnly')
if userOnly == 'true':
exec_command = "{} -u {} {}".format(self._slurm_command, os.environ["USER"], self.output_formatting)
else:
exec_command = "{} {}".format(self._slurm_command, self.output_formatting)
return exec_command
async def run_command(self, args: list = None):
responseMessage = ""
errorMessage = "{} did not run!".format(self._slurm_command)
returncode = -1
data_list = []
try:
exec_command = self.get_command()
self._serverlog.info("SqueueHandler.run_command(): {}".format(exec_command))
out = await self._run_command(exec_command)
# self._serverlog.info("SqueueHandler response: {}".format(out))
returncode = out["returncode"]
cmd_stdout = ""
if "stdout" in out and len(out["stdout"].strip()) > 0:
cmd_stdout = out["stdout"]
cmd_stderr = ""
if "stderr" in out and len(out["stderr"].strip()) > 0:
cmd_stderr = out["stderr"]
if returncode != 0:
responseMessage = "Failure: {} {}".format(exec_command, cmd_stdout)
errorMessage = cmd_stderr
else:
responseMessage = "Success: {}".format(exec_command)
errorMessage = ""
data = out["stdout"].splitlines()
# self._serverlog.info("SqueueHandler stdout: {}".format(data))
for row in data:
# maxsplit=7 so we can still display squeue entries with final columns with spaces like the
# following: (burst_buffer/cray: dws_data_in: DataWarp REST API error: offline namespaces: [34831] -
# ask a system administrator to consult the dwmd log for more information
if len(row.split(maxsplit=7)) == 8:
# html.escape because some job ID's might have '<'s and similar characters in them.
# Also, hypothetically we could be Bobbytable'd without html.escape here,
# e.g. if someone had as a jobname '<script>virus.js</script>'.
data_list += [[(html.escape(entry)).strip()
for entry in row.split(maxsplit=7)]]
else:
continue
except KeyError as ke:
self._serverlog.exception(ke)
responseMessage = "Failure: {}".format(self._slurm_command)
errorMessage = "Missing key before running command: {}".format(str(ke))
returncode = -1
data_list = []
except Exception as e:
self._serverlog.exception(e)
responseMessage = "Failure: {}".format(self._slurm_command)
errorMessage = "Unhandled Exception: {}".format(str(e))
returncode = -1
data_list = []
finally:
return {
"data": data_list[:],
"squeue": {
"responseMessage": responseMessage,
"returncode": returncode,
"errorMessage": errorMessage
}
}
# we want to limit the rate at which this is called for a user
@tornado.web.authenticated
async def get(self):
self._serverlog.info("SqueueHandler.get() {}".format(self._slurm_command))
out = {
"returncode": -1,
"stderr": "Command did not run!",
"stdout": ""
}
data_dict = {"data": []}
try:
out = await self.run_command()
# self._serverlog.info("SqueueHandler response: {}".format(out))
data = out["data"]
# self._serverlog.info("SqueueHandler stdout: {}".format(data))
data_dict = {
"data": data,
"squeue": out
}
except Exception as e:
self._serverlog.exception("Unhandled Exception: {}".format(e))
data_dict = {"data": [], "squeue": out}
finally:
# finish(chunk) writes chunk to the output
# buffer and ends the HTTP request
await self.finish(json.dumps(data_dict))
def setup_handlers(web_app, temporary_directory=None, log=None):
if log:
log.debug(web_app.settings)
host_pattern = ".*$"
spath = os.path.normpath(
web_app.settings['spath']) + "/" if 'spath' in web_app.settings else ''
def obtain_path(method):
return web_app.settings[method + '_path'] if method + '_path' in web_app.settings else spath + method
squeue_path = obtain_path("squeue")
scancel_path = obtain_path("scancel")
scontrol_path = obtain_path("scontrol")
sbatch_path = obtain_path("sbatch")
base_url = web_app.settings['base_url']
handlers = [
(url_path_join(base_url, "jupyterlab_slurm", "get_example"), ExampleHandler, dict(log=log)),
(url_path_join(base_url, "jupyterlab_slurm", "user"), UserFetchHandler, dict(log=log)),
(url_path_join(base_url, 'jupyterlab_slurm', 'squeue'), SqueueHandler, dict(squeue=squeue_path, log=log)),
(url_path_join(base_url, 'jupyterlab_slurm', 'scancel'), ScancelHandler, dict(scancel=scancel_path, log=log)),
(url_path_join(base_url, 'jupyterlab_slurm', 'scontrol', '(?P<action>.*)'), ScontrolHandler,
dict(scontrol=scontrol_path, log=log)),
(url_path_join(base_url, 'jupyterlab_slurm', 'sbatch'), SbatchHandler,
dict(sbatch=sbatch_path, temporary_directory=temporary_directory, log=log))
]
if log:
log.info("Slurm command paths: \nsqueue: {}\nscancel: {}\nscontrol: {}\nsbatch: {}\n".format(
squeue_path, scancel_path, scontrol_path, sbatch_path
))
log.info("Starting up handlers....\n")
for h in handlers:
log.info("Handler: {}\tURI: {}\tdict: {}\n".format(
h[1].__name__, h[0], h[2]))
web_app.add_handlers(host_pattern, handlers)
``` |
{
"source": "jhuarte/CarND-Behavioral-Cloning-P4",
"score": 3
} |
#### File: jhuarte/CarND-Behavioral-Cloning-P4/model.py
```python
import os
import cv2
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
keras = tf.keras
# Camera positions and keys for the DataFrame columns
CENTER = 'center'
LEFT = 'left'
RIGHT = 'right'
IMAGE = 'image'
POSITIONS = [CENTER, LEFT, RIGHT]
# Steering correction used to train the model with the lateral cameras as if they were at the center
STEERING_CORRECTION = 0.2
IMG_HEIGHT_SIZE = 160
IMG_WIDTH_SIZE = 320
IMG_DEPTH = 3
################################################################################################################
# Transform the DataFrame from 3 image columns (center, left, right) to a single one (for simplicity)
# and modify the 'steering' value for the lateral cameras (fixed steering correction)
################################################################################################################
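# Illustrative sketch of the transformation (file names and steering values below are made up):
#   before (one row per log entry):
#       center          left            right           steering ...
#       center_001.jpg  left_001.jpg    right_001.jpg   0.05
#   after plain_dataframe() (one row per camera image):
#       image           steering ...
#       center_001.jpg  0.05
#       left_001.jpg    0.25    # 0.05 + STEERING_CORRECTION
#       right_001.jpg  -0.15    # 0.05 - STEERING_CORRECTION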
def lateral_steering_transform(steering, camera_position):
"""
Transform the steering of the lateral cameras (left, right)
Parameters:
steering (numpy.float): Original steering
Returns:
out (numpy.float): New steering
"""
if (camera_position == LEFT):
steering += STEERING_CORRECTION
if (camera_position == RIGHT):
steering -= STEERING_CORRECTION
return steering
def row_update(row, camera_position, positions):
"""
    Build the row dictionary for one camera position, adjusting the steering for lateral cameras
    Parameters:
        row (pandas.Series): One DataFrame's row
        camera_position (string): Camera position (center, left, right)
        positions (list): Supported camera positions
Returns:
out (dictionary): Dictionary (keys: image, steering, throttle, brake, speed)
"""
d = {}
if (camera_position in positions):
# Fill the data (first the data on the dataframe)
d.update(image = row[camera_position], steering = lateral_steering_transform(float(row['steering']), camera_position),
throttle = row['throttle'], brake = row['brake'], speed = row['speed'])
else:
        print('The current position {} is not supported'.format(camera_position))
return d
def plain_dataframe(data):
"""
Iterate over a DataFrame to create a new one with only 1 images column
Parameters:
data (pandas.DataFrame): Original DataFrame(3 columns images: center, left and right)
Returns:
out (pandas.DataFrame): New DataFrame (1 column images)
"""
row_list = []
for idx, row in data.iterrows():
# Iterate over the three camera positions
for camera_position in POSITIONS:
d = row_update(row, camera_position, POSITIONS)
row_list.append(d)
return pd.DataFrame(row_list)
################################################################################################################
# Flip images and reverse the sttering angle
################################################################################################################
def flip_image(fname):
"""
Flip horizontally the image and save it on the same directory but adding the suffix '_flipped'
Parameters:
fname (string): Path of the image
Returns:
fname (string): The new path of the flipped image
"""
# Read de image from disk
img = cv2.imread(fname)
# Horizontal flip
flipped = cv2.flip(img, flipCode = 1)
# Set the new name
head, tail = os.path.split(fname)
root, ext = os.path.splitext(tail)
fname = root + '_flipped' + ext
# Write the flipped image to disk
cv2.imwrite(os.path.join(head,fname), flipped)
# Return the new name
return fname
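# Hedged example (path is illustrative): flip_image('IMG/center_001.jpg') writes
# 'center_001_flipped.jpg' into the same IMG directory and returns that new
# filename (without the directory part).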
def flip_dataframe_images(data):
"""
    Iterate over a DataFrame, flip every image with flip_image and append the flipped copy (with negated steering) as a new row
    Parameters:
        data (pandas.DataFrame): DataFrame with a single 'image' column
    Returns:
        out (pandas.DataFrame): DataFrame containing the original and flipped rows
"""
d = {}
row_list = []
for idx, row in data.iterrows():
# Initialize a dictionary
# Fill the data (first the data on the dataframe)
d = {}
d.update(image = row[IMAGE], steering = row['steering'], throttle = row['throttle'], brake = row['brake'], speed = row['speed'])
row_list.append(d)
# Flip images, store and create the dataframe row
d = {}
d.update(image = flip_image(os.path.join(IMAGES_DIR, row[IMAGE])),
steering = -row['steering'], throttle = row['throttle'], brake = row['brake'], speed = row['speed'])
row_list.append(d)
return pd.DataFrame(row_list)
def load_data(log_file, valid_split, images_dir, batch_size):
# Read the 'driving_log' file
# 1. Add columns name to manage the data
# 2. Add "dtype" for each column
driving_log = pd.read_csv(log_file,
names = ['center', 'left', 'right', 'steering', 'throttle', 'brake', 'speed'],
dtype = {'center': pd.StringDtype(),
'left': pd.StringDtype(),
'right': pd.StringDtype(),
'steering': np.float32,
'throttle': np.float32,
'brake': np.float32,
'speed': np.float32}
)
    # The image columns contain full paths; normalize them to be OS independent (the replace statement) and keep only the filename
driving_log[CENTER] = driving_log[CENTER].str.replace('\\','/').str.rsplit("/", expand = True, n = 1)[1]
driving_log[LEFT] = driving_log[LEFT].str.replace('\\','/').str.rsplit("/", expand = True, n = 1)[1]
driving_log[RIGHT] = driving_log[RIGHT].str.replace('\\','/').str.rsplit("/", expand = True, n = 1)[1]
    # From three image columns to one (for easier handling)
data = plain_dataframe(driving_log)
# Generate augmented dataframes
data = flip_dataframe_images(data)
################################################################################################################
# Data generator (tf.keras.preprocessing.ImageDataGenerator)
################################################################################################################
# Create the image data generator
datagen = keras.preprocessing.image.ImageDataGenerator(validation_split = valid_split,
shear_range = 0.2,
height_shift_range = 0.2,
fill_mode = "nearest",
brightness_range = [0.3, 0.9])
# Training data generator by default color_mode = 'rgb' and shuffle = True
train_generator = datagen.flow_from_dataframe(
        dataframe = data,  # use the flattened/augmented frame built above
directory = images_dir,
x_col = IMAGE,
y_col = "steering",
class_mode = "raw",
target_size = (IMG_HEIGHT_SIZE, IMG_WIDTH_SIZE),
batch_size = batch_size,
subset = "training")
# Validation data generator by default color_mode = 'rgb' and shuffle = True
valid_generator = datagen.flow_from_dataframe(
        dataframe = data,  # use the flattened/augmented frame built above
directory = images_dir,
x_col = IMAGE,
y_col = "steering",
class_mode = "raw",
target_size = (IMG_HEIGHT_SIZE, IMG_WIDTH_SIZE),
batch_size = batch_size,
subset = "validation")
return train_generator, valid_generator
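# Hedged usage sketch (paths and batch size are assumptions, not from the repo):
#   train_gen, valid_gen = load_data('data/driving_log.csv', 0.2, 'data/IMG', 32)
#   batch_images, batch_steering = next(train_gen)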
################################################################################################################
# End-to-End Learning for Self-Driving (NVIDIA)
################################################################################################################
def create_model_nvidia(keep_prob):
"""
Create End-to-End Learning for Self-Driving (NVIDIA)
Parameters:
        keep_prob (float): Dropout rate applied after the fully connected layers
Returns:
model (keras.models): keras.models
"""
# Clean the model
keras.backend.clear_session()
CONV1_FILTERS = 24
CONV2_FILTERS = 36
CONV3_FILTERS = 48
CONV4_FILTERS = 64
CONV5_FILTERS = 64
FC1_UNITS = 1164
FC2_UNITS = 100
FC3_UNITS = 50
model = keras.models.Sequential([
keras.layers.Cropping2D(cropping=((70, 25), (0, 0)), input_shape = (IMG_HEIGHT_SIZE, IMG_WIDTH_SIZE, IMG_DEPTH)),
keras.layers.Lambda(lambda x: tf.image.per_image_standardization(x)),
keras.layers.Conv2D(CONV1_FILTERS, (5, 5), activation = 'relu'),
keras.layers.MaxPooling2D(pool_size = (2, 2), strides = 2, padding = 'SAME'),
keras.layers.Conv2D(CONV2_FILTERS, (5, 5), activation = 'relu'),
keras.layers.MaxPooling2D(pool_size = (2, 2), strides = 2, padding = 'SAME'),
keras.layers.Conv2D(CONV3_FILTERS, (5, 5), activation = 'relu'),
keras.layers.MaxPooling2D(pool_size = (2, 2), strides = 2, padding = 'SAME'),
keras.layers.Conv2D(CONV4_FILTERS, (3, 3), activation = 'relu'),
keras.layers.Conv2D(CONV5_FILTERS, (3, 3), activation = 'relu'),
# Fully connected layers
keras.layers.Flatten(),
keras.layers.Dense(FC1_UNITS, activation = 'relu'),
keras.layers.Dropout(rate = keep_prob),
keras.layers.Dense(FC2_UNITS, activation = 'relu'),
keras.layers.Dropout(rate = keep_prob),
keras.layers.Dense(FC3_UNITS, activation = 'relu'),
keras.layers.Dropout(rate = keep_prob),
keras.layers.Dense(1)
])
    model.compile(loss = keras.losses.mse, optimizer = keras.optimizers.Adam(learning_rate = args.learning_rate), metrics = ["mse"])
return model
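# Hedged usage sketch (the dropout rate is an assumption):
#   model = create_model_nvidia(keep_prob = 0.5)
#   model.summary()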
def train_model(model, epochs, training_data, validation_data, ckp_path = "model.ckpt"):
"""
Train the model
Parameters:
model (keras.models): Model to train
epochs (integer): Number of epochs
training_data (keras.preprocessing.image.ImageDataGenerator): Training data
validation_data (keras.preprocessing.image.ImageDataGenerator): Validation data (to implement cross-validation)
ckp_path (string): Path to store the checkpoints (only weights and the best)
Returns:
history (keras.history): History for data analytics
"""
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath = ckp_path,
verbose = 1,
save_weights_only = True,
save_best_only = True)
# Configure early stopping callback
early_stopping = keras.callbacks.EarlyStopping(patience = 10)
# Train the model with the new callback
history = model.fit(x = training_data,
epochs = epochs,
validation_data = validation_data,
callbacks = [cp_callback, early_stopping])
return history
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Behavioral Cloning Training Program')
parser.add_argument('-d', help='data directory', dest='data_dir', type = str, default = 'data')
parser.add_argument('-v', help='validation proportion', dest='val_prop', type = float, default = 0.2)
parser.add_argument('-k', help='drop out probability', dest='keep_prob', type = float, default = 0.5)
parser.add_argument('-e', help='number of epochs', dest='epochs', type = int, default = 3)
parser.add_argument('-b', help='batch size', dest='batch_size', type = int, default = 40)
parser.add_argument('-l', help='learning rate', dest='learning_rate', type = float, default = 1.0e-4)
args = parser.parse_args()
BASE_DIR = os.path.join(args.data_dir)
LOG_FILE = os.path.join(BASE_DIR, 'driving_log.csv')
IMAGES_DIR = os.path.join(BASE_DIR, 'IMG')
print('-' * 30)
print('Checking parameters....')
print('-' * 30)
for key, value in vars(args).items():
print('{:<20} : {}'.format(key, value))
print('-' * 30)
print('Training...')
train_generator, valid_generator = load_data(LOG_FILE, args.val_prop, IMAGES_DIR, args.batch_size)
    model = create_model_nvidia(keep_prob = args.keep_prob)
    train_model(model, args.epochs, train_generator, valid_generator)
print('Done')
``` |
{
"source": "jhubaum/draft",
"score": 3
} |
#### File: draft/database/models.py
```python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
Base = declarative_base()
class Draft(Base):
__tablename__ = 'drafts'
id = Column(Integer, primary_key=True)
title = Column(String, nullable=False)
filename = Column(String, nullable=False)
urls = relationship("URL", back_populates="draft",
cascade="all, delete, delete-orphan")
class Highlight(Base):
__tablename__ = 'highlights'
id = Column(Integer, primary_key=True)
url_id = Column(Integer, ForeignKey('urls.id'))
url = relationship("URL", back_populates="highlights")
paragraph = Column(String(5), nullable=False)
start = Column(Integer, nullable=False)
length = Column(Integer, nullable=False)
type = Column(String(5), nullable=False)
@staticmethod
def from_json(url, json):
return Highlight(url_id=url.id,
paragraph=json['p'],
start=json['start'],
length=json['length'],
type=json['type'])
def to_dict(self):
return dict(
id=self.id,
p=self.paragraph,
start=self.start,
length=self.length,
type=self.type
)
class URL(Base):
__tablename__ = 'urls'
id = Column(Integer, primary_key=True)
url = Column(String(8), nullable=False)
name = Column(String, nullable=False)
draft_id = Column(Integer, ForeignKey('drafts.id'))
draft = relationship("Draft", back_populates="urls")
highlights = relationship("Highlight", back_populates="url",
cascade="all, delete, delete-orphan")
```
#### File: jhubaum/draft/parsing.py
```python
import re
import os
FILE_SUBDIR = 'draft/templates/files/'
# if line starts with one of these characters, ignore the line
IGNORE_CHARS = '#*'
def ignore_line(l):
for c in IGNORE_CHARS:
if l.startswith(c):
return True
return False
def create_draft_file(text):
lines = text.split('\n')
title = lines[0][2:]
filename = re.sub(r'\W+', '', title.lower().replace(' ', '_')) + '.html'
if os.path.isfile(os.path.join(FILE_SUBDIR, filename)):
return None, None
with open(os.path.join(FILE_SUBDIR, filename), 'w+') as f:
p_count = 0
paragraph = []
def write_p():
f.write('<p id="p{}">{}</p>\n'.format(p_count, paragraph))
f.write('{% extends "file.html" %}\n')
f.write('{% block content %}\n')
for l in lines[1:]:
l = l.strip()
if (len(paragraph) == 0 and not l) or ignore_line(l):
continue
if not l:
# finish paragraph
paragraph = '<br>'.join(paragraph)
write_p()
p_count += 1
paragraph = []
else:
paragraph.append(l)
        if paragraph:
            # flush a trailing paragraph that has no terminating blank line
            paragraph = '<br>'.join(paragraph)
            write_p()
        f.write('{% endblock %}\n')
return filename, title
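# Hedged example (input text is illustrative): a draft beginning with
# '# My Title' followed by blank-line-separated paragraphs produces
# ('my_title.html', 'My Title'), with each paragraph written as
# <p id="pN">...</p> between the extends/block template markers.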
def delete_draft_file(draft):
os.remove(os.path.join(FILE_SUBDIR, draft.filename))
``` |
{
"source": "jhubberts/SkiArt",
"score": 3
} |
#### File: SkiArt/ski_art/main.py
```python
from bs4 import BeautifulSoup
import argparse
import svgwrite
import os
import random
import colorsys
def main(name, output):
name_to_files_map = get_name_to_files_map(name)
name_to_data_map = parse_name_to_files_map(name_to_files_map)
bounds = get_universal_bounds(name_to_data_map)
dwg = svgwrite.Drawing(output.name, profile='tiny')
colors = get_n_colors(len(name_to_files_map))
color_idx = 0
for name in name_to_data_map:
coords = points_to_coordinates(name_to_data_map[name]["points"], bounds)
dwg.add(dwg.polyline(coords, stroke=colors[color_idx], stroke_width="3", stroke_opacity="0.6", fill="none"))
color_idx += 1
dwg.save()
def get_name_to_files_map(input_dir):
"""
Gets a map of the name of a given skier to a list of files in a directory with that name. This assumes
that the input directory provided to this tool has the following format:
dir -
| - Person 1
| - Day_1.gpx
| - Day_2.gpx
| - Person 2
| - Day_1.gpx
| - Day_2.gpx
"""
name_to_files_map = {}
for dir in os.listdir(input_dir):
dir_path = os.path.join(input_dir,dir)
name_to_files_map[dir] = map(lambda x: open(os.path.join(dir_path, x), 'r'), os.listdir(dir_path))
return name_to_files_map
def parse_name_to_files_map(name_to_files_map):
"""
Reads and transforms GPS points for each individual in the name_to_files_map, and figures out the bounds of their
entire trip
"""
name_to_data_map = {}
for input in name_to_files_map:
points = []
for file in name_to_files_map[input]:
points.extend(get_points_from_file(file))
bounds = get_bounds(points)
name_to_data_map[input] = {"points": points, "bounds": bounds}
return name_to_data_map
def get_points_from_file(file):
"""
Parses a GPX file, and returns an array of all GPS points described by that file
"""
parsed = BeautifulSoup(file.read(), 'html.parser')
segments = parsed.trk.findAll("trkseg")
points = []
for segment in segments:
segment_points = segment.findAll("trkpt")
points.extend(map(lambda x: transform_bs4_point(x), segment_points))
return points
def transform_bs4_point(bs4_point):
"""
Transforms a single BeautifulSoup node representing a node in the GPS coordinate graph
into a dictionary representing the elements we care about
"""
return {
"lat": float(bs4_point["lat"]), # Degrees
"lon": float(bs4_point["lon"]), # Degrees
"ele": float(bs4_point.ele.string), # Meters?
"time": bs4_point.time.string # ISO8601
}
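# For reference, the <trkpt> elements this parser consumes look roughly like
# (values are illustrative):
#   <trkpt lat="47.6062" lon="-122.3321">
#     <ele>1320.0</ele><time>2020-02-01T18:30:00Z</time>
#   </trkpt>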
def get_universal_bounds(name_to_data_map):
"""
Gets the bounds of all recorded segments
"""
min_lat = float("inf")
max_lat = float("-inf")
min_lon = float("inf")
max_lon = float("-inf")
for name in name_to_data_map:
item = name_to_data_map[name]
min_lat = item["bounds"]["min_lat"] if item["bounds"]["min_lat"] < min_lat else min_lat
min_lon = item["bounds"]["min_lon"] if item["bounds"]["min_lon"] < min_lon else min_lon
max_lat = item["bounds"]["max_lat"] if item["bounds"]["max_lat"] > max_lat else max_lat
max_lon = item["bounds"]["max_lon"] if item["bounds"]["max_lon"] > max_lon else max_lon
return {
"min_lat": min_lat,
"min_lon": min_lon,
"max_lat": max_lat,
"max_lon": max_lon
}
def get_bounds(points):
"""
Returns the min and max lat and lon within an array of points
"""
min_lat = float("inf")
max_lat = float("-inf")
min_lon = float("inf")
max_lon = float("-inf")
for point in points:
min_lat = point["lat"] if point["lat"] < min_lat else min_lat
min_lon = point["lon"] if point["lon"] < min_lon else min_lon
max_lat = point["lat"] if point["lat"] > max_lat else max_lat
max_lon = point["lon"] if point["lon"] > max_lon else max_lon
return {
"min_lat": min_lat,
"min_lon": min_lon,
"max_lat": max_lat,
"max_lon": max_lon
}
def points_to_coordinates(points, bounds, desired_width=1000):
"""
Transforms GPS points into coordinates for the desired SVG file
"""
height = bounds["max_lat"] - bounds["min_lat"]
width = bounds["max_lon"] - bounds["min_lon"]
transform_coefficient = float(desired_width) / width # Make width 1000
return map(lambda x: point_to_coordinate(x, bounds, transform_coefficient), points)
def get_n_colors(n):
"""
Returns N contrasting RGB fill descriptors
"""
transform_constant = 1.0 / n
rgb_colors = []
for i in range(0, n):
hue = i * transform_constant
lightness = 0.5 + 0.1 * random.random()
saturation = 0.9 + 0.1 * random.random()
rgb = map(lambda x: x * 256.0, colorsys.hls_to_rgb(hue, lightness, saturation))
rgb_colors.append(svgwrite.rgb(rgb[0], rgb[1], rgb[2], '%'))
return rgb_colors
def point_to_coordinate(point, bounds, transform_coefficient):
"""
Transforms a single point into a coordinate in an SVG file
"""
return ((point["lon"] - bounds["min_lon"]) * transform_coefficient,
((bounds["max_lat"] - bounds["min_lat"]) - (point["lat"] - bounds["min_lat"])) * transform_coefficient)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Turns a gpx file into an art.')
parser.add_argument("-o", "--output", type=argparse.FileType('w'), required=True, help="Directs the output to a name of your choice")
parser.add_argument("-i", "--input", required=True, help="Specifies the input directory")
args = parser.parse_args()
main(args.input, args.output)
``` |
{
"source": "jhubert/pydocx-s3-images",
"score": 3
} |
#### File: pydocxs3upload/util/image.py
```python
from __future__ import (
absolute_import,
unicode_literals,
)
import os
import posixpath
import requests
from requests.exceptions import InvalidSchema, MissingSchema
from six.moves.urllib.parse import urlparse
from . import uri
def get_image_data_and_filename(image_data_or_url, filename):
"""
If the image is an external image then the image_data is actually a link to
the image and the filename is likely garbage.
"""
parsed_url = urlparse(image_data_or_url)
_, real_filename = posixpath.split(parsed_url.path)
match = uri.is_encoded_image_uri(image_data_or_url)
sanitized_filename = None
if not match:
sanitized_filename = uri.sanitize_filename(real_filename)
real_image_data = get_image_from_src(image_data_or_url)
if not real_image_data:
return image_data_or_url, filename
return real_image_data, sanitized_filename
def get_image_from_src(src):
"""
Take a src attribute from an image tag and return the content image data
associated with that image. At the minimum we should handle https:// and
base64 encoded images.
"""
    # Handle the easy case first, it's an external link to somewhere else.
try:
response = requests.get(src)
except (InvalidSchema, MissingSchema):
pass
else:
return response.content
# Check to see if it's a base64 encoded image.
match = uri.is_encoded_image_uri(src)
if match:
return src
# Not really sure what is going on here, punt for now.
return src
def get_image_format(filename):
"""Return the format based on filename extension"""
return os.path.splitext(filename)[1].strip('.')
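# Hedged usage sketch (URL and filename are illustrative):
#   data, name = get_image_data_and_filename('https://example.com/logo.png',
#                                            'word_image1.png')
#   get_image_format(name)   # -> 'png'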
```
#### File: tests/util/test_uri.py
```python
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from unittest import TestCase
from pydocxs3upload.util import uri
class UriUtilsTestCase(TestCase):
def test_is_encoded_image_uri(self):
image_data = 'data:image/png;base64,iVBOR='
self.assertTrue(uri.is_encoded_image_uri(image_data))
self.assertFalse(uri.is_encoded_image_uri('data:image/png;base64,'))
self.assertFalse(
uri.is_encoded_image_uri('http://example.com/logo.png')
)
self.assertEqual(
{'image_data': 'iVBOR=', 'extension': 'png'},
uri.is_encoded_image_uri(image_data).groupdict()
)
def test_sanitize_filename(self):
filenames = {
'1409764011-image1.gif': 'image1.gif',
'409764011-image1.gif': '409764011-image1.gif',
'409764011-image.gif': '409764011-image.gif',
'image%20%232014.gif': 'image #2014.gif',
}
        for before, after in filenames.items():
self.assertEqual(after, uri.sanitize_filename(before))
``` |
{
"source": "jhub/Object_Tracker",
"score": 2
} |
#### File: Object_Tracker/src/obj_tr_constants.py
```python
import rospy
import sys
import pudb
import numpy as np
from math import pi, atan2, sqrt
from random import gauss
from pyswarm import pso
x,y,th = 0,1,2
v_x, v_th = 0,1
data, pred_beh = 0,1
bc_interval = 1
d_limits = np.array([[0,10],[0,10],[-np.pi,np.pi]]) #x, y, theta limits
array_size = 100 #How many particles we want
R = np.array([[.01,.001,.001],[.001,.01,.001],[.001,.001,.03]]) #Motion model noise
Q = np.array([[.5,.01,.02],[.01,.5,.02],[.01,.01,.2]]) #Measurement noise
PROB_SWITCH = .3
CIRC_CENTER = [6,6]
r = 2
TURTLE_COUNT = 1
'''
Changes state based on the behv given
'''
def g_func(tmp_state, behv, dt, R = None):
state = tmp_state[:]
if behv[v_th] != 0:
state[x] = state[x] - (behv[v_x] / behv[v_th]) * np.sin(state[th]) + (behv[v_x] / behv[v_th]) * np.sin(state[th] + behv[v_th] * dt)
state[y] = state[y] + (behv[v_x] / behv[v_th]) * np.cos(state[th]) - (behv[v_x] / behv[v_th]) * np.cos(state[th] + behv[v_th] * dt)
state[th] = state[th] + behv[v_th] * dt
else:
state[x] = state[x] + np.cos(state[th]) * dt * behv[v_x]
state[y] = state[y] + np.sin(state[th]) * dt * behv[v_x]
state[th] = state[th]
if R is not None:
state = np.diag(gauss(state, R)).copy()
state[th] = rot_diff(state[th])
return state
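# The update above is the standard constant-turn-rate arc motion model:
#   x'  = x - (v/w)*sin(th) + (v/w)*sin(th + w*dt)
#   y'  = y + (v/w)*cos(th) - (v/w)*cos(th + w*dt)
#   th' = th + w*dt
# with the straight-line case (w == 0) handled separately; optional noise R is
# then applied and the heading wrapped back into [-pi, pi].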
# def g_func_comp(tmp_state, behv, dt, R = None):
# state = tmp_state[:]
# return np.diag(gauss(state, R)).copy()
'''
Residual for motion model
'''
def g_res_fn(z, mean):
residual = z[0:3] - mean
residual[2] = rot_diff(residual[2])
return residual
def rot_diff(rot_in):
while rot_in > pi:
rot_in -= 2 * pi
while rot_in < -pi:
rot_in += 2 * pi
return rot_in
'''
Obtains the observation data from our input data
'''
def h_function(mean):
return mean[0:3]
'''
Used for obtaining the command vel from position
'''
def policy(point,c_last):
global v_x, v_th, x,y,th
behv = [0,0]
# p_x = point[x] - CIRC_CENTER[x]
# p_y = point[y] - CIRC_CENTER[y]
# p_th = point[th]
# r_x = (p_x/abs(p_x))*r if p_x != 0 else 0
# r_y = (p_y/abs(p_y))*r if p_y != 0 else 0
# d_conv = 1
	if c_last < .5: #TODO: does this make sense? essentially rounding
behv[v_x] = float(3)/2
# D = sqrt(p_x**2 + p_y ** 2)
# if D < r: #inside the circle
# d = r - D
# else: #outside the circle
# d = D - r
# if d > d_conv:
# pudb.set_trace()
# weight = 1/(d+1) *.1
# if D < r: #inside the circle
# phi = atan2(p_x,p_y) #pointing away form center
# behv[v_th] = float(1)/2 - circ_diff(p_th,phi)*weight
# else:
# phi = atan2(p_y,p_x) #pointing towards center
# behv[v_th] = float(1)/2 + circ_diff(p_th,phi)*weight
# else:
# phi = atan2(p_x,-p_y) #tangental to the circle
# weight = d/d_conv *.5
# if D < r: #inside the circle
# behv[v_th] = float(1)/2 - circ_diff(p_th,phi)*weight
# else:
# behv[v_th] = float(1)/2 + circ_diff(p_th,phi)*weight
behv[v_th] = float(1)
else:
behv[v_x] = gauss(0,3)
behv[v_th] = gauss(0,pi)
return behv
def circ_diff(u,v):
mn_val,mx_val = (u,v) if u > v else (v,u)
diff1 = mn_val - mx_val
diff2 = mx_val - mn_val + 2*pi
lesser_diff = diff1 if diff1 < diff2 else diff2
return rot_diff(lesser_diff)
def rot_diff(rot_in):
while rot_in > pi:
rot_in -= 2 * pi
while rot_in < -pi:
rot_in += 2 * pi
return rot_in
'''
Used for obtaining the compromised probability from position and the last compromised state
'''
def zone(z, c):
prob = 0
zone1 = [2,4]
zone2 = [1,5]#-[2,4]
#zone3 = [0,inf]-[1,5]
#Good zone
if (within_circ_bnds(CIRC_CENTER[x],CIRC_CENTER[y],zone1[0],zone1[1],z[0],z[1])):
prob = .1
#Preventive zone
elif (within_circ_bnds(CIRC_CENTER[x],CIRC_CENTER[y],zone2[0],zone2[1],z[0],z[1])):
prob = .5
#Restricted zone
else:
prob = .9
return prob * max(min(1, gauss(c,PROB_SWITCH)), 0)
def within_circ_bnds(x_cent,y_cent,r1,r2,z_x,z_y):
dst_cubed = (x_cent + z_x)**2 + (y_cent + z_y)**2
return dst_cubed >= r1**2 and dst_cubed <= r2**2
```
#### File: Object_Tracker/src/secure_state_sim.py
```python
import rospy
import roslib
import sys
import pudb
import tf
from turtlesim.msg import Pose
from nav_msgs.msg import Odometry
from threading import Thread
'''
The purpose of this class is to simulate a state tracker that is able to identify
the different agents in the state space, assign IDs to each and emit their position.
This position is simulated in our case using the ground truth by adding error.
'''
class sec_state_trk:
def __init__ (self, turtle_List, ID_List):
for i in range(len(turtle_List)):
rospy.Subscriber(turtle_List[i] + "/pose", Pose, self.sns_callback)
self.sec_state_pub = rospy.Publisher(ID_List[i] + "/noisedOdom", Odometry, queue_size=1)
self.odom_msg = Odometry()
def sns_callback(self, point):
x_p, y_p, th_p = point.x,point.y,point.theta
self.odom_msg.pose.pose.position.x = x_p
self.odom_msg.pose.pose.position.y = y_p
quat = get_quat(0,0,th_p)
self.odom_msg.pose.pose.orientation.x = quat[0]
self.odom_msg.pose.pose.orientation.y = quat[1]
self.odom_msg.pose.pose.orientation.z = quat[2]
self.odom_msg.pose.pose.orientation.w = quat[3]
self.sec_state_pub.publish(self.odom_msg)
def main(turtle_List,ID_List):
sec_state_trk(turtle_List,ID_List)
rospy.spin()
'''
Other methods
'''
def get_quat(roll, pitch, yaw):
return tf.transformations.quaternion_from_euler(roll, pitch, yaw)
if __name__ == '__main__':
rospy.init_node("secure_state_sim", anonymous=True)
turtle_List = ["/turtle2"]#,"/turtle3"]
ID_List = ["/F9DRR"]#,"/A4DFB"]
#for i in range(TURTLE_COUNT): #For expansion (not only one observer)
turtle_thread = Thread(target = main, args = (turtle_List,ID_List, ))
turtle_thread.setDaemon(True)
turtle_thread.start()
rospy.spin()
``` |
{
"source": "jhuboo/ROSbot",
"score": 3
} |
#### File: jhuboo/ROSbot/motor_command_model.py
```python
import numpy as np
from math import cos, sin
def model_parameters():
"""Returns two constant model parameters"""
k = 1.0
d = 0.5
return k, d
def system_matrix(theta):
"""Returns a numpy array with the A(theta) matrix for a differential drive robot"""
    # Standard differential-drive formulation (sign convention assumed): the
    # input u = [u_left, u_right] holds wheel speeds, k is a speed gain and d
    # the half-distance between the wheels.
    k, d = model_parameters()
    A = np.array([[k * cos(theta) / 2, k * cos(theta) / 2],
                  [k * sin(theta) / 2, k * sin(theta) / 2],
                  [-k / (2 * d), k / (2 * d)]])
    return A
def system_field(z, u):
"""Computes the field at a given state for the dynamical model"""
    A = system_matrix(z[2])
    dot_z = A.dot(u)
    return dot_z
def euler_step(z, u, stepSize):
"""Integrates the dynamical model for one time step using Euler's method"""
    zp = z + stepSize * system_field(z, u)
    return zp
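# Hedged usage sketch (state and input values are illustrative):
#   z0 = np.array([0.0, 0.0, 0.0])   # x, y, theta
#   u = np.array([1.0, 1.0])         # equal wheel speeds -> straight-line motion
#   z1 = euler_step(z0, u, 0.1)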
``` |
{
"source": "jhuboo/sparky",
"score": 2
} |
#### File: Code/Client/Main.py
```python
from ui_led import Ui_led
from ui_face import Ui_Face
from ui_client import Ui_client
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from Client import *
from Calibration import *
class MyWindow(QMainWindow,Ui_client):
def __init__(self):
super(MyWindow,self).__init__()
self.setupUi(self)
self.setWindowIcon(QIcon('Picture/logo_Mini.png'))
self.Video.setScaledContents (True)
self.Video.setPixmap(QPixmap('Picture/dog_client.png'))
self.setFocusPolicy(Qt.StrongFocus)
self.Key_W=False
self.Key_A=False
self.Key_S=False
self.Key_D=False
self.Key_Q=False
self.Key_E=False
self.Key_Space=False
self.client=Client()
self.client.move_speed=str(self.slider_speed.value())
file = open('IP.txt', 'r')
self.lineEdit_IP_Adress.setText(str(file.readline()))
file.close()
#ProgressBar
self.progress_Power.setMinimum(0)
self.progress_Power.setMaximum(100)
self.progress_Power.setValue(90)
#Button click event
self.Button_Connect.clicked.connect(self.connect)
self.Button_Video.clicked.connect(self.video)
self.Button_Ball_And_Face.clicked.connect(self.chase_ball_and_find_face)
self.Button_IMU.clicked.connect(self.imu)
self.Button_Calibration.clicked.connect(self.showCalibrationWindow)
self.Button_LED.clicked.connect(self.showLedWindow)
self.Button_Sonic.clicked.connect(self.sonic)
self.Button_Relax.clicked.connect(self.relax)
self.Button_Face_ID.clicked.connect(self.showFaceWindow)
self.Button_ForWard.pressed.connect(self.forward)
self.Button_ForWard.released.connect(self.stop)
self.Button_BackWard.pressed.connect(self.backward)
self.Button_BackWard.released.connect(self.stop)
self.Button_Left.pressed.connect(self.left)
self.Button_Left.released.connect(self.stop)
self.Button_Right.pressed.connect(self.right)
self.Button_Right.released.connect(self.stop)
self.Button_Step_Left.pressed.connect(self.step_left)
self.Button_Step_Left.released.connect(self.stop)
self.Button_Step_Right.pressed.connect(self.step_right)
self.Button_Step_Right.released.connect(self.stop)
self.Button_Buzzer.pressed.connect(self.buzzer)
self.Button_Buzzer.released.connect(self.buzzer)
#Slider
self.slider_head.setMinimum(50)
self.slider_head.setMaximum(180)
self.slider_head.setSingleStep(1)
self.slider_head.setValue(90)
self.slider_head.valueChanged.connect(self.head)
self.slider_horizon.setMinimum(-20)
self.slider_horizon.setMaximum(20)
self.slider_horizon.setSingleStep(1)
self.slider_horizon.setValue(0)
self.slider_horizon.valueChanged.connect(self.horizon)
self.slider_height.setMinimum(-20)
self.slider_height.setMaximum(20)
self.slider_height.setSingleStep(1)
self.slider_height.setValue(0)
self.slider_height.valueChanged.connect(self.height)
self.slider_pitch.setMinimum(-20)
self.slider_pitch.setMaximum(20)
self.slider_pitch.setSingleStep(1)
self.slider_pitch.setValue(0)
self.slider_pitch.valueChanged.connect(lambda:self.attitude(self.label_pitch,self.slider_pitch))
self.slider_yaw.setMinimum(-20)
self.slider_yaw.setMaximum(20)
self.slider_yaw.setSingleStep(1)
self.slider_yaw.setValue(0)
self.slider_yaw.valueChanged.connect(lambda:self.attitude(self.label_yaw,self.slider_yaw))
self.slider_roll.setMinimum(-20)
self.slider_roll.setMaximum(20)
self.slider_roll.setSingleStep(1)
self.slider_roll.setValue(0)
self.slider_roll.valueChanged.connect(lambda:self.attitude(self.label_roll,self.slider_roll))
self.slider_speed.setMinimum(2)
self.slider_speed.setMaximum(10)
self.slider_speed.setSingleStep(1)
self.slider_speed.setValue(8)
self.slider_speed.valueChanged.connect(self.speed)
self.client.move_speed=str(self.slider_speed.value())
#Timer
self.timer=QTimer(self)
self.timer.timeout.connect(self.refresh_image)
self.timer_power = QTimer(self)
self.timer_power.timeout.connect(self.power)
self.timer_sonic = QTimer(self)
self.timer_sonic.timeout.connect(self.getSonicData)
self.drawpoint=[585,135]
self.initial=True
#keyboard
def keyPressEvent(self, event):
if(event.key() == Qt.Key_C):
print("C")
self.connect()
if(event.key() == Qt.Key_V):
print("V")
if self.Button_Video.text() == 'Open Video':
self.timer.start(10)
self.Button_Video.setText('Close Video')
else:
self.timer.stop()
self.Button_Video.setText('Open Video')
if(event.key() == Qt.Key_R):
print("R")
self.relax()
if(event.key() == Qt.Key_L):
print("L")
self.showLedWindow()
if(event.key() == Qt.Key_U):
print("U")
self.sonic()
if(event.key() == Qt.Key_F):
print("F")
self.chase_ball_and_find_face()
if(event.key() == Qt.Key_B):
print("B")
self.imu()
if(event.key() == Qt.Key_M):
print("M")
self.showCalibrationWindow()
if event.isAutoRepeat():
pass
else :
if event.key() == Qt.Key_W:
print("W")
self.forward()
self.Key_W=True
elif event.key() == Qt.Key_S:
print("S")
self.backward()
self.Key_S=True
elif event.key() == Qt.Key_A:
print("A")
self.left()
self.Key_A=True
elif event.key() == Qt.Key_D:
print("D")
self.right()
self.Key_D=True
elif event.key() == Qt.Key_Q:
print("Q")
self.step_left()
self.Key_Q=True
elif event.key() == Qt.Key_E:
print("E")
self.step_right()
self.Key_E=True
elif event.key() == Qt.Key_Space:
print("Space")
self.buzzer()
self.Key_Space=True
def keyReleaseEvent(self, event):
if(event.key() == Qt.Key_W):
if not(event.isAutoRepeat()) and self.Key_W==True:
print("release W")
self.stop()
self.Key_W=False
elif(event.key() == Qt.Key_A):
if not(event.isAutoRepeat()) and self.Key_A==True:
print("release A")
self.stop()
self.Key_A=False
elif(event.key() == Qt.Key_S):
if not(event.isAutoRepeat()) and self.Key_S==True:
print("release S")
self.stop()
self.Key_S=False
elif(event.key() == Qt.Key_D):
if not(event.isAutoRepeat()) and self.Key_D==True:
print("release D")
self.stop()
self.Key_D=False
elif(event.key() == Qt.Key_Q):
if not(event.isAutoRepeat()) and self.Key_Q==True:
print("release Q")
self.stop()
self.Key_Q=False
elif(event.key() == Qt.Key_E):
if not(event.isAutoRepeat()) and self.Key_E==True:
print("release E")
self.stop()
self.Key_E=False
if(event.key() == Qt.Key_Space):
if not(event.isAutoRepeat()) and self.Key_Space==True:
print("release Space")
self.buzzer()
self.Key_Space=False
def paintEvent(self,e):
try:
qp=QPainter()
qp.begin(self)
pen=QPen(Qt.white,2,Qt.SolidLine)
qp.setPen(pen)
qp.drawRect(485,35,200,200)
pen=QPen(QColor(0,138,255),2,Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(self.drawpoint[0],35,self.drawpoint[0],235)
qp.drawLine(485,self.drawpoint[1],685,self.drawpoint[1])
self.label_point.move(self.drawpoint[0] + 10, self.drawpoint[1] + 10)
pitch = round((self.drawpoint[1] - 135) / 100.0 * 20)
yaw = round((self.drawpoint[0] - 585) / 100.0 * 20)
self.label_point.setText(str((yaw, pitch)))
qp.end()
if pitch !=self.slider_pitch.value():
self.slider_pitch.setValue(pitch)
if yaw !=self.slider_yaw.value():
self.slider_yaw.setValue(yaw)
except Exception as e:
print(e)
def mouseMoveEvent(self, event):
x=event.pos().x()
y=event.pos().y()
#print(x,y)
if x > 485 and x < 685 and y > 35 and y < 235:
try:
self.drawpoint[0]=x
self.drawpoint[1]=y
#self.update()
self.repaint()
except Exception as e:
print(e)
def mousePressEvent(self, event):
x=event.pos().x()
y=event.pos().y()
if x > 485 and x < 685 and y > 35 and y < 235:
try:
self.drawpoint[0]=x
self.drawpoint[1]=y
#self.update()
self.repaint()
except Exception as e:
print(e)
def closeEvent(self,event):
try:
self.timer_power.stop()
self.timer.stop()
stop_thread(self.video)
stop_thread(self.instruction)
except Exception as e:
print(e)
self.client.turn_off_client()
print("close")
QCoreApplication.instance().quit()
#os._exit(0)
def video(self):
if self.Button_Video.text() == 'Open Video':
self.timer.start(10)
self.Button_Video.setText('Close Video')
else:
self.timer.stop()
self.Button_Video.setText('Open Video')
def receive_instruction(self,ip):
try:
self.client.client_socket1.connect((ip,5001))
self.client.tcp_flag=True
print ("Connecttion Successful !")
except Exception as e:
print ("Connect to server Faild!: Server IP is right? Server is opend?")
self.client.tcp_flag=False
while True:
try:
alldata=self.client.receive_data()
except:
self.client.tcp_flag=False
break
#print(alldata)
if alldata=='':
break
else:
cmdArray=alldata.split('\n')
#print(cmdArray)
if cmdArray[-1] !="":
                    cmdArray=cmdArray[:-1]
for oneCmd in cmdArray:
data=oneCmd.split("#")
#print(data)
if data=="":
self.client.tcp_flag=False
break
elif data[0]==cmd.CMD_SONIC:
self.Button_Sonic.setText(data[1]+'cm')
#self.label_sonic.setText('Obstacle:'+data[1]+'cm')
#print('Obstacle:',data[1])
elif data[0]==cmd.CMD_POWER:
if data[1]!="":
power_value=round((float(data[1]) - 7.00) / 1.40 * 100)
self.progress_Power.setValue(power_value)
elif data[0]==cmd.CMD_RELAX:
if data[1]=="0":
self.Button_Relax.setText('Relax')
else:
self.Button_Relax.setText('"Too tired..."')
def refresh_image(self):
try:
if self.client.video_flag == False:
height, width, bytesPerComponent=self.client.image.shape
#print (height, width, bytesPerComponent)
cv2.cvtColor(self.client.image, cv2.COLOR_BGR2RGB, self.client.image)
QImg = QImage(self.client.image.data, width, height, 3 * width, QImage.Format_RGB888)
self.Video.setPixmap(QPixmap.fromImage(QImg))
self.client.video_flag = True
except Exception as e:
print(e)
#BALL
def chase_ball_and_find_face(self):
if self.Button_Ball_And_Face.text() == 'Face':
self.client.face_flag=True
self.client.ball_flag = False
self.Button_Ball_And_Face.setText('Ball')
elif self.Button_Ball_And_Face.text() == 'Ball':
self.client.face_flag=False
self.client.ball_flag = True
self.Button_Ball_And_Face.setText('Close')
else:
self.client.face_flag = False
self.client.ball_flag = False
self.stop()
self.Button_Ball_And_Face.setText('Face')
#CONNECT
def connect(self):
file=open('IP.txt','w')
file.write(self.lineEdit_IP_Adress.text())
file.close()
if self.Button_Connect.text()=='Connect':
self.IP = self.lineEdit_IP_Adress.text()
self.client.turn_on_client(self.IP)
self.video=threading.Thread(target=self.client.receiving_video,args=(self.IP,))
self.instruction=threading.Thread(target=self.receive_instruction,args=(self.IP,))
self.video.start()
self.instruction.start()
self.Button_Connect.setText('Disconnect')
self.timer_power.start(1000)
else:
try:
stop_thread(self.video)
except:
pass
try:
stop_thread(self.instruction)
except:
pass
self.client.tcp_flag=False
self.client.turn_off_client()
self.Button_Connect.setText('Connect')
self.timer_power.stop()
def stand(self):
self.initial=False
#self.drawpoint = [585, 135]
self.Button_IMU.setText('Balance')
self.slider_roll.setValue(0)
time.sleep(0.1)
self.slider_pitch.setValue(0)
time.sleep(0.1)
self.slider_yaw.setValue(0)
time.sleep(0.1)
self.slider_horizon.setValue(0)
time.sleep(0.1)
self.initial = True
#MOVE
def stop(self):
command=cmd.CMD_MOVE_STOP+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def forward(self):
self.stand()
command=cmd.CMD_MOVE_FORWARD+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def backward(self):
self.stand()
command=cmd.CMD_MOVE_BACKWARD+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def step_left(self):
self.stand()
command=cmd.CMD_MOVE_LEFT+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def step_right(self):
self.stand()
command=cmd.CMD_MOVE_RIGHT+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def left(self):
self.stand()
command=cmd.CMD_TURN_LEFT+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def right(self):
self.stand()
command=cmd.CMD_TURN_RIGHT+"#"+str(self.slider_speed.value())+'\n'
self.client.send_data(command)
#print (command)
def speed(self):
self.client.move_speed=str(self.slider_speed.value())
self.label_speed.setText(str(self.slider_speed.value()))
#relax
def relax(self):
if self.Button_Relax.text() == 'Relax':
command=cmd.CMD_RELAX+'\n'
self.client.send_data(command)
#print (command)
else:
pass
#BUZZER
def buzzer(self):
if self.Button_Buzzer.text() == 'Buzzer':
command=cmd.CMD_BUZZER+'#1'+'\n'
self.client.send_data(command)
self.Button_Buzzer.setText('Noise')
#print (command)
else:
command=cmd.CMD_BUZZER+'#0'+'\n'
self.client.send_data(command)
self.Button_Buzzer.setText('Buzzer')
#print (command)
#BALANCE
def imu(self):
if self.Button_IMU.text()=='Balance':
command=cmd.CMD_BALANCE+'#1'+'\n'
self.client.send_data(command)
self.Button_IMU.setText("Close")
#print (command)
else:
command=cmd.CMD_BALANCE+'#0'+'\n'
self.client.send_data(command)
self.Button_IMU.setText('Balance')
#print (command)
#SNOIC
def sonic(self):
if self.Button_Sonic.text() == 'Sonic':
self.timer_sonic.start(100)
self.Button_Sonic.setText('Close')
else:
self.timer_sonic.stop()
self.Button_Sonic.setText('Sonic')
#
def getSonicData(self):
command=cmd.CMD_SONIC+'\n'
self.client.send_data(command)
#print (command)
#HEIGHT
def height(self):
try:
hei=str(self.slider_height.value())
self.label_height.setText(hei)
command=cmd.CMD_HEIGHT+"#"+hei+'\n'
self.client.send_data(command)
#print(command)
except Exception as e:
print(e)
#HORIZON
def horizon(self):
try:
hor=str(self.slider_horizon.value())
self.label_horizon.setText(hor)
command=cmd.CMD_HORIZON+"#"+hor+'\n'
if self.initial:
self.client.send_data(command)
#print(command)
except Exception as e:
print(e)
#HEAD
def head(self):
try:
angle=str(self.slider_head.value())
self.label_head.setText(angle)
command=cmd.CMD_HEAD+"#"+angle+'\n'
self.client.send_data(command)
#print(command)
except Exception as e:
print(e)
#POWER
def power(self):
try:
command=cmd.CMD_POWER+'\n'
self.client.send_data(command)
#print (command)
command = "CMD_WORKING_TIME" + '\n'
self.client.send_data(command)
except Exception as e:
print(e)
#ATTITUDE
def attitude(self,target1,target2):
try:
r=str(self.slider_roll.value())
p=str(self.slider_pitch.value())
y=str(self.slider_yaw.value())
command = cmd.CMD_ATTITUDE + '#' + r + '#' + p + '#' + y + '\n'
if self.initial:
self.client.send_data(command)
target1.setText(str(target2.value()))
self.drawpoint[0]=585+self.slider_yaw.value()*5
self.drawpoint[1]=135+self.slider_pitch.value()*5
self.update()
#self.repaint()
#print(command)
except Exception as e:
print(e)
def showCalibrationWindow(self):
self.stop()
self.calibrationWindow=calibrationWindow(self.client)
self.calibrationWindow.setWindowModality(Qt.ApplicationModal)
self.calibrationWindow.show()
#LED
def showLedWindow(self):
try:
self.ledWindow=ledWindow(self.client)
self.ledWindow.setWindowModality(Qt.ApplicationModal)
self.ledWindow.show()
except Exception as e:
print(e)
def showFaceWindow(self):
try:
self.faceWindow = faceWindow(self.client)
self.faceWindow.setWindowModality(Qt.ApplicationModal)
self.faceWindow.show()
self.client.face_id = True
except Exception as e:
print(e)
class faceWindow(QMainWindow,Ui_Face):
def __init__(self,client):
super(faceWindow,self).__init__()
self.setupUi(self)
self.setWindowIcon(QIcon('Picture/logo_Mini.png'))
self.label_video.setScaledContents(True)
self.label_video.setPixmap(QPixmap('Picture/dog_client.png'))
self.Button_Read_Face.clicked.connect(self.readFace)
self.client = client
self.face_image=''
self.photoCount=0
self.timeout=0
self.name = ''
self.readFaceFlag=False
# Timer
self.timer1 = QTimer(self)
self.timer1.timeout.connect(self.faceDetection)
self.timer1.start(10)
self.timer2 = QTimer(self)
self.timer2.timeout.connect(self.facePhoto)
def closeEvent(self, event):
self.timer1.stop()
self.client.face_id = False
def readFace(self):
try:
if self.Button_Read_Face.text()=="Read Face":
self.Button_Read_Face.setText("Reading")
self.timer2.start(10)
self.timeout=time.time()
else:
self.timer2.stop()
if self.photoCount!=0:
self.Button_Read_Face.setText("Waiting ")
self.client.face.trainImage()
QMessageBox.information(self, "Message", "success", QMessageBox.Yes)
self.Button_Read_Face.setText("Read Face")
                    self.lineEdit.setText("")
                    self.photoCount = 0
except Exception as e:
print(e)
def facePhoto(self):
try:
if self.photoCount==30:
self.photoCount==0
self.timer2.stop()
self.Button_Read_Face.setText("Waiting ")
self.client.face.trainImage()
QMessageBox.information(self, "Message", "success", QMessageBox.Yes)
self.Button_Read_Face.setText("Read Face")
                self.lineEdit.setText("")
else:
if len(self.face_image)>0:
self.name = self.lineEdit.text()
if len(self.name) > 0:
height, width= self.face_image.shape[:2]
QImg = QImage(self.face_image.data.tobytes(), width, height,3 * width,QImage.Format_RGB888)
self.label_photo.setPixmap(QPixmap.fromImage(QImg))
second=int(time.time() - self.timeout)
if second > 1:
self.saveFcaePhoto()
self.timeout=time.time()
else:
self.Button_Read_Face.setText("Reading "+str(1-second)+"S "+str(self.photoCount)+"/30")
self.face_image=''
else:
QMessageBox.information(self, "Message", "Please enter your name", QMessageBox.Yes)
self.timer2.stop()
self.Button_Read_Face.setText("Read Face")
except Exception as e:
print(e)
def saveFcaePhoto(self):
cv2.cvtColor(self.face_image, cv2.COLOR_BGR2RGB, self.face_image)
cv2.imwrite('Face/'+str(len(self.client.face.name))+'.jpg', self.face_image)
self.client.face.name.append([str(len(self.client.face.name)),str(self.name)])
self.client.face.Save_to_txt(self.client.face.name, 'Face/name')
self.client.face.name = self.client.face.Read_from_txt('Face/name')
self.photoCount += 1
self.Button_Read_Face.setText("Reading "+str(0)+" S "+str(self.photoCount)+"/30")
def faceDetection(self):
try:
if len(self.client.image)>0:
gray = cv2.cvtColor(self.client.image, cv2.COLOR_BGR2GRAY)
faces = self.client.face.detector.detectMultiScale(gray, 1.2, 5)
if len(faces) > 0:
for (x, y, w, h) in faces:
self.face_image = self.client.image[y-5:y + h+5, x-5:x + w+5]
cv2.rectangle(self.client.image, (x-20, y-20), (x + w+20, y + h+20), (0, 255, 0), 2)
if self.client.video_flag == False:
height, width, bytesPerComponent = self.client.image.shape
cv2.cvtColor(self.client.image, cv2.COLOR_BGR2RGB, self.client.image)
QImg = QImage(self.client.image.data.tobytes(), width, height, 3 * width, QImage.Format_RGB888)
self.label_video.setPixmap(QPixmap.fromImage(QImg))
self.client.video_flag = True
except Exception as e:
print(e)
class calibrationWindow(QMainWindow,Ui_calibration):
def __init__(self,client):
super(calibrationWindow,self).__init__()
self.setupUi(self)
self.setWindowIcon(QIcon('Picture/logo_Mini.png'))
self.label_picture.setScaledContents (True)
self.label_picture.setPixmap(QPixmap('Picture/dog_calibration.png'))
self.point=self.Read_from_txt('point')
self.set_point(self.point)
self.client=client
self.leg='one'
self.x=0
self.y=0
self.z=0
self.radioButton_one.setChecked(True)
self.radioButton_one.toggled.connect(lambda: self.leg_point(self.radioButton_one))
self.radioButton_two.setChecked(False)
self.radioButton_two.toggled.connect(lambda: self.leg_point(self.radioButton_two))
self.radioButton_three.setChecked(False)
self.radioButton_three.toggled.connect(lambda: self.leg_point(self.radioButton_three))
self.radioButton_four.setChecked(False)
self.radioButton_four.toggled.connect(lambda: self.leg_point(self.radioButton_four))
self.Button_Save.clicked.connect(self.save)
self.Button_X1.clicked.connect(self.X1)
self.Button_X2.clicked.connect(self.X2)
self.Button_Y1.clicked.connect(self.Y1)
self.Button_Y2.clicked.connect(self.Y2)
self.Button_Z1.clicked.connect(self.Z1)
self.Button_Z2.clicked.connect(self.Z2)
def X1(self):
self.get_point()
self.x +=1
command=cmd.CMD_CALIBRATION+'#'+self.leg+'#'+str(self.x)+'#'+str(self.y)+'#'+str(self.z)+'\n'
self.client.send_data(command)
#print(command)
self.set_point()
def X2(self):
self.get_point()
self.x -= 1
command=cmd.CMD_CALIBRATION+'#'+self.leg+'#'+str(self.x)+'#'+str(self.y)+'#'+str(self.z)+'\n'
self.client.send_data(command)
#print(command)
self.set_point()
def Y1(self):
self.get_point()
self.y += 1
command=cmd.CMD_CALIBRATION+'#'+self.leg+'#'+str(self.x)+'#'+str(self.y)+'#'+str(self.z)+'\n'
self.client.send_data(command)
#print(command)
self.set_point()
def Y2(self):
self.get_point()
self.y -= 1
command=cmd.CMD_CALIBRATION+'#'+self.leg+'#'+str(self.x)+'#'+str(self.y)+'#'+str(self.z)+'\n'
self.client.send_data(command)
#print(command)
self.set_point()
def Z1(self):
self.get_point()
self.z += 1
command=cmd.CMD_CALIBRATION+'#'+self.leg+'#'+str(self.x)+'#'+str(self.y)+'#'+str(self.z)+'\n'
self.client.send_data(command)
#print(command)
self.set_point()
def Z2(self):
self.get_point()
self.z -= 1
command=cmd.CMD_CALIBRATION+'#'+self.leg+'#'+str(self.x)+'#'+str(self.y)+'#'+str(self.z)+'\n'
self.client.send_data(command)
#print(command)
self.set_point()
def set_point(self,data=None):
if data==None:
if self.leg== "one":
self.one_x.setText(str(self.x))
self.one_y.setText(str(self.y))
self.one_z.setText(str(self.z))
self.point[0][0]=self.x
self.point[0][1]=self.y
self.point[0][2]=self.z
elif self.leg== "two":
self.two_x.setText(str(self.x))
self.two_y.setText(str(self.y))
self.two_z.setText(str(self.z))
self.point[1][0]=self.x
self.point[1][1]=self.y
self.point[1][2]=self.z
elif self.leg== "three":
self.three_x.setText(str(self.x))
self.three_y.setText(str(self.y))
self.three_z.setText(str(self.z))
self.point[2][0]=self.x
self.point[2][1]=self.y
self.point[2][2]=self.z
elif self.leg== "four":
self.four_x.setText(str(self.x))
self.four_y.setText(str(self.y))
self.four_z.setText(str(self.z))
self.point[3][0]=self.x
self.point[3][1]=self.y
self.point[3][2]=self.z
else:
self.one_x.setText(str(data[0][0]))
self.one_y.setText(str(data[0][1]))
self.one_z.setText(str(data[0][2]))
self.two_x.setText(str(data[1][0]))
self.two_y.setText(str(data[1][1]))
self.two_z.setText(str(data[1][2]))
self.three_x.setText(str(data[2][0]))
self.three_y.setText(str(data[2][1]))
self.three_z.setText(str(data[2][2]))
self.four_x.setText(str(data[3][0]))
self.four_y.setText(str(data[3][1]))
self.four_z.setText(str(data[3][2]))
def get_point(self):
if self.leg== "one":
self.x = int(self.one_x.text())
self.y = int(self.one_y.text())
self.z = int(self.one_z.text())
elif self.leg== "two":
self.x = int(self.two_x.text())
self.y = int(self.two_y.text())
self.z = int(self.two_z.text())
elif self.leg== "three":
self.x = int(self.three_x.text())
self.y = int(self.three_y.text())
self.z = int(self.three_z.text())
elif self.leg== "four":
self.x = int(self.four_x.text())
self.y = int(self.four_y.text())
self.z = int(self.four_z.text())
def save(self):
command=cmd.CMD_CALIBRATION+'#'+'save'+'\n'
self.client.send_data(command)
self.point[0][0] = self.one_x.text()
self.point[0][1] = self.one_y.text()
self.point[0][2] = self.one_z.text()
self.point[1][0] = self.two_x.text()
self.point[1][1] = self.two_y.text()
self.point[1][2] = self.two_z.text()
self.point[2][0] = self.three_x.text()
self.point[2][1] = self.three_y.text()
self.point[2][2] = self.three_z.text()
self.point[3][0] = self.four_x.text()
self.point[3][1] = self.four_y.text()
self.point[3][2] = self.four_z.text()
self.Save_to_txt(self.point,'point')
reply = QMessageBox.information(self,
"Message",
"Saved successfully",
QMessageBox.Yes)
#print(command)
def Read_from_txt(self,filename):
file1 = open(filename + ".txt", "r")
list_row = file1.readlines()
list_source = []
for i in range(len(list_row)):
column_list = list_row[i].strip().split("\t")
list_source.append(column_list)
for i in range(len(list_source)):
for j in range(len(list_source[i])):
list_source[i][j] = int(list_source[i][j])
file1.close()
return list_source
def Save_to_txt(self,list, filename):
file2 = open(filename + '.txt', 'w')
for i in range(len(list)):
for j in range(len(list[i])):
file2.write(str(list[i][j]))
file2.write('\t')
file2.write('\n')
file2.close()
def leg_point(self,leg):
if leg.text() == "One":
if leg.isChecked() == True:
self.leg = "one"
elif leg.text() == "Two":
if leg.isChecked() == True:
self.leg = "two"
elif leg.text() == "Three":
if leg.isChecked() == True:
self.leg = "three"
elif leg.text() == "Four":
if leg.isChecked() == True:
self.leg = "four"
class ColorDialog(QtWidgets.QColorDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.setOptions(self.options() | QtWidgets.QColorDialog.DontUseNativeDialog)
for children in self.findChildren(QtWidgets.QWidget):
classname = children.metaObject().className()
if classname not in ("QColorPicker", "QColorLuminancePicker"):
children.hide()
class ledWindow(QMainWindow,Ui_led):
def __init__(self,client):
super(ledWindow,self).__init__()
self.setupUi(self)
self.client = client
self.setWindowIcon(QIcon('Picture/logo_Mini.png'))
self.hsl = [0, 0, 1]
self.rgb = [0, 0, 0]
self.dial_color.setRange(0, 360)
self.dial_color.setNotchesVisible(True)
self.dial_color.setWrapping(True)
self.dial_color.setPageStep(10)
self.dial_color.setNotchTarget(10)
self.dial_color.valueChanged.connect(self.dialValueChanged)
composite_2f = lambda f, g: lambda t: g(f(t))
self.hsl_to_rgb255 = composite_2f(self.hsl_to_rgb01, self.rgb01_to_rgb255)
self.hsl_to_rgbhex = composite_2f(self.hsl_to_rgb255, self.rgb255_to_rgbhex)
self.rgb255_to_hsl = composite_2f(self.rgb255_to_rgb01, self.rgb01_to_hsl)
self.rgbhex_to_hsl = composite_2f(self.rgbhex_to_rgb255, self.rgb255_to_hsl)
self.colordialog = ColorDialog()
self.colordialog.currentColorChanged.connect(self.onCurrentColorChanged)
lay = QtWidgets.QVBoxLayout(self.widget)
lay.addWidget(self.colordialog, alignment=QtCore.Qt.AlignCenter)
self.pushButtonLightsOut.clicked.connect(self.turnOff)
self.radioButtonOne.setChecked(True)
self.radioButtonOne.toggled.connect(lambda: self.ledMode(self.radioButtonOne))
self.radioButtonTwo.setChecked(False)
self.radioButtonTwo.toggled.connect(lambda: self.ledMode(self.radioButtonTwo))
self.radioButtonThree.setChecked(False)
self.radioButtonThree.toggled.connect(lambda: self.ledMode(self.radioButtonThree))
self.radioButtonFour.setChecked(False)
self.radioButtonFour.toggled.connect(lambda: self.ledMode(self.radioButtonFour))
self.radioButtonFive.setChecked(False)
self.radioButtonFive.toggled.connect(lambda: self.ledMode(self.radioButtonFive))
def turnOff(self):
command = cmd.CMD_LED_MOD + '#' + '0' + '\n'
self.client.send_data(command)
#print(command)
def ledMode(self,index):
if index.text() == "Mode 1":
if index.isChecked() == True:
command = cmd.CMD_LED_MOD + '#' + '1' + '\n'
self.client.send_data(command)
#print(command)
elif index.text() == "Mode 2":
if index.isChecked() == True:
command = cmd.CMD_LED_MOD + '#' + '2' + '\n'
self.client.send_data(command)
#print(command)
elif index.text() == "Mode 3":
if index.isChecked() == True:
command = cmd.CMD_LED_MOD + '#' + '3' + '\n'
self.client.send_data(command)
#print(command)
elif index.text() == "Mode 4":
if index.isChecked() == True:
command = cmd.CMD_LED_MOD + '#' + '4' + '\n'
self.client.send_data(command)
#print(command)
elif index.text() == "Mode 5":
if index.isChecked() == True:
command = cmd.CMD_LED_MOD + '#' + '5' + '\n'
self.client.send_data(command)
#print(command)
def mode1Color(self):
if (self.radioButtonOne.isChecked() == True) or (self.radioButtonThree.isChecked() == True):
command = cmd.CMD_LED + '#' + '255' + '#' + str(self.rgb[0]) + '#' + str(self.rgb[1]) + '#' + str(self.rgb[2]) + '\n'
self.client.send_data(command)
#print(command)
def onCurrentColorChanged(self, color):
try:
self.rgb = self.rgbhex_to_rgb255(color.name())
self.hsl = self.rgb255_to_hsl(self.rgb)
self.changeHSLText()
self.changeRGBText()
#print(color.name(), self.rgb, self.hsl)
self.mode1Color()
self.update()
except Exception as e:
print(e)
def paintEvent(self, e):
try:
qp = QPainter()
qp.begin(self)
brush = QBrush(QColor(self.rgb[0], self.rgb[1], self.rgb[2]))
qp.setBrush(brush)
qp.drawRect(20, 10, 80, 30)
qp.end()
except Exception as e:
print(e)
def dialValueChanged(self):
try:
self.lineEdit_H.setText(str(self.dial_color.value()))
self.changeHSL()
self.hex = self.hsl_to_rgbhex((self.hsl[0], self.hsl[1], self.hsl[2]))
self.rgb = self.rgbhex_to_rgb255(self.hex)
self.changeRGBText()
#print(self.rgb, self.hsl)
self.mode1Color()
self.update()
except Exception as e:
print(e)
def changeHSL(self):
self.hsl[0] = float(self.lineEdit_H.text())
self.hsl[1] = float(self.lineEdit_S.text())
self.hsl[2] = float(self.lineEdit_L.text())
def changeHSLText(self):
self.lineEdit_H.setText(str(int(self.hsl[0])))
self.lineEdit_S.setText(str(round(self.hsl[1], 1)))
self.lineEdit_L.setText(str(round(self.hsl[2], 1)))
def changeRGBText(self):
self.lineEdit_R.setText(str(self.rgb[0]))
self.lineEdit_G.setText(str(self.rgb[1]))
self.lineEdit_B.setText(str(self.rgb[2]))
def rgb255_to_rgbhex(self, rgb: np.array) -> str:
f = lambda n: 0 if n < 0 else 255 if n > 255 else int(n)
return '#%02x%02x%02x' % (f(rgb[0]), f(rgb[1]), f(rgb[2]))
def rgbhex_to_rgb255(self, rgbhex: str) -> np.array:
if rgbhex[0] == '#':
rgbhex = rgbhex[1:]
r = int(rgbhex[0:2], 16)
g = int(rgbhex[2:4], 16)
b = int(rgbhex[4:6], 16)
return np.array((r, g, b))
def rgb01_to_rgb255(self, rgb: np.array) -> np.array:
return rgb * 255
def rgb255_to_rgb01(self, rgb: np.array) -> np.array:
return rgb / 255
def rgb01_to_hsl(self, rgb: np.array) -> np.array:
r, g, b = rgb
lmin = min(r, g, b)
lmax = max(r, g, b)
if lmax == lmin:
h = 0
elif lmin == b:
h = 60 + 60 * (g - r) / (lmax - lmin)
elif lmin == r:
h = 180 + 60 * (b - g) / (lmax - lmin)
elif lmin == g:
h = 300 + 60 * (r - b) / (lmax - lmin)
else:
h = 0
s = lmax - lmin
l = (lmax + lmin) / 2
hsl = np.array((h, s, l))
return hsl
def hsl_to_rgb01(self, hsl: np.array) -> np.array:
h, s, l = hsl
lmin = l - s / 2
lmax = l + s / 2
ldif = lmax - lmin
if h < 60:
r, g, b = lmax, lmin + ldif * (0 + h) / 60, lmin
elif h < 120:
r, g, b = lmin + ldif * (120 - h) / 60, lmax, lmin
elif h < 180:
r, g, b = lmin, lmax, lmin + ldif * (h - 120) / 60
elif h < 240:
r, g, b = lmin, lmin + ldif * (240 - h) / 60, lmax
elif h < 300:
r, g, b = lmin + ldif * (h - 240) / 60, lmin, lmax
else:
r, g, b = lmax, lmin, lmin + ldif * (360 - h) / 60
rgb = np.array((r, g, b))
return rgb
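    # Hedged example of the colour helpers above (values are illustrative):
    #   self.rgbhex_to_rgb255('#ff0000')           # -> array([255, 0, 0])
    #   self.rgb255_to_hsl(np.array((255, 0, 0)))  # -> hue 0, s 1.0, l 0.5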
if __name__ == '__main__':
app = QApplication(sys.argv)
myshow=MyWindow()
myshow.show()
sys.exit(app.exec_())
``` |
{
"source": "jhuckestein/Python",
"score": 3
} |
#### File: jhuckestein/Python/publishOutlinev2.py
```python
import sys
import openpyxl
import os
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from reportlab.platypus import Paragraph, Frame
from PyPDF2 import PdfFileWriter, PdfFileReader
def OpenExcelFile(WBName, WSName, APIName):
try:
WBHandle = openpyxl.load_workbook(WBName)
if WSName in WBHandle.get_sheet_names():
#We found the worksheet name in the workbook
WSHandle = WBHandle.get_sheet_by_name(WSName)
if APIName in WBHandle.get_sheet_names():
#We found the 0-ProjectAPI worksheet in the workbook
APIHandle = WBHandle.get_sheet_by_name(APIName)
return(WBHandle, WSHandle, APIHandle)
else:
print("Was not able to find ", APIName)
else:
print("Was not able to find ", WSName)
sys.exit()
except IOError:
print("Was not able to open ", WBName)
sys.exit()
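# Hedged usage sketch (the workbook filename is an assumption; the sheet names
# '1-Outline' and '0-ProjectAPI' come from the comments further below):
#   WB, WS, API = OpenExcelFile('CourseOutline.xlsx', '1-Outline', '0-ProjectAPI')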
def CollectTextStrings(OH, AH):
cseName = str(AH['C6'].value)
cseNumber = str(AH['C7'].value)
durat = str(AH['C21'].value) + ' ' + AH['C22'].value
cseType = str(AH['C23'].value)
desc = str(OH['B23'].value)
aud = str(OH['B32'].value)
reqs = str(OH['B36'].value)
return cseName, cseNumber, durat, cseType, desc, aud, reqs
#Note: GetObjectives is currently developed for 1-Outline as of 12/13/2017. This means
#that the column and row numbers could change at some future point.
def GetObjectives(WSHandle):
objectives = []
i = 40 #This value is used because 1-Outline currently has Learning Objectives beginning at C40-C47
while WSHandle.cell(row = i, column = 3).value:
objectives.append(WSHandle.cell(row = i, column = 3).value)
i += 1
#Now see if the list is populated, if not append a string to the first value
if objectives:
return objectives
else:
objectives.append(' ')
return objectives
#Note: The current course outline populates 1-Outline on D101-D105.
def GetPreReqs(WSHandle):
preReqs = []
i = 101 #This value is used because 1-Outline currently has Prerequisites beginning at D101
while WSHandle.cell(row = i, column = 4).value:
preReqs.append(WSHandle.cell(row = i, column = 4).value)
i += 1
#Now see if the list is populated, if not append a string to the first value
if preReqs:
return preReqs
else:
preReqs.append(' ')
return preReqs
#Note: GetOutline is currently developed for 1RS-Outline as of 12/7/2017. This means
#that the column and row numbers could change at some future point.
def GetOutline(WSHandle):
Coutline = [] #Center pane outline Ch1-4
Routline = [] #Right pane outline Ch5-8
CCont = [] #Keeps track of major headers for printing
RCont = [] #Keeps track of major headers for printing
lineNum = 49 #initialized to starting line for course outlines
for i in range(1, 5):
lineNum += 1
if WSHandle.cell(row = lineNum, column = 3).value: #Case where we hit major header
CCont.append('Major')
Coutline.append(str(i) + ' ' + str(WSHandle.cell(row = lineNum, column = 3).value))
for k in range (1, 6): #Case where we check for minor header
lineNum += 1
if WSHandle.cell(row = lineNum, column = 4).value:
CCont.append('minor')
Coutline.append(str(i) + '.' + str(k) + ' ' + str(WSHandle.cell(row = lineNum, column = 4).value))
lineNum = 73 #reset the initialization just to be sure to process right half of outline
for i in range(5, 9):
lineNum += 1
if WSHandle.cell(row = lineNum, column = 3).value:
RCont.append('Major')
Routline.append(str(i) + ' ' + str(WSHandle.cell(row = lineNum, column = 3).value))
for k in range (1, 6):
lineNum += 1
if WSHandle.cell(row = lineNum, column = 4).value:
RCont.append('minor')
Routline.append(str(i) + '.' + str(k) + ' ' + str(WSHandle.cell(row = lineNum, column = 4).value))
return Coutline, CCont, Routline, RCont
#The next function creates a PDF text file from collected information to be used as a watermark
def CreateTextFile(tFile, cName, cNum, dur, cType, desc, aud, objectives, prereqs, required, cOutline, cControl, rOutline, rControl):
#Create a new PDF (called a Canvas) with our collected Outline
c = canvas.Canvas(tFile, pagesize=letter)
##Now that the file is created draw in the top header section to include the course name, course type,
##the duration, and the course number. Note: these are not flowable fields just plain text strings.
c.setFont("Helvetica-Bold", 21)
c.drawString(40, 570, cName)
c.setFont("Helvetica", 12)
c.drawString(40, 548, cType)
c.drawString(175, 548, dur)
c.drawString(330, 548, cNum)
#c.setFont("Helvetica", 10.5)
#c.drawString(40, 520, description)
##This next block is for the top frame used for the description
styles = getSampleStyleSheet()
styleN = styles['Normal']
story = [] #Note: a 'story' is a ReportLabs term for 'flowables'
story.append(Paragraph(desc, styleN))
fTop = Frame(0.5*inch, 6.2*inch, 10*inch, 1.25*inch, showBoundary = 0)
fTop.addFromList(story, c)
##This next block is for the left frame used for audience, objectives, pre-requisites, and equipment
styleH = styles['Heading2']
styleh = styles['Heading5']
stylen = styles['BodyText']
styleB = styles['Bullet']
storyL = []
storyL.append(Paragraph('Intended Audience', styleH))
storyL.append(Paragraph(aud, styleN))
storyL.append(Paragraph('Learning Objectives', styleH))
for obj in range(0, len(objectives)):
storyL.append(Paragraph(' -' + objectives[obj], styleB))
storyL.append(Paragraph('Suggested Prerequisites', styleH))
for pre in range(0, len(prereqs)):
storyL.append(Paragraph(' -' + prereqs[pre], styleB))
storyL.append(Paragraph('Required Equipment', styleH))
storyL.append(Paragraph(required, styleB))
#Note: previously we thought prereqs would be a bulleted list and now it is just text
#for req in range(0, len(required)):
# storyL.append(Paragraph(' -' + required[req], styleB))
fLeft = Frame(0.5*inch, inch, 4.7*inch, 5*inch, showBoundary = 0)
fLeft.addFromList(storyL, c)
##This next block separates the outline content into center and right frame stories
storyC = []
storyR = []
BHCount = 0 ##This variable keeps track of the number of Big Headers encountered in the outline list
storyC.append(Paragraph('Course Outline', styleH))
for line in range(0, len(cOutline)):
if cControl[line] == 'Major':
storyC.append(Paragraph(cOutline[line], styleh)) #Use bold format for Major Headers
else:
storyC.append(Paragraph(cOutline[line], styleN)) #Use normal format for minor headers
for line in range(0, len(rOutline)):
if rControl[line] == 'Major':
storyR.append(Paragraph(rOutline[line], styleh)) #Use bold format for Major Headers
else:
storyR.append(Paragraph(rOutline[line], styleN)) #Use normal format for minor headers
##This next block populates the center frame (Big Headers 1-4)
fCent = Frame(5.4*inch, 0.5*inch, 2.5*inch, 5.5*inch, showBoundary = 0)
fCent.addFromList(storyC, c)
##This next block is for the right frame (Big Headers 5-8)
fRight = Frame(8*inch, 0.5*inch, 2.5*inch, 5.15*inch, showBoundary = 0)
fRight.addFromList(storyR, c)
c.save()
#The next function takes the text-file PDF and uses it as a watermark to be placed upon the
#graphics template, and produces the outputOutline.pdf
def CreateOutline(template, inFile, outFile):
output = PdfFileWriter()
ipdf = PdfFileReader(open(template, 'rb'))
wpdf = PdfFileReader(open(inFile, 'rb'))
watermark = wpdf.getPage(0)
    for i in range(ipdf.getNumPages()):
page = ipdf.getPage(i)
page.mergePage(watermark)
output.addPage(page)
with open(outFile, 'wb') as f:
output.write(f)
################################################
# Main Program begins below
OutlineTab = '1-Outline' #If the tabs on the Course Outline in Excel are re-named this will change.
APITab = '1-Budgeting' #As above this tracks the worksheet tab in the Excel Course Outline.
graphicsTemplate = 'Example-NWV.pdf' #This variable holds the pdf file used as a graphics template
#TextFile = This variable is the name of the text file which will be used as a watermark set below
#outputFile = This is the name of the final outline produced as a PDF set below
if (len(sys.argv) > 1):
if sys.argv[1] == 'all': #case where a batch process of a directory is desired
inputFiles = []
inputFiles = os.listdir(".")
# for dirpath, dirs, files in os.walk("."):
# inputFiles.append(files)
print("inputFiles =", inputFiles)
for file in range(0, len(inputFiles)):
if inputFiles[file].endswith('xlsm'): #case where a file in the directory is the correct xlsm file
##Set the watermark file and the finished outline to match the given filename
currentFile = inputFiles[file]
TextFile = 'text' + currentFile.replace("xlsm", "pdf")
outputFile = currentFile.replace("xlsm", "pdf")
##Attempt to open the workbook and sheets
try:
WorkbookH, OutlineWksheetH, ApiH = OpenExcelFile(currentFile, OutlineTab, APITab)
##Grab all of the key fields listed in the design section
courseName, courseNumber, duration, courseType, description, audience, requirements = CollectTextStrings(OutlineWksheetH, ApiH)
learnObjectives = list(GetObjectives(OutlineWksheetH))
prerequisites = list(GetPreReqs(OutlineWksheetH))
courseOutline, CControl, rightOutline, RControl = GetOutline(OutlineWksheetH)
#At this point I have all of the information scraped from the Excel
#outline, and am ready to create a new PDF to watermark with the PDF Template
try:
CreateTextFile(TextFile, courseName, courseNumber, duration, courseType, description, audience, learnObjectives, prerequisites, requirements, courseOutline, CControl, rightOutline, RControl)
except:
print("Was not able to create a textfile.pdf to watermark the graphics template.")
#Now merge the example.pdf with the template to get output
try:
CreateOutline(graphicsTemplate, TextFile, outputFile)
except:
print("Was not able to assemble the final Outline.pdf from the watermark and template.")
#Tell the user the outline is complete
print('Done processing ', currentFile)
except:
print('Was not able to open the requested file =', sys.argv[1])
            #End of the 'endswith' check on the current file - the else does nothing
        #End of the for loop over inputFiles
else: #case where the user input a specific filename
##Set the watermark file and the finished outline to match the given filename
enteredFile = sys.argv[1]
TextFile = 'text' + enteredFile.replace("xlsm", "pdf")
outputFile = enteredFile.replace("xlsm", "pdf")
##Attempt to open the workbook and sheets
try:
WorkbookH, OutlineWksheetH, ApiH = OpenExcelFile(sys.argv[1], OutlineTab, APITab)
##Grab all of the key fields listed in the design section
courseName, courseNumber, duration, courseType, description, audience, requirements = CollectTextStrings(OutlineWksheetH, ApiH)
learnObjectives = list(GetObjectives(OutlineWksheetH))
prerequisites = list(GetPreReqs(OutlineWksheetH))
courseOutline, CControl, rightOutline, RControl = GetOutline(OutlineWksheetH)
#At this point I have all of the information scraped from the Excel
#outline, and am ready to create a new PDF to watermark with the PDF Template
try:
CreateTextFile(TextFile, courseName, courseNumber, duration, courseType, description, audience, learnObjectives, prerequisites, requirements, courseOutline, CControl, rightOutline, RControl)
except:
print("Was not able to create a textfile.pdf to watermark the graphics template.")
#Now merge the example.pdf with the template to get output
try:
CreateOutline(graphicsTemplate, TextFile, outputFile)
except:
print("Was not able to assemble the final Outline.pdf from the watermark and template.")
#Tell the user the outline is complete
print('Done')
except:
print('Was not able to open the requested file =', sys.argv[1])
#If enough arguments were not specified give the user a hint
else:
print('Input Arguments <CourseOutline.xlsm> or <all>')
``` |
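The script above can also be driven function by function. Below is a minimal, hedged sketch of the single-workbook path, mirroring the "specific filename" branch of the main program; the workbook name is illustrative, and it assumes the workbook contains the '1-Outline' and '1-Budgeting' worksheets and that 'Example-NWV.pdf' exists as the graphics template.
```python
# Hypothetical single-file run of the pipeline defined above (file names are assumptions).
workbook = 'IntroCourse.xlsm'
text_pdf = 'text' + workbook.replace('xlsm', 'pdf')
final_pdf = workbook.replace('xlsm', 'pdf')
wb, outline_ws, api_ws = OpenExcelFile(workbook, '1-Outline', '1-Budgeting')
name, number, duration, ctype, desc, aud, reqs = CollectTextStrings(outline_ws, api_ws)
objectives = GetObjectives(outline_ws)
prereqs = GetPreReqs(outline_ws)
c_outline, c_ctrl, r_outline, r_ctrl = GetOutline(outline_ws)
CreateTextFile(text_pdf, name, number, duration, ctype, desc, aud,
               objectives, prereqs, reqs, c_outline, c_ctrl, r_outline, r_ctrl)
CreateOutline('Example-NWV.pdf', text_pdf, final_pdf)   # watermark the template -> final_pdf
```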
{
"source": "jhu-dvrk/ros2_dvrk_python",
"score": 2
} |
#### File: ros2_dvrk_python/scripts/dvrk_psm_test.py
```python
import argparse
import sys
import time
import threading
import rclpy
import dvrk
import math
import numpy
import PyKDL
# example of application using arm.py
class example_application:
# configuration
def configure(self, node, expected_interval):
print('configuring dvrk_psm_test for node %s using namespace %s' % (node.get_name(), node.get_namespace()))
self.expected_interval = expected_interval
self.arm = dvrk.psm(arm_name = node.get_namespace(),
ros_node = node,
expected_interval = expected_interval)
# homing example
def home(self):
print('starting enable')
if not self.arm.enable(10):
sys.exit('failed to enable within 10 seconds')
print('starting home')
if not self.arm.home(10):
sys.exit('failed to home within 10 seconds')
# get current joints just to set size
print('move to starting position')
goal = numpy.copy(self.arm.setpoint_jp())
# go to zero position, make sure 3rd joint is past cannula
goal.fill(0)
goal[2] = 0.12
self.arm.move_jp(goal).wait()
# utility to position tool/camera deep enough before cartesian examples
def prepare_cartesian(self):
# make sure the tip is past the cannula and tool vertical
goal = numpy.copy(self.arm.setpoint_jp())
if ((self.arm.name().endswith('PSM1')) or (self.arm.name().endswith('PSM2'))
or (self.arm.name().endswith('PSM3'))):
print('preparing for cartesian motion')
# set in position joint mode
goal[0] = 0.0
goal[1] = 0.0
goal[2] = 0.12
goal[3] = 0.0
self.arm.move_jp(goal).wait()
# goal jaw control example
def run_jaw_move(self):
print('starting jaw move')
# try to open and close with the cartesian part of the arm in different modes
print('close and open without other move command')
input(" Press Enter to continue...")
print('closing (1)')
self.arm.jaw.close().wait()
print('opening (2)')
self.arm.jaw.open().wait()
print('closing (3)')
self.arm.jaw.close().wait()
print('opening (4)')
self.arm.jaw.open().wait()
# try to open and close with a joint goal
print('close and open with joint move command')
input(" Press Enter to continue...")
print('closing and moving up (1)')
self.arm.jaw.close().wait(is_busy = True)
self.arm.insert_jp(0.1).wait()
print('opening and moving down (2)')
self.arm.jaw.open().wait(is_busy = True)
self.arm.insert_jp(0.15).wait()
print('closing and moving up (3)')
self.arm.jaw.close().wait(is_busy = True)
self.arm.insert_jp(0.1).wait()
print('opening and moving down (4)')
self.arm.jaw.open().wait(is_busy = True)
self.arm.insert_jp(0.15).wait()
print('close and open with cartesian move command')
input(" Press Enter to continue...")
# try to open and close with a cartesian goal
self.prepare_cartesian()
initial_cartesian_position = PyKDL.Frame()
initial_cartesian_position.p = self.arm.setpoint_cp().p
initial_cartesian_position.M = self.arm.setpoint_cp().M
goal = PyKDL.Frame()
goal.p = self.arm.setpoint_cp().p
goal.M = self.arm.setpoint_cp().M
# motion parameters
amplitude = 0.05 # 5 cm
# first motion
goal.p[0] = initial_cartesian_position.p[0] - amplitude
goal.p[1] = initial_cartesian_position.p[1]
print('closing and moving right (1)')
self.arm.move_cp(goal).wait(is_busy = True)
self.arm.jaw.close().wait()
# second motion
goal.p[0] = initial_cartesian_position.p[0] + amplitude
goal.p[1] = initial_cartesian_position.p[1]
        print('opening and moving left (2)')
self.arm.move_cp(goal).wait(is_busy = True)
self.arm.jaw.open().wait()
# back to starting point
goal.p[0] = initial_cartesian_position.p[0]
goal.p[1] = initial_cartesian_position.p[1]
print('moving back (3)')
self.arm.move_cp(goal).wait()
# goal jaw control example
def run_jaw_servo(self):
print('starting jaw servo')
# try to open and close directly, needs interpolation
print('close and open without other servo command')
input(" Press Enter to continue...")
start_angle = math.radians(50.0)
self.arm.jaw.open(angle = start_angle).wait()
        # starting at 50 deg, the cosine profile below swings the jaw down by up to 2*amplitude and back
amplitude = math.radians(30.0)
duration = 5 # seconds
samples = int(duration / self.expected_interval)
# create a new goal starting with current position
for i in range(samples * 4):
tic = time.time()
goal = start_angle + amplitude * (math.cos(i * math.radians(360.0) / samples) - 1.0)
self.arm.jaw.servo_jp(numpy.array([goal]))
dt = time.time() - tic
time_left = self.expected_interval - dt
if time_left > 0:
time.sleep(time_left)
# main method
def run(self):
self.home()
self.run_jaw_move()
self.run_jaw_servo()
self.run_jaw_move() # just to make sure we can transition back to trajectory mode
if __name__ == '__main__':
# ros init node so we can use default ros arguments (e.g. __ns:= for namespace)
rclpy.init(args = sys.argv)
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--arm', type=str, required=True,
choices=['PSM1', 'PSM2', 'PSM3'],
help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace')
parser.add_argument('-i', '--interval', type=float, default=0.01,
help = 'expected interval in seconds between messages sent by the device')
args = parser.parse_args(sys.argv[1:]) # skip argv[0], script name
node = rclpy.create_node('dvrk_arm_test', namespace = args.arm)
application = example_application()
application.configure(node, args.interval)
executor = rclpy.executors.MultiThreadedExecutor()
executor.add_node(node)
executor_thread = threading.Thread(target = executor.spin, daemon = True)
executor_thread.start()
try:
application.run()
except KeyboardInterrupt:
pass
print('stopping ROS thread')
rclpy.shutdown()
executor_thread.join()
node.destroy_node()
```
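The jaw-servo example above builds its goal sequence from a cosine profile sampled at the expected command interval. The sketch below reproduces just that trajectory math offline, with no ROS or dVRK hardware attached; the interval and duration values are the same ones used above.
```python
# Offline sketch of the jaw-servo goal profile from run_jaw_servo (no hardware required).
import math
import numpy as np

expected_interval = 0.01            # seconds between servo commands (the -i default above)
duration = 5                        # seconds, as in run_jaw_servo
samples = int(duration / expected_interval)
start_angle = math.radians(50.0)
amplitude = math.radians(30.0)

goals = np.array([start_angle + amplitude * (math.cos(i * math.radians(360.0) / samples) - 1.0)
                  for i in range(samples * 4)])   # four full periods
print(f'{len(goals)} servo goals, min {math.degrees(goals.min()):.1f} deg, '
      f'max {math.degrees(goals.max()):.1f} deg')
```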
#### File: src/dvrk/teleop_psm.py
```python
import rospy  # note: this wrapper uses the ROS1 rospy API throughout (Publisher/Subscriber/init_node below)
from std_msgs.msg import Bool, Float64, Empty, String
from geometry_msgs.msg import Quaternion
class teleop_psm(object):
"""Simple dVRK teleop PSM API wrapping around ROS messages
"""
# initialize the teleop
def __init__(self, teleop_name, ros_namespace = ''):
# base class constructor in separate method so it can be called in derived classes
self.__init_teleop_psm(teleop_name, ros_namespace)
def __init_teleop_psm(self, teleop_name, ros_namespace = ''):
"""Constructor. This initializes a few data members. It
requires a teleop name, this will be used to find the ROS topics
for the console being controlled."""
# data members
self.__teleop_name = teleop_name
self.__ros_namespace = ros_namespace
self.__full_ros_namespace = self.__ros_namespace + self.__teleop_name
self.__scale = 0.0
# publishers
self.__set_scale_pub = rospy.Publisher(self.__full_ros_namespace
+ '/set_scale',
Float64, latch = True, queue_size = 1)
self.__set_registration_rotation_pub = rospy.Publisher(self.__full_ros_namespace
+ '/set_registration_rotation',
Quaternion, latch = True, queue_size = 1)
self.__set_desired_state_pub = rospy.Publisher(self.__full_ros_namespace
+ '/set_desired_state',
String, latch = True, queue_size = 1)
# subscribers
rospy.Subscriber(self.__full_ros_namespace
+ '/scale',
Float64, self.__scale_cb)
# create node
if not rospy.get_node_uri():
rospy.init_node('teleop_api', anonymous = True, log_level = rospy.WARN)
else:
rospy.logdebug(rospy.get_caller_id() + ' -> ROS already initialized')
def __scale_cb(self, data):
"""Callback for teleop scale.
:param data: the latest scale requested for the teleop"""
self.__scale = data.data
def set_scale(self, scale):
self.__set_scale_pub.publish(scale)
def get_scale(self):
return self.__scale
def set_registration_rotation(self, rotation):
"""Expect a PyKDL rotation matrix (PyKDL.Rotation)"""
q = Quaternion()
q.x, q.y, q.z, q.w = rotation.GetQuaternion()
self.__set_registration_rotation_pub.publish(q)
def enable(self):
self.__set_desired_state_pub.publish('ENABLED')
def disable(self):
self.__set_desired_state_pub.publish('DISABLED')
``` |
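A minimal usage sketch for the teleop_psm wrapper above follows. It assumes a running ROS master and a dVRK console exposing the teleop topics under the given name; the teleop name, namespace, and import path are illustrative assumptions based on the package layout shown.
```python
import PyKDL
from dvrk.teleop_psm import teleop_psm   # import path assumed from the package layout above

teleop = teleop_psm('MTMR_PSM1', ros_namespace='/dvrk/')   # names are illustrative
teleop.set_scale(0.2)                                      # publishes on .../set_scale
teleop.set_registration_rotation(PyKDL.Rotation.RotZ(3.14159 / 2.0))
teleop.enable()                                            # publishes 'ENABLED' on .../set_desired_state
print('scale reported by the console:', teleop.get_scale())
```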
{
"source": "jhuebotter/CartpoleSNNdemo",
"score": 3
} |
#### File: CartpoleSNNdemo/CartPole/load_cartpole_parameters.py
```python
from types import SimpleNamespace
import csv
def load_cartpole_parameters(dataset_path):
p = SimpleNamespace()
# region Get information about the pretrained network from the associated txt file
with open(dataset_path) as f:
reader = csv.reader(f)
updated_features = 0
for line in reader:
line = line[0]
if line[:len('# m: ')] == '# m: ':
p.m = float(line[len('# m: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# M: ')] == '# M: ':
p.M = float(line[len('# M: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# L: ')] == '# L: ':
p.L = float(line[len('# L: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# u_max: ')] == '# u_max: ':
p.u_max = float(line[len('# u_max: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# M_fric: ')] == '# M_fric: ':
p.M_fric = float(line[len('# M_fric: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# J_fric: ')] == '# J_fric: ':
p.J_fric = float(line[len('# J_fric: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# v_max: ')] == '# v_max: ':
p.v_max = float(line[len('# v_max: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# TrackHalfLength: ')] == '# TrackHalfLength: ':
p.TrackHalfLength = float(line[len('# TrackHalfLength: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# controlDisturbance: ')] == '# controlDisturbance: ':
p.controlDisturbance = float(line[len('# controlDisturbance: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# sensorNoise: ')] == '# sensorNoise: ':
p.sensorNoise = float(line[len('# sensorNoise: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# g: ')] == '# g: ':
p.g = float(line[len('# g: '):].rstrip("\n"))
updated_features += 1
continue
if line[:len('# k: ')] == '# k: ':
p.k = float(line[len('# k: '):].rstrip("\n"))
updated_features += 1
continue
if updated_features == 12:
break
return p
```
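To exercise the parser above without any recorded experiment data, the sketch below writes a tiny file in the same commented-header format (the values are made up) and loads it back; it assumes load_cartpole_parameters is in scope as defined above.
```python
# Hedged usage sketch: round-trip a synthetic parameter header through load_cartpole_parameters.
import tempfile, os

header_lines = [
    '# m: 0.087', '# M: 0.230', '# L: 0.395', '# u_max: 1.0',
    '# M_fric: 6.34', '# J_fric: 0.0025', '# v_max: 0.8',
    '# TrackHalfLength: 0.2', '# controlDisturbance: 0.0',
    '# sensorNoise: 0.0', '# g: 9.81', '# k: 0.3333',
]
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write('\n'.join(header_lines) + '\n')
    path = f.name

p = load_cartpole_parameters(path)
print(p.m, p.g, p.TrackHalfLength)   # -> 0.087 9.81 0.2
os.remove(path)
```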
#### File: CartpoleSNNdemo/Controllers/controller_do_mpc.py
```python
import do_mpc
import numpy as np
from Controllers.template_controller import template_controller
from CartPole.cartpole_model import Q2u, cartpole_ode_namespace
from CartPole.state_utilities import cartpole_state_vector_to_namespace
from types import SimpleNamespace
import yaml
config = yaml.load(open("config.yml", "r"), Loader=yaml.FullLoader)
dt_mpc_simulation = config["controller"]["do_mpc"]["dt_mpc_simulation"]
mpc_horizon = config["controller"]["do_mpc"]["mpc_horizon"]
# Perturbation factors:
# Change of output from optimal
p_Q = config["controller"]["do_mpc"]["p_Q"]
# Change of cost function
p_position = config["controller"]["do_mpc"]["p_position"]
p_positionD = config["controller"]["do_mpc"]["p_positionD"]
p_angle = config["controller"]["do_mpc"]["p_angle"]
l_angle = config["controller"]["do_mpc"]["l_angle"]
l_position = config["controller"]["do_mpc"]["l_position"]
l_positionD = config["controller"]["do_mpc"]["l_positionD"]
w_sum = l_angle + l_position + l_positionD
l_angle /= w_sum
l_position /= w_sum
l_positionD /= w_sum
class controller_do_mpc(template_controller):
def __init__(self,
position_init=0.0,
positionD_init=0.0,
angle_init=0.0,
angleD_init=0.0,
):
"""
Get configured do-mpc modules:
"""
# Container for the state of the cart
s = SimpleNamespace() # s like state
model_type = 'continuous' # either 'discrete' or 'continuous'
self.model = do_mpc.model.Model(model_type)
s.position = self.model.set_variable(var_type='_x', var_name='s.position', shape=(1, 1))
s.positionD = self.model.set_variable(var_type='_x', var_name='s.positionD', shape=(1, 1))
s.angle = self.model.set_variable(var_type='_x', var_name='s.angle', shape=(1, 1))
s.angleD = self.model.set_variable(var_type='_x', var_name='s.angleD', shape=(1, 1))
Q = self.model.set_variable(var_type='_u', var_name='Q')
target_position = self.model.set_variable('_tvp', 'target_position')
self.model.set_rhs('s.position', s.positionD)
self.model.set_rhs('s.angle', s.angleD)
angleD_next, positionD_next = cartpole_ode_namespace(s, Q2u(Q))
self.model.set_rhs('s.positionD', positionD_next)
self.model.set_rhs('s.angleD', angleD_next)
        # Simplified, normalized expressions for E_kin and E_pot as part of the cost function
cost_position = (s.position - target_position) ** 2
cost_positionD = s.positionD ** 2
cost_angleD = s.angleD**2
cost_angle_sin = np.sin(s.angle)**2
cost_angle = (s.angle/np.pi)**2
self.model.set_expression('cost_positionD', cost_positionD)
self.model.set_expression('cost_angleD', cost_angleD)
self.model.set_expression('cost_angle', cost_angle)
self.model.set_expression('cost_position', cost_position)
self.model.setup()
self.mpc = do_mpc.controller.MPC(self.model)
setup_mpc = {
'n_horizon': mpc_horizon,
't_step': dt_mpc_simulation,
'n_robust': 0,
'store_full_solution': False,
'store_lagr_multiplier': False,
'store_solver_stats': []
}
self.mpc.set_param(**setup_mpc)
# self.mpc.set_param(nlpsol_opts={'ipopt.linear_solver': 'mumps'})
        # Other possible linear solvers from the HSL library
        # give 2-3 times better performance.
        # However, when simulating at maximum speed-up the simulation blocks. Possibly a memory-leak issue?
self.mpc.set_param(nlpsol_opts = {'ipopt.linear_solver': 'MA57'})
# # Standard version
lterm = (
l_angle * (1+p_angle*np.random.uniform(-1.0, 1.0)) * self.model.aux['cost_angle']
+ l_position * (1+p_position*np.random.uniform(-1.0, 1.0)) * cost_position
+ l_positionD * (1+p_positionD*np.random.uniform(-1.0, 1.0)) * self.model.aux['cost_positionD']
)
# mterm = 400.0 * self.model.aux['E_kin_cart']
mterm = 0.0 * self.model.aux['cost_positionD']
# mterm = 0.0 * distance_difference # 5 * self.model.aux['E_kin_pol'] - 5 * self.model.aux['E_pot'] + 5 * self.model.aux['E_kin_cart']
self.mpc.set_rterm(Q=0.1)
# # Alternative versions of cost function to get more diverse data for learning cartpole model
# lterm = 20.0 * distance_difference
# mterm = 5 * self.model.aux['E_kin_pol'] - 5 * self.model.aux['E_pot'] + 5 * self.model.aux['E_kin_cart']
# self.mpc.set_rterm(Q=0.2)
#
# lterm = 20.0 * distance_difference + 5 * self.model.aux['E_kin_cart']
# mterm = 5 * self.model.aux['E_kin_pol'] - 5 * self.model.aux['E_pot'] + 200.0 * distance_difference
# self.mpc.set_rterm(Q=0.2)
self.mpc.set_objective(mterm=mterm, lterm=lterm)
self.mpc.bounds['lower', '_u', 'Q'] = -1.0
self.mpc.bounds['upper', '_u', 'Q'] = 1.0
self.tvp_template = self.mpc.get_tvp_template()
self.mpc.set_tvp_fun(self.tvp_fun)
# Suppress IPOPT outputs (optimizer info printed to the console)
suppress_ipopt = {'ipopt.print_level': 0, 'ipopt.sb': 'yes', 'print_time': 0}
self.mpc.set_param(nlpsol_opts=suppress_ipopt)
self.mpc.setup()
# Set initial state
self.x0 = self.mpc.x0
self.x0['s.position'] = position_init
self.x0['s.positionD'] = positionD_init
self.x0['s.angle'] = angle_init
self.x0['s.angleD'] = angleD_init
self.mpc.x0 = self.x0
self.mpc.set_initial_guess()
def tvp_fun(self, t_ind):
return self.tvp_template
def step(self, s, target_position, time=None):
s = cartpole_state_vector_to_namespace(s)
self.x0['s.position'] = s.position
self.x0['s.positionD'] = s.positionD
self.x0['s.angle'] = s.angle
self.x0['s.angleD'] = s.angleD
self.tvp_template['_tvp', :, 'target_position'] = target_position
Q = self.mpc.make_step(self.x0)
return Q.item()*(1+p_Q*np.random.uniform(-1.0, 1.0))
```
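The module-level weight handling above rescales the three stage-cost weights so they sum to one before they enter lterm, which keeps the relative importance from config.yml while fixing the overall scale. A tiny numeric sketch of that normalization (raw values here are illustrative, not the ones from config.yml):
```python
# Cost-weight normalization as done at module level above, with made-up raw weights.
l_angle, l_position, l_positionD = 20.0, 5.0, 0.0
w_sum = l_angle + l_position + l_positionD
l_angle, l_position, l_positionD = l_angle / w_sum, l_position / w_sum, l_positionD / w_sum
print(l_angle, l_position, l_positionD, l_angle + l_position + l_positionD)
# -> 0.8 0.2 0.0 1.0
```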
#### File: CartpoleSNNdemo/GUI/_ControllerGUI_MPPIOptionsWindow.py
```python
try:
import ptvsd
except:
pass
import numpy as np
# Import functions from PyQt5 module (creating GUI)
from PyQt5.QtWidgets import (
QMainWindow,
QRadioButton,
QApplication,
QVBoxLayout,
QHBoxLayout,
QLabel,
QPushButton,
QWidget,
QCheckBox,
QSlider,
QLineEdit,
QMessageBox,
QComboBox,
QButtonGroup,
)
from PyQt5.QtCore import QThreadPool, QTimer, Qt
from numpy.core.numeric import roll
import Controllers.controller_mppi as controller_mppi
class MPPIOptionsWindow(QWidget):
def __init__(self):
super(MPPIOptionsWindow, self).__init__()
self.horizon_steps = controller_mppi.mpc_samples
self.num_rollouts = controller_mppi.num_rollouts
self.dd_weight = controller_mppi.dd_weight
self.ep_weight = controller_mppi.ep_weight
self.ekp_weight = controller_mppi.ekp_weight * 1.0e1
self.ekc_weight = controller_mppi.ekc_weight * 1.0e-1
self.cc_weight = controller_mppi.cc_weight * 1.0e-2
self.ccrc_weight = controller_mppi.ccrc_weight * 1.0e-2
self.R = controller_mppi.R # How much to punish Q
self.LBD = controller_mppi.LBD # Cost parameter lambda
self.NU = controller_mppi.NU # Exploration variance
layout = QVBoxLayout()
### Set Horizon Length
horizon_options_layout = QVBoxLayout()
self.horizon_label = QLabel("")
self.horizon_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
horizon_options_layout.addWidget(self.horizon_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(10, 300)
slider.setValue(self.horizon_steps)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(10)
slider.setSingleStep(10)
horizon_options_layout.addWidget(slider)
slider.valueChanged.connect(self.horizon_length_changed)
### Set Number of Rollouts
rollouts_options_layout = QVBoxLayout()
self.rollouts_label = QLabel("")
self.rollouts_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
rollouts_options_layout.addWidget(self.rollouts_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(10, 3000)
slider.setValue(self.num_rollouts)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(10)
slider.setSingleStep(10)
rollouts_options_layout.addWidget(slider)
slider.valueChanged.connect(self.num_rollouts_changed)
### Set Cost Weights
cost_weight_layout = QVBoxLayout()
# Distance difference cost
self.dd_weight_label = QLabel("")
self.dd_weight_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.dd_weight_label)
self.dd_label = QLabel("")
self.dd_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.dd_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(0, 990)
slider.setValue(self.dd_weight)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(10)
slider.setSingleStep(10)
cost_weight_layout.addWidget(slider)
slider.valueChanged.connect(self.dd_weight_changed)
# Potential energy cost
self.ep_weight_label = QLabel("")
self.ep_weight_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ep_weight_label)
self.ep_label = QLabel("")
self.ep_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ep_label)
slider = QSlider(orientation=Qt.Horizontal)
        slider.setRange(0, int(1e5 - 1e3))          # Qt slider APIs take ints, not float literals
        slider.setValue(int(self.ep_weight))
        slider.setTickPosition(QSlider.TicksBelow)
        slider.setTickInterval(int(1e3))
        slider.setSingleStep(int(1e3))
cost_weight_layout.addWidget(slider)
slider.valueChanged.connect(self.ep_weight_changed)
# Pole kinetic energy cost
self.ekp_weight_label = QLabel("")
self.ekp_weight_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ekp_weight_label)
self.ekp_label = QLabel("")
self.ekp_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ekp_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(0, 99)
slider.setValue(self.ekp_weight)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(1)
slider.setSingleStep(1)
cost_weight_layout.addWidget(slider)
slider.valueChanged.connect(self.ekp_weight_changed)
# Cart kinetic energy cost
self.ekc_weight_label = QLabel("")
self.ekc_weight_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ekc_weight_label)
self.ekc_label = QLabel("")
self.ekc_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ekc_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(0, 99)
slider.setValue(self.ekc_weight)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(1)
slider.setSingleStep(1)
cost_weight_layout.addWidget(slider)
slider.valueChanged.connect(self.ekc_weight_changed)
# Control cost
self.cc_weight_label = QLabel("")
self.cc_weight_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.cc_weight_label)
self.cc_label = QLabel("")
self.cc_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.cc_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(0, 99)
slider.setValue(self.cc_weight)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(1)
slider.setSingleStep(1)
cost_weight_layout.addWidget(slider)
slider.valueChanged.connect(self.cc_weight_changed)
# Control change rate cost
self.ccrc_weight_label = QLabel("")
self.ccrc_weight_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ccrc_weight_label)
self.ccrc_label = QLabel("")
self.ccrc_label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
cost_weight_layout.addWidget(self.ccrc_label)
slider = QSlider(orientation=Qt.Horizontal)
slider.setRange(0, 99)
slider.setValue(self.ccrc_weight)
slider.setTickPosition(QSlider.TicksBelow)
slider.setTickInterval(1)
slider.setSingleStep(1)
cost_weight_layout.addWidget(slider)
slider.valueChanged.connect(self.ccrc_weight_changed)
### Set some more MPPI constants
mppi_constants_layout = QVBoxLayout()
# Quadratic cost penalty R
textbox = QLineEdit()
textbox.setText(str(self.R))
textbox.textChanged.connect(self.R_changed)
h_layout = QHBoxLayout()
h_layout.addWidget(QLabel("Quadratic input cost penalty R ="))
h_layout.addWidget(textbox)
mppi_constants_layout.addLayout(h_layout)
# Quadratic cost penalty LBD
textbox = QLineEdit()
textbox.setText(str(self.LBD))
textbox.textChanged.connect(self.LBD_changed)
h_layout = QHBoxLayout()
h_layout.addWidget(QLabel("Importance of higher-cost rollouts LBD ="))
h_layout.addWidget(textbox)
mppi_constants_layout.addLayout(h_layout)
# Quadratic cost penalty NU
textbox = QLineEdit()
textbox.setText(str(self.NU))
textbox.textChanged.connect(self.NU_changed)
h_layout = QHBoxLayout()
h_layout.addWidget(QLabel("Exploration variance NU ="))
h_layout.addWidget(textbox)
mppi_constants_layout.addLayout(h_layout)
# Sampling type
h_layout = QHBoxLayout()
btn1 = QRadioButton("iid")
if btn1.text() == controller_mppi.SAMPLING_TYPE: btn1.setChecked(True)
btn1.toggled.connect(lambda: self.toggle_button(btn1))
h_layout.addWidget(btn1)
btn2 = QRadioButton("random_walk")
if btn2.text() == controller_mppi.SAMPLING_TYPE: btn2.setChecked(True)
btn2.toggled.connect(lambda: self.toggle_button(btn2))
h_layout.addWidget(btn2)
btn3 = QRadioButton("uniform")
if btn3.text() == controller_mppi.SAMPLING_TYPE: btn3.setChecked(True)
btn3.toggled.connect(lambda: self.toggle_button(btn3))
h_layout.addWidget(btn3)
btn4 = QRadioButton("repeated")
if btn4.text() == controller_mppi.SAMPLING_TYPE: btn4.setChecked(True)
btn4.toggled.connect(lambda: self.toggle_button(btn4))
h_layout.addWidget(btn4)
btn5 = QRadioButton("interpolated")
if btn5.text() == controller_mppi.SAMPLING_TYPE: btn5.setChecked(True)
btn5.toggled.connect(lambda: self.toggle_button(btn5))
h_layout.addWidget(btn5)
mppi_constants_layout.addWidget(QLabel("Sampling type:"))
mppi_constants_layout.addLayout(h_layout)
### Put together layout
self.update_labels()
self.update_slider_labels()
layout.addLayout(horizon_options_layout)
layout.addLayout(rollouts_options_layout)
layout.addLayout(cost_weight_layout)
layout.addLayout(mppi_constants_layout)
self.setLayout(layout)
self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)
self.setGeometry(0, 0, 400, 50)
self.show()
self.setWindowTitle("MPPI Options")
self.timer = QTimer()
self.timer.timeout.connect(self.update_labels)
self.timer.start(100)
controller_mppi.LOGGING = False
def horizon_length_changed(self, val: int):
self.horizon_steps = val
# TODO: Replace by setter method
controller_mppi.mpc_samples = self.horizon_steps
self.update_slider_labels()
def num_rollouts_changed(self, val: int):
self.num_rollouts = val
controller_mppi.num_rollouts = self.num_rollouts
self.update_slider_labels()
def dd_weight_changed(self, val: int):
self.dd_weight = val
# TODO: Replace by setter method
controller_mppi.dd_weight = self.dd_weight * 1.0
self.update_slider_labels()
def ep_weight_changed(self, val: int):
self.ep_weight = val
# TODO: Replace by setter method
controller_mppi.ep_weight = self.ep_weight * 1.0
self.update_slider_labels()
def ekp_weight_changed(self, val: int):
self.ekp_weight = val
# TODO: Replace by setter method
controller_mppi.ekp_weight = self.ekp_weight * 1.0e-1
self.update_slider_labels()
def ekc_weight_changed(self, val: int):
self.ekc_weight = val
# TODO: Replace by setter method
controller_mppi.ekc_weight = self.ekc_weight * 1.0e1
self.update_slider_labels()
def cc_weight_changed(self, val: int):
self.cc_weight = val
# TODO: Replace by setter method
controller_mppi.cc_weight = self.cc_weight * 1.0e2
self.update_slider_labels()
def ccrc_weight_changed(self, val: int):
self.ccrc_weight = val
# TODO: Replace by setter method
controller_mppi.ccrc_weight = self.ccrc_weight * 1.0e2
self.update_slider_labels()
def R_changed(self, val: str):
if val == '': val = '0'
val = float(val)
self.R = val
controller_mppi.R = self.R
def LBD_changed(self, val: str):
if val == '': val = '0'
val = float(val)
if val == 0: val = 1.0
self.LBD = val
controller_mppi.LBD = self.LBD
def NU_changed(self, val: str):
if val == '': val = '0'
val = float(val)
if val == 0: val = 1.0
self.NU = val
controller_mppi.NU = self.NU
def toggle_button(self, b):
if b.isChecked(): controller_mppi.SAMPLING_TYPE = b.text()
def update_slider_labels(self):
self.horizon_label.setText(
f"Horizon: {self.horizon_steps} steps = {round(self.horizon_steps * controller_mppi.dt, 2)} s"
)
self.rollouts_label.setText(
f"Rollouts: {self.num_rollouts}"
)
self.dd_weight_label.setText(
f"Distance difference cost weight: {round(self.dd_weight, 2)}"
)
self.ep_weight_label.setText(
f"Pole angle cost weight: {round(self.ep_weight, 2)}"
)
self.ekp_weight_label.setText(
f"Pole kinetic energy cost weight: {round(self.ekp_weight * 1.0e-1, 4)}"
)
self.ekc_weight_label.setText(
f"Cart kinetic energy cost weight: {round(self.ekc_weight * 1.0e1, 3)}"
)
self.cc_weight_label.setText(
f"Control cost weight: {round(self.cc_weight * 1.0e2, 3)}"
)
self.ccrc_weight_label.setText(
f"Control change rate cost weight: {round(self.ccrc_weight * 1.0e2, 3)}"
)
def update_labels(self):
self.dd_label.setText(
f"{round(controller_mppi.gui_dd.item(), 2)}"
)
self.ep_label.setText(
f"{round(controller_mppi.gui_ep.item(), 2)}"
)
self.ekp_label.setText(
f"{round(controller_mppi.gui_ekp.item(), 2)}"
)
self.ekc_label.setText(
f"{round(controller_mppi.gui_ekc.item(), 2)}"
)
self.cc_label.setText(
f"{round(controller_mppi.gui_cc.item(), 2)}"
)
self.ccrc_label.setText(
f"{round(controller_mppi.gui_ccrc.item(), 2)}"
)
```
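For layout tweaks, the options window above can be launched on its own, roughly as sketched below. This is a hedged sketch with an assumed import path: the module still imports Controllers.controller_mppi at import time, and the live cost labels polled by update_labels() expect the controller's gui_* values to exist, so outside the full application those labels may fail.
```python
import sys
from PyQt5.QtWidgets import QApplication
from GUI._ControllerGUI_MPPIOptionsWindow import MPPIOptionsWindow   # import path assumed

if __name__ == '__main__':
    app = QApplication(sys.argv)
    options_window = MPPIOptionsWindow()   # the sliders write straight back into controller_mppi
    sys.exit(app.exec_())
```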
#### File: others/Controllers-out-of-use/controller_rnn_as_mpc_pytorch.py
```python
import scipy
import numpy as np
import pandas as pd
import torch  # used below for torch.tensor; imported explicitly rather than relying on the star import
from Modeling.Pytorch.utilis_rnn import *
from Controllers.template_controller import template_controller
from CartPole.state_utilities import create_cartpole_state, cartpole_state_varname_to_index
import yaml, os
config = yaml.load(open(os.path.join('SI_Toolkit_ApplicationSpecificFiles', 'config.yml'), 'r'), Loader=yaml.FullLoader)
RNN_FULL_NAME = 'GRU-6IN-64H1-64H2-5OUT-0'
INPUTS_LIST = config['modeling']['PyTorch']['INPUTS_LIST']
OUTPUTS_LIST = config['modeling']['PyTorch']['OUTPUTS_LIST']
PATH_SAVE = config['modeling']['PyTorch']['PATH_SAVE']
class controller_rnn_as_mpc_pytorch(template_controller):
def __init__(self):
self.rnn_full_name = RNN_FULL_NAME
self.path_save = PATH_SAVE
self.device = get_device()
# Create rnn instance and update lists of input, outputs and its name (if pretraind net loaded)
self.net, self.rnn_name, self.inputs_list, self.outputs_list \
= create_rnn_instance(load_rnn=self.rnn_full_name, path_save=self.path_save, device=self.device)
self.normalization_info = load_normalization_info(self.path_save, self.rnn_full_name)
self.net.reset()
self.net.eval()
self.rnn_input = pd.DataFrame(columns=self.inputs_list)
self.rnn_output = pd.DataFrame(columns=self.outputs_list)
def step(self, s, target_position, time=None):
# Copy state and target_position into rnn_input
if 'position' in self.rnn_input:
self.rnn_input['position'] = [s[cartpole_state_varname_to_index('position')]]
if 'angle' in self.rnn_input:
self.rnn_input['angle'] = [s[cartpole_state_varname_to_index('angle')]]
if 'positionD' in self.rnn_input:
self.rnn_input['positionD'] = [s[cartpole_state_varname_to_index('positionD')]]
if 'angleD' in self.rnn_input:
self.rnn_input['angleD'] = [s[cartpole_state_varname_to_index('angleD')]]
if 'target_position' in self.rnn_input:
self.rnn_input['target_position'] = [target_position]
rnn_input_normed = normalize_df(self.rnn_input, self.normalization_info)
rnn_input_torch = torch.tensor(rnn_input_normed.values).float().unsqueeze(0).to(self.device)
normalized_rnn_output = self.net(rnn_input=rnn_input_torch)
normalized_rnn_output = normalized_rnn_output.detach().cpu().squeeze().tolist()
normalized_rnn_output = pd.DataFrame(data=[normalized_rnn_output], columns=self.outputs_list)
denormalized_rnn_output = denormalize_df(normalized_rnn_output, self.normalization_info)
Q = float(denormalized_rnn_output['Q'])
return Q
```
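The controller above leans on normalize_df/denormalize_df from utilis_rnn, which are not shown here. The sketch below is an illustrative min-max stand-in for that round trip (not the library's actual implementation), just to make the data flow around the network call concrete; column names and ranges are made up.
```python
import pandas as pd

# Hypothetical normalization table: per-column min/max used for scaling.
normalization_info = pd.DataFrame({'Q': [-1.0, 1.0], 'position': [-0.2, 0.2]}, index=['min', 'max'])

def normalize_df_sketch(df, info):
    # scale each column to [0, 1] using the stored min/max
    return (df - info.loc['min']) / (info.loc['max'] - info.loc['min'])

def denormalize_df_sketch(df, info):
    # inverse mapping back to physical units
    return df * (info.loc['max'] - info.loc['min']) + info.loc['min']

raw = pd.DataFrame({'Q': [0.5], 'position': [0.1]})
normed = normalize_df_sketch(raw, normalization_info)
print(denormalize_df_sketch(normed, normalization_info))   # recovers the raw values
```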
#### File: CartpoleSNNdemo/others/globals_and_utils.py
```python
import logging
import math
import os
import sys
import time
from pathlib import Path
from subprocess import TimeoutExpired
import os
from typing import Optional
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' # all TF messages
import tensorflow as tf
import numpy as np
import atexit
from engineering_notation import EngNumber as eng # only from pip
from matplotlib import pyplot as plt
import numpy as np
# https://stackoverflow.com/questions/35851281/python-finding-the-users-downloads-folder
import os
if os.name == 'nt':
import ctypes
from ctypes import windll, wintypes
from uuid import UUID
# ctypes GUID copied from MSDN sample code
class GUID(ctypes.Structure):
_fields_ = [
("Data1", wintypes.DWORD),
("Data2", wintypes.WORD),
("Data3", wintypes.WORD),
("Data4", wintypes.BYTE * 8)
]
def __init__(self, uuidstr):
uuid = UUID(uuidstr)
ctypes.Structure.__init__(self)
self.Data1, self.Data2, self.Data3, \
self.Data4[0], self.Data4[1], rest = uuid.fields
for i in range(2, 8):
self.Data4[i] = rest>>(8-i-1)*8 & 0xff
SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath
SHGetKnownFolderPath.argtypes = [
ctypes.POINTER(GUID), wintypes.DWORD,
wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p)
]
def _get_known_folder_path(uuidstr):
pathptr = ctypes.c_wchar_p()
guid = GUID(uuidstr)
if SHGetKnownFolderPath(ctypes.byref(guid), 0, 0, ctypes.byref(pathptr)):
raise ctypes.WinError()
return pathptr.value
FOLDERID_Download = '{374DE290-123F-4565-9164-39C4925E467B}'
def get_download_folder():
return _get_known_folder_path(FOLDERID_Download)
else:
def get_download_folder():
home = os.path.expanduser("~")
return os.path.join(home, "Downloads")
LOGGING_LEVEL = logging.INFO
PORT = 12000 # UDP port used to send frames from producer to consumer
IMSIZE = 224 # input image size, must match model
UDP_BUFFER_SIZE = int(math.pow(2, math.ceil(math.log(IMSIZE * IMSIZE + 1000) / math.log(2))))
EVENT_COUNT_PER_FRAME = 2300 # events per frame
EVENT_COUNT_CLIP_VALUE = 3 # full count value for colleting histograms of DVS events
SHOW_DVS_OUTPUT = True # producer shows the accumulated DVS frames as aid for focus and alignment
MIN_PRODUCER_FRAME_INTERVAL_MS=7.0 # inference takes about 3ms and normalization takes 1ms, hence at least 2ms
# limit rate that we send frames to about what the GPU can manage for inference time
# after we collect sufficient events, we don't bother to normalize and send them unless this time has
# passed since last frame was sent. That way, we make sure not to flood the consumer
MAX_SHOWN_DVS_FRAME_RATE_HZ=15 # limits cv2 rendering of DVS frames to reduce loop latency for the producer
FINGER_OUT_TIME_S = 2 # time to hold out finger when joker is detected
ROOT_DATA_FOLDER= os.path.join(get_download_folder(),'trixsyDataset') # does not properly find the Downloads folder under Windows if not on same disk as Windows
DATA_FOLDER = os.path.join(ROOT_DATA_FOLDER,'data') #/home/tobi/Downloads/trixsyDataset/data' #'data' # new samples stored here
NUM_NON_JOKER_IMAGES_TO_SAVE_PER_JOKER = 3 # when joker detected by consumer, this many random previous nonjoker frames are also saved
JOKERS_FOLDER = DATA_FOLDER + '/jokers' # where samples are saved during runtime of consumer
NONJOKERS_FOLDER = DATA_FOLDER + '/nonjokers'
SERIAL_PORT = "/dev/ttyUSB0" # port to talk to arduino finger controller
LOG_DIR='logs'
SRC_DATA_FOLDER = os.path.join(ROOT_DATA_FOLDER,'source_data') #'/home/tobi/Downloads/trixsyDataset/source_data'
TRAIN_DATA_FOLDER=os.path.join(ROOT_DATA_FOLDER,'training_dataset') #'/home/tobi/Downloads/trixsyDataset/training_dataset' # the actual training data that is produced by split from dataset_utils/make_train_valid_test()
MODEL_DIR='models' # where models stored
JOKER_NET_BASE_NAME='joker_net' # base name
USE_TFLITE = True # set true to use TFLITE model, false to use full TF model for inference
TFLITE_FILE_NAME=JOKER_NET_BASE_NAME+'.tflite' # tflite model is stored in same folder as full-blown TF2 model
CLASS_DICT={'nonjoker':1, 'joker':2} # class1 and class2 for classifier
JOKER_DETECT_THRESHOLD_SCORE=.95 # minimum 'probability' threshold on joker output of CNN to trigger detection
import signal
def alarm_handler(signum, frame):
raise TimeoutError
def input_with_timeout(prompt, timeout=30):
""" get input with timeout
:param prompt: the prompt to print
:param timeout: timeout in seconds, or None to disable
:returns: the input
:raises: TimeoutError if times out
"""
# set signal handler
if timeout is not None:
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(timeout) # produce SIGALRM in `timeout` seconds
try:
time.sleep(.5) # get input to be printed after logging
return input(prompt)
except TimeoutError as to:
raise to
finally:
if timeout is not None:
signal.alarm(0) # cancel alarm
def yes_or_no(question, default='y', timeout=None):
""" Get y/n answer with default choice and optional timeout
:param question: prompt
:param default: the default choice, i.e. 'y' or 'n'
:param timeout: the timeout in seconds, default is None
:returns: True or False
"""
if default is not None and (default!='y' and default!='n'):
log.error(f'bad option for default: {default}')
quit(1)
y='Y' if default=='y' else 'y'
n='N' if default=='n' else 'n'
while "the answer is invalid":
try:
to_str='' if timeout is None or os.name=='nt' else f'(Timeout {default} in {timeout}s)'
if os.name=='nt':
log.warning('cannot use timeout signal on windows')
time.sleep(.1) # make the warning come out first
reply=str(input(f'{question} {to_str} ({y}/{n}): ')).lower().strip()
else:
reply = str(input_with_timeout(f'{question} {to_str} ({y}/{n}): ',timeout=timeout)).lower().strip()
except TimeoutError:
log.warning(f'timeout expired, returning default={default} answer')
reply=''
if len(reply)==0 or reply=='':
return True if default=='y' else False
elif reply[0].lower() == 'y':
return True
if reply[0].lower() == 'n':
return False
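# Hedged usage sketch for input_with_timeout()/yes_or_no() above, kept commented out so it
# does not run on import. The timeout path relies on SIGALRM and is therefore POSIX-only,
# as already warned for Windows inside yes_or_no().
# if yes_or_no('Overwrite existing model?', default='n', timeout=10):
#     print('overwriting existing model')
# else:
#     print('keeping existing model')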
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def my_logger(name):
# logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(name)
logger.setLevel(LOGGING_LEVEL)
# create console handler
ch = logging.StreamHandler()
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
return logger
log=my_logger(__name__)
timers = {}
times = {}
class Timer:
def __init__(self, timer_name='', delay=None, show_hist=False, numpy_file=None):
""" Make a Timer() in a _with_ statement for a block of code.
The timer is started when the block is entered and stopped when exited.
The Timer _must_ be used in a with statement.
:param timer_name: the str by which this timer is repeatedly called and which it is named when summary is printed on exit
:param delay: set this to a value to simply accumulate this externally determined interval
:param show_hist: whether to plot a histogram with pyplot
:param numpy_file: optional numpy file path
"""
self.timer_name = timer_name
self.show_hist = show_hist
self.numpy_file = numpy_file
self.delay=delay
if self.timer_name not in timers.keys():
timers[self.timer_name] = self
if self.timer_name not in times.keys():
times[self.timer_name]=[]
def __enter__(self):
if self.delay is None:
self.start = time.time()
return self
def __exit__(self, *args):
if self.delay is None:
self.end = time.time()
self.interval = self.end - self.start # measured in seconds
else:
self.interval=self.delay
times[self.timer_name].append(self.interval)
def print_timing_info(self, logger=None):
""" Prints the timing information accumulated for this Timer
:param logger: write to the supplied logger, otherwise use the built-in logger
"""
        if len(times[self.timer_name]) == 0:
log.error(f'Timer {self.timer_name} has no statistics; was it used without a "with" statement?')
return
a = np.array(times[self.timer_name])
timing_mean = np.mean(a) # todo use built in print method for timer
timing_std = np.std(a)
timing_median = np.median(a)
timing_min = np.min(a)
timing_max = np.max(a)
s='{} n={}: {}s +/- {}s (median {}s, min {}s max {}s)'.format(self.timer_name, len(a),
eng(timing_mean), eng(timing_std),
eng(timing_median), eng(timing_min),
eng(timing_max))
if logger is not None:
logger.info(s)
else:
log.info(s)
def print_timing_info():
for k,v in times.items(): # k is the name, v is the list of times
a = np.array(v)
timing_mean = np.mean(a)
timing_std = np.std(a)
timing_median = np.median(a)
timing_min = np.min(a)
timing_max = np.max(a)
log.info('== Timing statistics from all Timer ==\n{} n={}: {}s +/- {}s (median {}s, min {}s max {}s)'.format(k, len(a),
eng(timing_mean), eng(timing_std),
eng(timing_median), eng(timing_min),
eng(timing_max)))
if timers[k].numpy_file is not None:
try:
log.info(f'saving timing data for {k} in numpy file {timers[k].numpy_file}')
log.info('there are {} times'.format(len(a)))
np.save(timers[k].numpy_file, a)
except Exception as e:
log.error(f'could not save numpy file {timers[k].numpy_file}; caught {e}')
if timers[k].show_hist:
def plot_loghist(x, bins):
hist, bins = np.histogram(x, bins=bins) # histogram x linearly
if len(bins)<2 or bins[0]<=0:
log.error(f'cannot plot histogram since bins={bins}')
return
logbins = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins)) # use resulting bin ends to get log bins
plt.hist(x, bins=logbins) # now again histogram x, but with the log-spaced bins, and plot this histogram
plt.xscale('log')
dt = np.clip(a,1e-6, None)
# logbins = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins))
try:
plot_loghist(dt,bins=100)
plt.xlabel('interval[ms]')
plt.ylabel('frequency')
plt.title(k)
plt.show()
except Exception as e:
log.error(f'could not plot histogram: got {e}')
# this will print all the timer values upon termination of any program that imported this file
atexit.register(print_timing_info)
```
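A minimal usage sketch for the Timer context manager and the timing summaries defined above; run it inside this module (or after importing Timer, timers and print_timing_info from it).
```python
import time

for _ in range(3):
    with Timer('toy-sleep'):          # each pass appends one wall-clock interval under this name
        time.sleep(0.01)

timers['toy-sleep'].print_timing_info()   # per-timer mean/std/median/min/max
print_timing_info()                       # summary over all timers; also runs automatically at exit
```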
#### File: others/Predictores-obsolete/predictor_tests_plotting_helpers.py
```python
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
def pd_plotter_simple(df, x_name=None, y_name=None, idx_range=None,
color='blue', dt=None, marker=None, vline=None, title=''):
if idx_range is None:
idx_range = [0, -1]
if y_name is None:
raise ValueError ('You must provide y_name')
y = df[y_name].iloc[idx_range[0]:idx_range[1]]
if x_name is None:
x = np.arange(len(y))
if dt is not None:
x = x*dt
else:
x = df[x_name].iloc[idx_range[0]:idx_range[1]]
fig = plt.figure(figsize=(18, 10))
ax = fig.add_subplot(111)
ax.plot(x, y, color=color, linewidth=3, marker=marker)
ax.set_ylabel(y_name, fontsize=18)
ax.set_xlabel(x_name, fontsize=18)
if vline is not None:
plt.axvline(x=vline, color='orange')
plt.title(label=title, fontdict={'fontsize': 20})
plt.xticks(fontsize=16)
plt.show()
def pd_plotter_compare_1(d_ground, df, x_name=None, y_name=None, idx_range=None,
color='blue', dt=None, marker=None, vline=None, title=''):
if idx_range is None:
idx_range = [0, -1]
if y_name is None:
raise ValueError ('You must provide y_name')
y_ground = d_ground[y_name].to_numpy(copy=True)[idx_range[0]:idx_range[1]]
y_f = df[y_name].to_numpy(copy=True)[idx_range[0]:idx_range[1]]
if x_name is None:
x = np.arange(len(y_ground))
if dt is not None:
x = x*dt
x_name = 'time (s)'
else:
x = d_ground[x_name].iloc[idx_range[0]:idx_range[1]]
fig = plt.figure(figsize=(18, 10))
ax = fig.add_subplot(111)
ax.plot(x, y_ground, color='k', linewidth=2, linestyle='dotted')
ax.plot(x, y_f, color=color, linewidth=3, marker=marker)
ax.set_ylabel(y_name, fontsize=18)
ax.set_xlabel(x_name, fontsize=18)
if vline is not None:
plt.axvline(x=vline, color='orange')
plt.title(label=title, fontdict={'fontsize': 20})
plt.xticks(fontsize=16)
plt.show()
def pd_plotter_compare_2(d_ground, dfs, names, x_name=None, y_name=None, idx_range=None,
colors=None, dt=None, marker=None, vline=None, title=''):
if idx_range is None:
idx_range = [0, -1]
if colors is None:
colors=['blue', 'green']
if y_name is None:
raise ValueError ('You must provide y_name')
y_ground = d_ground[y_name].to_numpy(copy=True)[idx_range[0]:idx_range[1]]
y_f = dfs[0][y_name].to_numpy(copy=True)[idx_range[0]:idx_range[1]]
y_h = dfs[1][y_name].to_numpy(copy=True)[idx_range[0]:idx_range[1]]
if x_name is None:
x = np.arange(len(y_ground))
if dt is not None:
x = x*dt
x_name = 'time (s)'
else:
x = d_ground[x_name].iloc[idx_range[0]:idx_range[1]]
fig = plt.figure(figsize=(18, 10))
ax = fig.add_subplot(111)
ax.plot(x, y_ground, color='k', linewidth=2, linestyle='dotted', label='ground truth')
ax.plot(x, y_f, color=colors[0], linewidth=3, marker=marker, label=names[0])
ax.plot(x, y_h, color=colors[1], linewidth=3, marker=marker, label=names[1])
ax.set_ylabel(y_name, fontsize=18)
ax.set_xlabel(x_name, fontsize=18)
if vline is not None:
plt.axvline(x=vline, color='orange')
plt.title(label=title, fontdict={'fontsize': 20})
plt.legend(prop={'size': 20})
plt.xticks(fontsize=16)
plt.show()
def get_predictions(predictor, df, autoregres_at_after_start, N_predictions, horizon, prediction_denorm=True):
for row_number in range(autoregres_at_after_start):
initial_state = df.iloc[[row_number], :]
Q = np.atleast_1d(df.loc[df.index[row_number], 'Q'])
predictor.setup(initial_state)
predictor.update_internal_state(Q)
predictions = []
for i in tqdm(range(N_predictions)):
# Prepare initial state for predictions
initial_state = df.iloc[[autoregres_at_after_start+i], :]
predictor.setup(initial_state, prediction_denorm=prediction_denorm)
Q = np.atleast_1d(df.loc[df.index[autoregres_at_after_start+i: autoregres_at_after_start+i + horizon], 'Q'] \
.to_numpy(copy=True, dtype=np.float32).squeeze())
prediction = predictor.predict(Q)
predictions.append(prediction)
predictor.update_internal_state(Q[0])
return predictions
```
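A small, hedged usage sketch for pd_plotter_simple above: it plots one column of a synthetic DataFrame against an implicit time axis (dt turns the sample index into seconds) and needs a working matplotlib backend plus the function in scope.
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'angle': np.sin(np.linspace(0.0, 2.0 * np.pi, 200))})
pd_plotter_simple(df, y_name='angle', idx_range=[0, 150], color='green',
                  dt=0.02, vline=1.0, title='synthetic angle trace')
```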
#### File: Testing/Testing_Functions/Brunton_GUI.py
```python
from PyQt5.QtWidgets import QMainWindow, QApplication, QVBoxLayout, \
QHBoxLayout, QLabel, QPushButton, QWidget, QCheckBox, \
QComboBox, QSlider, QFrame, QButtonGroup, QRadioButton
from PyQt5.QtCore import Qt
# Import matplotlib
# This import mus go before pyplot so also before our scripts
from matplotlib import use, get_backend
# Use Agg if not in scientific mode of Pycharm
if get_backend() != 'module://backend_interagg':
use('Agg')
# Some more functions needed for interaction of matplotlib with PyQt5
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from matplotlib import colors
# Other imports for GUI
import sys
# endregion
# region Set color map for the plots
cdict = {'red': ((0.0, 0.22, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.89, 1.0)),
'green': ((0.0, 0.49, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.12, 1.0)),
'blue': ((0.0, 0.72, 0.0),
(0.5, 0.0, 0.0),
(1.0, 0.11, 1.0))}
cmap = colors.LinearSegmentedColormap('custom', cdict)
# endregion
def run_test_gui(features, titles, ground_truth, predictions_list, time_axis):
    # Create an instance of PyQt5 application
# Every PyQt5 application has to contain this line
app = QApplication(sys.argv)
# Create an instance of the GUI window.
window = MainWindow(features, titles, ground_truth, predictions_list, time_axis)
window.show()
# Next line hands the control over to Python GUI
app.exec_()
# Class implementing the main window of CartPole GUI
class MainWindow(QMainWindow):
def __init__(self,
features,
titles,
ground_truth,
predictions_list,
time_axis,
*args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.features = features
self.titles = titles
self.ground_truth = ground_truth
self.predictions_list = predictions_list
self.time_axis = time_axis
self.dataset = predictions_list[0]
self.max_horizon = self.predictions_list[0].shape[-2]-1
self.horizon = self.max_horizon//2
self.show_all = False
self.downsample = False
self.current_point_at_timeaxis = (self.time_axis.shape[0]-self.max_horizon)//2
self.feature_to_display = self.features[0]
# region - Create container for top level layout
layout = QVBoxLayout()
# endregion
# region - Change geometry of the main window
self.setGeometry(300, 300, 2500, 1000)
# endregion
# region - Matplotlib figures (CartPole drawing and Slider)
# Draw Figure
self.fig = Figure(figsize=(25, 10)) # Regulates the size of Figure in inches, before scaling to window size.
self.canvas = FigureCanvas(self.fig)
self.fig.Ax = self.canvas.figure.add_subplot(111)
self.redraw_canvas()
self.toolbar = NavigationToolbar(self.canvas, self)
# Attach figure to the layout
lf = QVBoxLayout()
lf.addWidget(self.toolbar)
lf.addWidget(self.canvas)
layout.addLayout(lf)
# endregion
l_sl = QHBoxLayout()
# region - Slider position
l_sl_p = QVBoxLayout()
l_sl_p.addWidget(QLabel('"Current" point in time:'))
self.sl_p = QSlider(Qt.Horizontal)
self.sl_p.setMinimum(0)
self.sl_p.setMaximum(self.time_axis.shape[0]-self.max_horizon-1)
self.sl_p.setValue((self.time_axis.shape[0]-self.max_horizon)//2)
self.sl_p.setTickPosition(QSlider.TicksBelow)
# self.sl_p.setTickInterval(5)
l_sl_p.addWidget(self.sl_p)
self.sl_p.valueChanged.connect(self.slider_position_f)
# endregion
# region - Slider horizon
l_sl_h = QVBoxLayout()
l_sl_h.addWidget(QLabel('Prediction horizon:'))
self.sl_h = QSlider(Qt.Horizontal)
self.sl_h.setMinimum(0)
self.sl_h.setMaximum(self.max_horizon)
self.sl_h.setValue(self.max_horizon//2)
self.sl_h.setTickPosition(QSlider.TicksBelow)
# self.sl_h.setTickInterval(5)
# endregion
l_sl_h.addWidget(self.sl_h)
self.sl_h.valueChanged.connect(self.slider_horizon_f)
separatorLine = QFrame()
separatorLine.setFrameShape( QFrame.VLine )
separatorLine.setFrameShadow( QFrame.Raised )
l_sl.addLayout(l_sl_p)
l_sl.addWidget(separatorLine)
l_sl.addLayout(l_sl_h)
layout.addLayout(l_sl)
# region - Make strip of layout for checkboxes and compobox
l_cb = QHBoxLayout()
# region -- Checkbox: Show all
self.cb_show_all = QCheckBox('Show all', self)
if self.show_all:
self.cb_show_all.toggle()
self.cb_show_all.toggled.connect(self.cb_show_all_f)
l_cb.addWidget(self.cb_show_all)
# endregion
# region -- Checkbox: Save/don't save experiment recording
self.cb_downsample = QCheckBox('Downsample predictions (X2)', self)
if self.downsample:
self.cb_downsample.toggle()
self.cb_downsample.toggled.connect(self.cb_downsample_f)
l_cb.addWidget(self.cb_downsample)
# endregion
# region Radio buttons to chose the dataset
self.rbs_datasets = []
for title in self.titles:
self.rbs_datasets.append(QRadioButton(title))
# Ensures that radio buttons are exclusive
self.datasets_buttons_group = QButtonGroup()
for button in self.rbs_datasets:
self.datasets_buttons_group.addButton(button)
lr_d = QHBoxLayout()
lr_d.addStretch(1)
lr_d.addWidget(QLabel('Dataset:'))
for rb in self.rbs_datasets:
rb.clicked.connect(self.RadioButtons_dataset_selection)
lr_d.addWidget(rb)
lr_d.addStretch(1)
self.rbs_datasets[0].setChecked(True)
# if len(self.predictions_list) < 2:
# self.rbs_datasets[1].setEnabled(False)
# # self.rbs_datasets[2].setEnabled(False)
l_cb.addLayout(lr_d)
# endregion
# region -- Combobox: Select feature to plot
l_cb.addWidget(QLabel('Feature to plot:'))
self.cb_select_feature = QComboBox()
self.cb_select_feature.addItems(self.features)
self.cb_select_feature.currentIndexChanged.connect(self.cb_select_feature_f)
self.cb_select_feature.setCurrentText(self.features[0])
l_cb.addWidget(self.cb_select_feature)
# region - Add checkboxes to layout
layout.addLayout(l_cb)
# endregion
# endregion
# region - QUIT button
bq = QPushButton("QUIT")
bq.pressed.connect(self.quit_application)
lb = QVBoxLayout() # Layout for buttons
lb.addWidget(bq)
layout.addLayout(lb)
# endregion
# region - Create an instance of a GUI window
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
self.show()
self.setWindowTitle('Testing System Model')
# endregion
def slider_position_f(self, value):
self.current_point_at_timeaxis = int(value)
self.redraw_canvas()
def slider_horizon_f(self, value):
self.horizon = int(value)
self.redraw_canvas()
def cb_show_all_f(self, state):
if state:
self.show_all = True
else:
self.show_all = False
self.redraw_canvas()
def cb_downsample_f(self, state):
if state:
self.downsample = True
else:
self.downsample = False
self.redraw_canvas()
def RadioButtons_dataset_selection(self):
for i in range(len(self.rbs_datasets)):
if self.rbs_datasets[i].isChecked():
self.dataset = self.predictions_list[i]
self.redraw_canvas()
def cb_select_feature_f(self):
self.feature_to_display = self.cb_select_feature.currentText()
self.redraw_canvas()
# The actions which have to be taken to properly terminate the application
# The method is invoked after the QUIT button is pressed
# TODO: Can we also connect it to the default cross closing the application?
# If you find out, please make the same correction in the CartPole simulator
def quit_application(self):
# Closes the GUI window
self.close()
# The standard command
# It seems, however, not to work on its own
# I don't know how it works
QApplication.quit()
def redraw_canvas(self):
self.fig.Ax.clear()
brunton_widget(self.features, self.ground_truth, self.dataset, self.time_axis,
axs=self.fig.Ax,
current_point_at_timeaxis=self.current_point_at_timeaxis,
feature_to_display=self.feature_to_display,
max_horizon=self.max_horizon,
horizon=self.horizon,
show_all=self.show_all,
downsample=self.downsample)
self.fig.Ax.grid(color="k", linestyle="--", linewidth=0.5)
self.canvas.draw()
def brunton_widget(features, ground_truth, predictions_array, time_axis, axs=None,
current_point_at_timeaxis=None,
feature_to_display=None,
max_horizon=10, horizon=None,
show_all=True,
downsample=False):
# Setting the start point should be done by the widget (slider)
if current_point_at_timeaxis is None:
current_point_at_timeaxis = ground_truth.shape[0]//2
if feature_to_display is None:
feature_to_display = features[0]
if horizon is None:
horizon = max_horizon
feature_idx = features.index(feature_to_display)
# Brunton Plot
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(18, 10), sharex=True)
axs.plot(time_axis, ground_truth[:, feature_idx], 'k:', markersize=12, label='Ground Truth')
y_lim = axs.get_ylim()
prediction_distance = []
axs.set_ylabel(feature_to_display, fontsize=18)
axs.set_xlabel('Time [s]', fontsize=18)
for i in range(horizon):
if not show_all:
axs.plot(time_axis[current_point_at_timeaxis], ground_truth[current_point_at_timeaxis, feature_idx],
'g.', markersize=16, label='Start')
prediction_distance.append(predictions_array[current_point_at_timeaxis, i+1, feature_idx])
if downsample:
if (i % 2) == 0:
continue
axs.plot(time_axis[current_point_at_timeaxis+i+1], prediction_distance[i],
c=cmap(float(i)/max_horizon),
marker='.')
else:
prediction_distance.append(predictions_array[:-(i+1), i+1, feature_idx])
if downsample:
if (i % 2) == 0:
continue
axs.plot(time_axis[i+1:], prediction_distance[i],
c=cmap(float(i)/max_horizon),
marker='.', linestyle = '')
# axs.set_ylim(y_lim)
plt.show()
```
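The GUI above expects `ground_truth` with shape `(T, n_features)`, each entry of `predictions_list` with shape `(T, max_horizon+1, n_features)`, and `time_axis` with shape `(T,)`. The following is a minimal, self-contained sketch (not part of the repository) that builds synthetic arrays with these shapes and reproduces the "show all" plotting style; the synthetic data and the `plasma` colormap are assumptions for illustration only, since the original module defines its own `cmap`.
```python
import numpy as np
import matplotlib.pyplot as plt

cmap = plt.get_cmap("plasma")  # assumption: stand-in for the module-level cmap
T, max_horizon, n_features = 200, 10, 1
time_axis = np.linspace(0.0, 4.0, T)
ground_truth = np.sin(time_axis)[:, None]  # (T, n_features)
# predictions[k, i, f]: i-step-ahead prediction made at time index k (synthetic)
predictions = np.stack(
    [np.sin(time_axis + 0.02 * i)[:, None] + 0.01 * i for i in range(max_horizon + 1)],
    axis=1,
)  # (T, max_horizon+1, n_features)

fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(time_axis, ground_truth[:, 0], "k:", label="Ground Truth")
for i in range(max_horizon):
    ax.plot(time_axis[i + 1:], predictions[:-(i + 1), i + 1, 0],
            c=cmap(i / max_horizon), marker=".", linestyle="")
ax.set_xlabel("Time [s]")
ax.legend()
plt.show()
```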
#### File: Testing/Testing_Functions/preprocess_for_brunton.py
```python
from SI_Toolkit.load_and_normalize import \
load_data, get_sampling_interval_from_datafile
def preprocess_for_brunton(a):
# Get dataset:
test_dfs = load_data(a.test_file)
if a.test_len == 'max':
a.test_len = len(test_dfs[
0]) - a.test_max_horizon - a.test_start_idx  # You could have +1; then, for the last prediction you do not have ground truth to compare with, but you can still calculate it.
dataset = test_dfs[0].iloc[a.test_start_idx:a.test_start_idx + a.test_len + a.test_max_horizon, :]
dataset.reset_index(drop=True, inplace=True)
# Get sampling interval
dataset_sampling_dt = get_sampling_interval_from_datafile(a.test_file[0])
if dataset_sampling_dt is None:
raise ValueError ('No information about sampling interval found')
time_axis = dataset['time'].to_numpy()[:a.test_len]
ground_truth_features = a.features+a.control_inputs
ground_truth = dataset[ground_truth_features].to_numpy()[:a.test_len, :]
return dataset, time_axis, dataset_sampling_dt, ground_truth
```
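For reference, `preprocess_for_brunton` only reads a handful of attributes from its argument `a`. A minimal sketch of such an argument object is shown below; the field values and names in it are hypothetical, and actually calling the function additionally requires a test data file readable by `load_data`.
```python
from types import SimpleNamespace

# Hypothetical settings mirroring the attributes accessed above
a = SimpleNamespace(
    test_file=["Experiment.csv"],    # assumed path, must exist for load_data()
    test_start_idx=0,
    test_len="max",                  # or an integer number of samples
    test_max_horizon=10,
    features=["angle", "position"],  # assumed feature names
    control_inputs=["Q"],            # assumed control input name
)
# dataset, time_axis, dt, ground_truth = preprocess_for_brunton(a)
```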
#### File: SI_Toolkit/TF/Train.py
```python
try:
import nni
except ModuleNotFoundError:
print('Module nni not found - only needed to run training with NNI Framework')
import matplotlib.pyplot as plt
from tensorflow import keras
import os
import timeit
from warnings import warn as warning
# "Command line" parameters
from SI_Toolkit.TF.Parameters import args
# Custom functions
from SI_Toolkit.TF.TF_Functions.Initialization import set_seed, create_full_name, create_log_file, \
get_net, get_norm_info_for_net
from SI_Toolkit.TF.TF_Functions.Loss import loss_msr_sequence_customizable, loss_msr_sequence_customizable_relative
from SI_Toolkit.TF.TF_Functions.Dataset import Dataset
# from SI_Toolkit.TF.TF_Functions.Dataset import DatasetRandom
from SI_Toolkit.load_and_normalize import load_data, normalize_df, \
get_sampling_interval_from_datafile, get_paths_to_datafiles
# region Import and print "command line" arguments
print('')
a = args() # 'a' like arguments
print(a.__dict__)
print('')
# endregion
# Uncomment the @profile(precision=4) decorator to get a report on memory usage after the training
# Warning! It may affect performance. I would discourage using it for long training tasks
# @profile(precision=4)
def train_network(nni_parameters=None):
# region Start measuring time - to evaluate performance of the training function
start = timeit.default_timer()
# endregion
# region If NNI enabled load new parameters
if nni_parameters is not None:
a.net_name = 'GRU-' + str(nni_parameters['h1']) + 'H1-' + str(nni_parameters['h2']) + 'H2'
a.wash_out_len = int(nni_parameters['wash_out_len'])
# endregion
# region Set seeds to make experiment reproducible
set_seed(a)
# endregion
# region Make folder to keep trained models and their logs if not yet exist
try:
os.makedirs(a.path_to_models[:-1])
except FileExistsError:
pass
# endregion
net, net_info = get_net(a)
# Create a copy of the network suitable for inference (stateful and with sequence length one)
net_for_inference, net_for_inference_info = \
get_net(a, time_series_length=a.test_len,
batch_size=1, stateful=True)
# Create new full name for the pretrained net
create_full_name(net_info, a.path_to_models)
normalization_info = get_norm_info_for_net(net_info, files_for_normalization=a.training_files)
# a is an "argument"
# It must contain:
# path to models
# information about paths for:
# - training
# - validation
# - testing
create_log_file(net_info, a)
# region Load data and prepare datasets
if a.on_fly_data_generation:
...
# TODO DatasetRandom should have normalization info too...
# TODO It should be possible only to provide here the dt as in norm info
# train_set = DatasetRandom(a, inputs_list=net_info.inputs, outputs_list=net_info.outputs, number_of_batches=1000)
# validation_set = DatasetRandom(a, inputs_list=net_info.inputs, outputs_list=net_info.outputs,
# number_of_batches=10)
else:
paths_to_datafiles_training = get_paths_to_datafiles(a.training_files)
paths_to_datafiles_validation = get_paths_to_datafiles(a.validation_files)
for path in paths_to_datafiles_training + paths_to_datafiles_validation:
try:
dt_sampling = get_sampling_interval_from_datafile(path)
if abs(net_info.sampling_interval - dt_sampling) > 1.0e-5:
warning('A difference between network sampling interval and save interval of data file {} detected'
.format(path))
except TypeError:
print('Sampling interval unknown.')
training_dfs = load_data(paths_to_datafiles_training)
validation_dfs = load_data(paths_to_datafiles_validation)
training_dfs_norm = normalize_df(training_dfs, normalization_info)
training_dataset = Dataset(training_dfs_norm, a, shuffle=True, inputs=net_info.inputs, outputs=net_info.outputs)
validation_dfs_norm = normalize_df(validation_dfs, normalization_info)
validation_dataset = Dataset(validation_dfs_norm, a, shuffle=True, inputs=net_info.inputs,
outputs=net_info.outputs)
# test_dfs is not deleted as we need it further for plotting
del training_dfs, validation_dfs, paths_to_datafiles_validation, paths_to_datafiles_training
# region In either case testing is done on a data collected offline
paths_to_datafiles_test = get_paths_to_datafiles(a.test_files)
test_dfs = load_data(paths_to_datafiles_test)
test_dfs_norm = normalize_df(test_dfs, normalization_info)
test_set = Dataset(test_dfs_norm, a, shuffle=False, inputs=net_info.inputs, outputs=net_info.outputs)
# Check the sampling interval for test file
for path in paths_to_datafiles_test:
try:
dt_sampling = get_sampling_interval_from_datafile(path)
if abs(net_info.sampling_interval - dt_sampling) > 1.0e-5:
warning('A difference between network sampling interval and save interval of data file {} detected'
.format(path))
except TypeError:
print('Sampling interval unknown')
# endregion
# endregion
net.compile(
loss=loss_msr_sequence_customizable(wash_out_len=a.wash_out_len,
post_wash_out_len=a.post_wash_out_len,
discount_factor=1.0),
optimizer=keras.optimizers.Adam(0.001)
)
net.summary()
# endregion
# region Define callbacks to be used in training
callbacks_for_training = []
class PlotPredictionsCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
...
# net_for_inference.set_weights(net.get_weights())
# plot_string = 'This is the network after {} training epoch(s), warm_up={}'.format(epoch + 1, a.wash_out_len)
# ground_truth, net_outputs, time_axis = \
# get_predictions_TF(net_for_inference, net_for_inference_info,
# test_set, normalization_info,
# experiment_length=a.test_len)
# brunton_widget(net_for_inference_info.inputs, net_for_inference_info.outputs,
# ground_truth, net_outputs, time_axis,
# )
plot_predictions_callback = PlotPredictionsCallback()
callbacks_for_training.append(plot_predictions_callback)
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=net_info.path_to_net + 'ckpt' + '.ckpt',
save_weights_only=True,
monitor='val_loss',
mode='auto',
save_best_only=False)
callbacks_for_training.append(model_checkpoint_callback)
reduce_lr = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.2,
patience=1,
min_lr=0.0001,
verbose=2
)
callbacks_for_training.append(reduce_lr)
csv_logger = keras.callbacks.CSVLogger(net_info.path_to_net + 'log_training.csv', append=False, separator=';')
callbacks_for_training.append(csv_logger)
# endregion
history = net.fit(
training_dataset,
epochs=a.num_epochs,
verbose=True,
shuffle=False,
validation_data=validation_dataset,
callbacks=callbacks_for_training,
)
# region Save final weights as checkpoint
net.save_weights(net_info.path_to_net + 'ckpt' + '.ckpt')
# endregion
# region Plot loss change during training
plt.figure()
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.xlabel("Training Epoch")
plt.ylabel("Loss")
plt.yscale('log')
plt.legend()
plt.savefig(net_info.path_to_net + 'training_curve' + '.png')
plt.show()
# endregion
# region If NNI enabled send final report
if nni_parameters is not None:
nni.report_final_result(history.history['val_loss'][-1])
# endregion
# region Calculate and print the total time it took to train the network
stop = timeit.default_timer()
total_time = stop - start
# Print the total time it took to run the function
print('Total time of training the network: ' + str(total_time))
# endregion
if __name__ == '__main__':
import os.path
import time
# The following lines help to determine if the file on Google Colab was updated
file = os.path.realpath(__file__)
print("Training script last modified: %s" % time.ctime(os.path.getmtime(file)))
# Run the training function and measure time of execution
train_network()
# Use the call below instead of train_network() if you want to use NNI
# nni_parameters = nni.get_next_parameter()
# train_network(nni_parameters)
``` |
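When training with NNI, the tuner has to supply the three parameters read at the top of `train_network` (`h1`, `h2`, `wash_out_len`). A plausible search-space definition, written here as a Python dict in NNI's `search_space.json` format, might look as follows; the concrete value ranges are assumptions, not taken from the repository.
```python
import json

# Hypothetical search space matching nni_parameters['h1'], ['h2'], ['wash_out_len']
search_space = {
    "h1": {"_type": "choice", "_value": [16, 32, 64, 128]},
    "h2": {"_type": "choice", "_value": [16, 32, 64, 128]},
    "wash_out_len": {"_type": "choice", "_value": [10, 20, 50]},
}
print(json.dumps(search_space, indent=2))  # save as search_space.json for the NNI experiment
```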
{
"source": "jhuebotter/SpikingVAE",
"score": 3
} |
#### File: jhuebotter/SpikingVAE/setup.py
```python
from pathlib import Path
import argparse
from torchvision import datasets, transforms
def download_MNIST(root):
"""Download MNIST dataset."""
datasets.MNIST(
root=root, train=True, download=True, transform=transforms.ToTensor()
)
def download_FASHION(root):
"""Download Fashion-MNIST dataset."""
datasets.FashionMNIST(
root=root, train=True, download=True, transform=transforms.ToTensor()
)
def download_CIFAR10(root):
"""Download CIFAR-10 dataset."""
datasets.CIFAR10(
root=root, train=True, download=True, transform=transforms.ToTensor()
)
def download_CELEBA(root):
"""Download CELEBA dataset."""
import gdown, zipfile
# Path to folder with the dataset
dataset_folder = f'{root}/img_align_celeba'
# URL for the CelebA dataset
url = 'https://drive.google.com/uc?id=1cNIac61PSA_LqDFYFUeyaQYekYPc75NH'
# Path to download the dataset to
download_path = f'{root}/img_align_celeba.zip'
# Download the dataset from google drive
gdown.download(url, download_path, quiet=False)
# Unzip the downloaded file
with zipfile.ZipFile(download_path, 'r') as ziphandler:
ziphandler.extractall(dataset_folder)
# this is still not fixed by pytorch dev team
#datasets.CelebA(
# root=root, download=True, transform=transforms.ToTensor()
#)
if __name__ == "__main__":
"""Setup result directories and download supported datasets."""
# Create project folder organization
#Path.mkdir(Path("results/logs"), parents=True, exist_ok=True)
#Path.mkdir(Path("results/images"), parents=True, exist_ok=True)
Path.mkdir(Path("results/checkpoints"), parents=True, exist_ok=True)
# Read datasets to download
parser = argparse.ArgumentParser(
description="Download datasets for VSC experiments"
)
parser.add_argument(
"--datasets",
type=str,
nargs="+",
choices=["mnist", "fashion", "cifar10", "celeba"],
help="name of dataset to download [mnist, fashion, cifar10, celeba]",
default="cifar10",
)
args = parser.parse_args()
# Download datasets for experiments
if "mnist" in args.datasets:
print("Downloading MNIST dataset...")
Path.mkdir(Path("data/mnist"), parents=True, exist_ok=True)
download_MNIST("data/mnist")
if "fashion" in args.datasets:
print("Downloading Fashion-MNIST dataset...")
Path.mkdir(Path("data/fashion-mnist"), parents=True, exist_ok=True)
download_FASHION("data/fashion-mnist")
if "cifar10" in args.datasets:
print("Downloading CIFAR-10 dataset...")
Path.mkdir(Path("data/cifar10"), parents=True, exist_ok=True)
download_CIFAR10("data/cifar10")
if "celeba" in args.datasets:
print("Downloading CELEBA dataset...")
Path.mkdir(Path("data/celeba"), parents=True, exist_ok=True)
download_CELEBA("data/celeba")
```
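The script above is driven entirely by the `--datasets` flag. A small, self-contained sketch of how the parser behaves (without triggering any downloads) is shown below; the argument list passed to `parse_args` is only an example invocation.
```python
import argparse

parser = argparse.ArgumentParser(description="Download datasets for VSC experiments")
parser.add_argument(
    "--datasets", type=str, nargs="+",
    choices=["mnist", "fashion", "cifar10", "celeba"],
    default="cifar10",
)
# Equivalent to: python setup.py --datasets mnist fashion
args = parser.parse_args(["--datasets", "mnist", "fashion"])
print(args.datasets)  # ['mnist', 'fashion']
```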
#### File: src/models/base_model.py
```python
import torch
import torch.nn as nn
import math
from pathlib import Path
from tqdm import tqdm
import utils as u
import metrics as met
import samplers as sam
import losses
import shutil
import time
class BaseModel:
"""Custom neural network base model."""
def __init__(
self,
input_width,
input_height,
input_channels,
#dataset,
learning_rate,
weight_decay,
device,
loss,
verbose=True,
log_func=print,
):
""" Initializes a custom neural network base model
Args:
input_width (int): pixel width of input data
input_height (int): pixel height of input data
input_channels (int): number of channels in input data, grayscale = 1, RGB = 3
learning_rate (float): step size for parameter updates during learning
weight_decay (float): weight of the L2 regularization applied during learning
device (str): device where model parameters are stored and trained on
loss (str): name of the loss function to use during training.
Choice must be a valid option defined in losses.py
verbose (bool, optional): flag to determine if process summary is given (default: True)
log_func (function, optional): function to use for summary output (default: print)
"""
#self.dataset = dataset
# set input dimensions
self.input_width = input_width
self.input_height = input_height
self.input_channels = input_channels
self.input_sz = (self.input_channels, self.input_width, self.input_height)
self.lr = learning_rate
self.wd = weight_decay
self.device = device
self.log_func = log_func
self.training_step = 0
# To be implemented by subclasses
self.model = None
self.optimizer = None
self.task = None
self.input_layer = None
self.output_layer = None
# initialize loss function
self.loss_function = loss
self.verbose = verbose
def count_parameters(self):
"""Helper function to count trainable parameters of the model
Returns:
int: number of trainable parameters of the model
"""
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def init_weights(self):
"""Initializes network weights"""
self.model.module_list = [
m for m in self.model.modules()
if not issubclass(type(m), nn.modules.container.ModuleDict)
]
for i, m in enumerate(self.model.modules()):
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
variance1 = math.sqrt(2.0 / n)
m.weight.data.normal_(0, variance1)
elif isinstance(m, nn.Linear):
size = m.weight.size()
fan_in = size[1]
variance2 = math.sqrt(2.0 / fan_in)
m.weight.data.normal_(0.0, 2*variance2)
elif isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
variance3 = math.sqrt(2.0 / n)
m.weight.data.normal_(0, variance3)
def loss_function(self, **kwargs):
"""Loss function applied during learning is to be implemented by subclass"""
raise NotImplementedError
def update_(self, **kwargs):
"""Update some internal variables during training such as a scheduled or conditional learning rate"""
pass
def step(self, data_batch, target_batch, train=False):
"""Pass data through the model for training or evaluation
Args:
data_batch (torch.Tensor): batch of features from DataLoader object
target_batch (torch.Tensor): batch of targets from DataLoader object
train (bool, optional): flag if model weights should be updated based on loss (default: False)
Returns:
dict: contains performance metric information and possibly additional data from models forward function
"""
if train:
self.optimizer.zero_grad()
out_batch = self.model(data_batch)
out_batch["target"] = target_batch
out_batch["weights"] = [p for p in self.model.parameters()]
losses = self.loss_function(**out_batch)
if train:
losses["loss"].backward()
self.optimizer.step()
self.training_step += 1
self.update_(step=self.training_step)
result = dict(
input=data_batch,
target=target_batch,
)
result.update(out_batch)
for k, v in losses.items():
result.update({k: v.item()})
return result
def train(self, train_loader, epoch, metrics={}, max_batches=0):
"""Train the model for a single epoch
Args:
train_loader (DataLoader): object containing training data
epoch (int): current training epoch
metrics (dict, optional): contains names and functions of metrics
to be calculated during training (default: {})
max_batches (int, optional): limits number of batches per epoch, 0 = no limit (default: 0)
Returns:
dict: average loss and optional metrics on training dataset
"""
self.model.train()
summary = {"batch time": u.RunningAverage()}
summary.update({m: u.RunningAverage() for m in metrics.keys()})
summary.update({l: u.RunningAverage() for l in self.loss_function.loss_labels})
n = min(max_batches, len(train_loader)) if max_batches else len(train_loader)
with tqdm(total=n) as t:
for batch_idx, (data_batch, target_batch) in enumerate(train_loader):
t0 = time.time()
data_batch = data_batch.to(self.device)
target_batch = target_batch.to(self.device)
if self.task == "reconstruction":
target_batch = data_batch
result = self.step(data_batch, target_batch, train=True)
# calculate metrics
summary["batch time"].update(time.time() - t0)
for metric, metric_fn in metrics.items():
summary[metric].update(metric_fn(**result))
for l in self.loss_function.loss_labels:
summary[l].update(result[l])
del result
t.set_postfix(loss="{:05.4f}".format(summary["loss"]()))
t.update()
if batch_idx+1 == max_batches:
break
# create the results dict
train_results = {key: value() for (key, value) in summary.items()}
"""
train_results = {}
for metric in metrics.keys():
train_results[metric] = summary[metric]()
for l in self.loss_function.loss_labels:
train_results[l] = summary[l]()
"""
if self.verbose:
self.log_func(f"====> Epoch {epoch}: Average loss = {summary['loss']():.4f}")
return train_results
def evaluate(self, test_loader, metrics={}, samplers={}, sample_freq=0, max_batches=0):
"""Evaluate the model on a validation or test set
Args:
test_loader (DataLoader): object containing test or validation data
metrics (dict, optional): contains names and functions of metrics
to be calculated during testing (default: {})
samplers (dict, optional): contains names and functions of samplers
to be applied during evaluation (default: {})
sample_freq (int, optional): determines how often to sample the current data batch.
Has no effect if samplers is empty.
0 = no samples, 1 = sample every batch, 10 = sample every 10 batches (default: 0)
max_batches (int, optional): limits how many batches should be evaluated.
0 = no limit (default: 0)
Returns:
dict: average loss and optional metrics on test or validation data
dict: contains samples drawn from batches with sampler functions
"""
self.model.eval()
summary = {"batch time": u.RunningAverage()}
summary.update({m: u.RunningAverage() for m in metrics.keys()})
summary.update({l: u.RunningAverage() for l in self.loss_function.loss_labels})
samples = {}
n = min(max_batches, len(test_loader)) if max_batches else len(test_loader)
with torch.no_grad():
with tqdm(total=n) as t:
for batch_idx, (data_batch, target_batch) in enumerate(test_loader):
t0 = time.time()
data_batch = data_batch.to(self.device)
target_batch = target_batch.to(self.device)
label_batch = target_batch
if self.task == "reconstruction":
target_batch = data_batch
result = self.step(data_batch, target_batch, train=False)
summary["batch time"].update(time.time() - t0)
result["labels"] = label_batch
if self.input_layer is not None:
result["input weights"] = self.input_layer.weight.data.cpu()
if self.output_layer is not None:
result["output weights"] = self.output_layer.weight.data.cpu()
# calculate the metrics
for metric, metric_fn in metrics.items():
summary[metric].update(metric_fn(**result))
for l in self.loss_function.loss_labels:
summary[l].update(result[l])
if sample_freq and batch_idx % sample_freq == 0:
for sampler, sampler_fn in samplers.items():
samples.update({f"{sampler} batch {batch_idx}": sampler_fn(**result)})
del result
t.set_postfix(loss="{:05.4f}".format(summary["loss"]()))
t.update()
if max_batches and batch_idx == max_batches-1:
break
# create the results dict
test_results = {key: value() for (key, value) in summary.items()}
"""
for metric in metrics.keys():
test_results[metric] = summary[metric]()
for l in self.loss_function.loss_labels:
test_results[l] = summary[l]()
"""
if self.verbose:
name = self.model.__class__.__name__
self.log_func(f"====> Test: {name} Average loss = {summary['loss']():.4f}")
return test_results, samples
def save_checkpoint(self, checkpoint, dir, name, is_best=False, verbose=True):
"""Save a checkpoint with model parameters for later use
Args:
checkpoint (dict): contains important variables from the model such as weights and performance info
dir (str): directory to save the checkpoint in
name (str): name to save the checkpoint as
is_best (bool, optional): indicates if this is currently the best available model of this run (default: False)
verbose (bool, optional): indicates if function should print success statements (default: True)
"""
Path.mkdir(Path(dir), exist_ok=True)
path = Path.joinpath(Path(dir), f"{name}_latest.pth")
torch.save(checkpoint, path)
if is_best:
best_path = Path.joinpath(Path(dir), f"{name}_best.pth")
shutil.copyfile(path, best_path)
if verbose:
self.log_func(f"Saved model at {path}\n")
if is_best:
self.log_func(f"Saved model at {best_path}\n")
def load_checkpoint(self, path, model, optimizer=None, verbose=True):
"""Load model parameters from a checkpoint
Args:
path (str): location of the checkpoint file to load
model (torch.nn.Module object): model to load the weights from checkpoint
optimizer (torch.optim object, optional): optimizer to load parameters from checkpoint (default: None)
verbose (bool, optional): indicates if function should print success statements (default: True)
Returns:
dict: contains important variables from the model such as weights and performance info
"""
path = Path(path)
if self.verbose:
self.log_func(f"Loading model parameters from checkpoint {path}...")
if not Path.exists(path):
raise FileNotFoundError(f"File {path} doesn't exist")
checkpoint = torch.load(path.as_posix())
model.load_state_dict(checkpoint["state_dict"])
if optimizer:
optimizer.load_state_dict(checkpoint["optim_dict"])
if verbose:
self.log_func("Found checkpoint entries:")
for k, v in checkpoint.items():
self.log_func(f"{k:20} {type(v)}")
self.log_func(f"Loaded model state from {path}\n")
if optimizer:
self.log_func(f"Loaded optimizer state from {path}\n")
return checkpoint
def train_and_evaluate(
self,
train_loader,
val_loader,
epochs,
model_name=None,
metrics=[],
key_metric="validation loss",
goal="minimize",
load="",
logger=None,
checkpoints_dir="../results/checkpoints",
eval_first=True,
samplers=[],
sample_freq=0,
max_epoch_batches=0,
plot_weights=[],
):
"""
Runs training on the training set for the set number of epochs while continuously evaluating and logging.
Args:
train_loader (DataLoader): object containing the training data
val_loader (DataLoader): object containing the validation data
epochs (int): number of epochs to run training for
model_name (str, optional): name to save the model under.
If None, the model class name will be used (default: None)
metrics (list, optional): names of metrics to compute during training and evaluation.
All names given must correspond to a valid method defined in metrics.py (default: [])
key_metric (str, optional): key of the metric to monitor to determine model improvements
(default: validation loss)
goal (str, optional): whether to maximize or minimize the key metric (default: minimize)
load (str, optional): path to model checkpoint to load from disk, ignored if empty (default: "")
logger (object, optional): if not None, logger.log() will be called to log dictionaries
of metrics and samples at each epoch (default: None)
checkpoints_dir (str, optional): directory to save and load model checkpoints (default: ../results/checkpoints)
eval_first (bool, optional): if True, evaluation will occur before the first training epoch (default: True)
samplers (list, optional): names of samplers to use during evaluation.
All names given must correspond to a valid method defined in samplers.py (default: [])
sample_freq (int, optional): determines how often to sample the current data batch.
Has no effect if samplers is empty.
0 = no samples, 1 = sample every batch, 10 = sample every 10 batches (default: 0)
max_epoch_batches (int, optional): limits number of batches per epoch, 0 = no limit (default: 0)
"""
# Check if model name was specified
if model_name is None:
model_name = self.model.__class__.__name__
# Start fresh or load previous model
if load:
checkpoint = self.load_checkpoint(
path=load,
model=self.model,
optimizer=self.optimizer,
verbose=self.verbose,
)
start_epoch = checkpoint["epoch"] + 1
training_summary = checkpoint["training_summary"]
del checkpoint
else:
start_epoch = 1
training_summary = {}
# initialize metrics
metrics = met.get_metrics(metrics)
# initialize samplers
samplers = sam.get_samplers(samplers)
if goal.lower() == "minimize":
lower_is_better = True
best_key_score = math.inf
elif goal.lower() == "maximize":
lower_is_better = False
best_key_score = -math.inf
else:
raise ValueError(f"Unknown goal '{goal}'; expected 'minimize' or 'maximize'")
if eval_first:
val_results, samples = self.evaluate(val_loader, metrics, samplers, sample_freq)
# Store results in log
summary = dict()
for key, value in val_results.items():
summary[f"validation {key}"] = value
if logger is not None:
logger.save_summary(summary, epoch=start_epoch - 1)
logger.log(summary, step=start_epoch - 1)
for batch, sample in samples.items():
for key, value in sample.items():
logger.log({f"{batch}_{key}": value}, step=start_epoch - 1)
del val_results, samples, summary
if self.verbose:
self.log_func(f"Training {model_name} model for {epochs} epochs...")
# Train for desired number of epochs
for epoch in range(start_epoch, start_epoch + epochs):
train_results = self.train(train_loader, epoch, metrics, max_epoch_batches)
val_results, samples = self.evaluate(val_loader, metrics, samplers, sample_freq, max_epoch_batches)
train_loss = train_results["loss"]
val_loss = val_results["loss"]
# Store results in log
summary = dict()
for key, value in train_results.items():
summary[f"training {key}"] = value
for key, value in val_results.items():
summary[f"validation {key}"] = value
training_summary[epoch] = summary
if logger is not None:
logger.save_summary(summary, epoch=epoch)
logger.log(summary, step=epoch)
for batch, sample in samples.items():
for key, value in sample.items():
logger.log({f"{batch}_{key}": value}, step=epoch)
# print epoch summary
if self.verbose:
self.log_func(f"Summary epoch {epoch}:")
for key, value in summary.items():
self.log_func(f"{key:50s} {value:10.4f}")
# Check if the model is the best model
key_score = summary[key_metric]
is_best = False
if (key_score <= best_key_score and lower_is_better) or (
key_score >= best_key_score and not lower_is_better
):
is_best = True
best_key_score = key_score
if self.verbose:
self.log_func(f"New best model: {key_metric} {best_key_score:.4f} \n")
# Save the latest model
checkpoint = dict(
epoch=epoch,
state_dict=self.model.state_dict(),
optim_dict=self.optimizer.state_dict(),
train_loss=train_loss,
val_loss=val_loss,
key_score=key_score,
training_summary=training_summary,
run_id=logger.run.id if logger is not None else None,
)
self.save_checkpoint(
checkpoint=checkpoint,
dir=checkpoints_dir,
name=model_name,
is_best=is_best,
verbose=self.verbose,
)
del checkpoint, train_results, val_results, samples, summary
class SpikingBaseModel(BaseModel):
"""Custom spiking neural network base model"""
def __init__(
self,
input_width,
input_height,
input_channels,
#dataset,
learning_rate,
weight_decay,
steps,
threshold,
decay,
device,
loss,
grad_clip=0.0,
use_extra_grad=True,
verbose=False,
log_func=print,
):
""" Initializes a custom neural network base model
Args:
input_width (int): pixel width of input data
input_height (int): pixel height of input data
input_channels (int): number of channels in input data, grayscale = 1, RGB = 3
learning_rate (float): step size for parameter updates during learning
weight_decay (float): weight of the L2 regularization applied during learning
steps (int): number of timesteps per example for spiking simulation
threshold (int): firing threshold for LIF neurons
decay (float): temporal variable controlling LIF membrane potential decay per step
device (str): device where model parameters are stored and trained on
loss (str): name of the loss function to use during training.
Choice must be a valid option defined in losses.py
verbose (bool, optional): flag to determine if process summary is given (default: True)
log_func (function, optional): function to use for summary output (default: print)
"""
super(SpikingBaseModel, self).__init__(
input_width,
input_height,
input_channels,
#dataset,
learning_rate,
weight_decay,
device,
loss,
verbose=verbose,
log_func=log_func,
)
self.steps = steps
self.decay = decay
self.threshold = threshold
self.grad_clip = grad_clip
self.use_extra_grad = use_extra_grad
def grad_cal(self, decay, LF_output, Total_output):
"""Calculates gradients for spiking neurons based on this paper:
https://www.frontiersin.org/articles/10.3389/fnins.2020.00119/full
Args:
decay (float): temporal variable describing the leaky property of used LIF neurons
LF_output (torch.Tensor):
Total_output (torch.Tensor):
Returns:
torch.Tensor: gradients
TODO: Check if this works on CPU or if torch.cuda.FloatTensor needs to be replaced
"""
Total_output = Total_output + (Total_output < 1e-3).type(torch.cuda.FloatTensor)
out = LF_output.gt(1e-3).type(torch.cuda.FloatTensor) + math.log(decay) * torch.div(LF_output, Total_output)
return out
def step(self, data_batch, target_batch, train=False):
"""Pass data through the model for training or evaluation
Args:
data_batch (torch.Tensor): batch of features from DataLoader object
target_batch (torch.Tensor): batch of targets from DataLoader object
train (bool, optional): flag if model weights should be updated based on loss (default: False)
Returns:
dict: contains performance metric information and possibly additional data from models forward function
"""
if train:
self.optimizer.zero_grad()
out_batch = self.model(data_batch, steps=self.steps)
if train:
if self.use_extra_grad:
# compute gradient
gradients = [self.grad_cal(self.decay, out_batch["lf_outs"][i], out_batch["total_outs"][i]) for i in range(len(out_batch["total_outs"])-1)]
# apply gradient
for t in range(self.steps):
for i in range(len(gradients)):
out_batch["out_temps"][i][t].register_hook(lambda grad, i=i: torch.mul(grad, gradients[i]))
if type(self.loss_function) == nn.MSELoss and self.task != "reconstruction":
target = out_batch["output"].data.clone().zero_()
target.scatter_(1, target_batch.unsqueeze(1), 1)
target = target.type(torch.FloatTensor).to(self.device)
else:
target = target_batch
out_batch["target"] = target
out_batch["weights"] = [p for p in self.model.parameters()]
losses = self.loss_function(**out_batch)
result = dict(
input=data_batch,
target=target_batch,
)
result.update(out_batch)
for k, v in losses.items():
result.update({k: v.item()})
if train:
losses["loss"].backward()
if self.grad_clip:
torch.nn.utils.clip_grad_value_(self.model.parameters(), self.grad_clip)
self.optimizer.step()
if self.use_extra_grad:
for i in range(len(out_batch["out_temps"])-1):
out_batch["out_temps"][i] = None
gradients[i] = None
return result
```
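The `step()` methods above fix the training contract that subclasses rely on: zero the gradients, run the forward pass, compute the loss, backpropagate, and update the optimizer. The snippet below is a self-contained toy illustration of that same sequence with a plain `torch.nn.Linear` model and MSE loss; it does not use the repository's `BaseModel` or its loss/metric helpers.
```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
loss_fn = nn.MSELoss()

data_batch = torch.randn(8, 4)
target_batch = torch.randn(8, 2)

optimizer.zero_grad()                 # 1. reset gradients (train=True branch)
output = model(data_batch)            # 2. forward pass
loss = loss_fn(output, target_batch)  # 3. compute loss
loss.backward()                       # 4. backpropagate
optimizer.step()                      # 5. update parameters
print(float(loss))
```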
#### File: src/models/cnn_vae.py
```python
import torch
import torch.nn as nn
from torch.distributions.logistic_normal import LogisticNormal
from models.base_model import BaseModel
from models.input_encoders import get_input_encoder
import utils as u
class ConvolutionalVAE(BaseModel):
def __init__(
self,
input_width,
input_height,
input_channels,
conv2d_channels,
hidden_sizes,
loss,
optimizer,
learning_rate,
weight_decay,
device,
kernel_size=3,
stride=1,
padding=0,
pooling_kernel=2,
pooling_stride=1,
activation="relu",
activation_out="logsoftmax",
pooling="avg",
encoder_params={"encoder": "noisy"},
verbose=True,
log_func=print,
):
super(ConvolutionalVAE, self).__init__(
input_width=input_width,
input_height=input_height,
input_channels=input_channels,
learning_rate=learning_rate,
weight_decay=weight_decay,
device=device,
loss=loss,
verbose=verbose,
log_func=log_func,
)
# initialize model
if verbose:
self.log_func("Initializing convolutional autoencoder...")
self.task = "reconstruction"
self.hidden_sizes = hidden_sizes
self.conv2d_channels = conv2d_channels
self.kernel_sizes = [kernel_size for i in range(len(conv2d_channels))]
self.strides = [stride for i in range(len(conv2d_channels))]
self.paddings = [padding for i in range(len(conv2d_channels))]
self.pooling_kernels = [pooling_kernel for i in range(len(conv2d_channels))]
self.pooling_strides = [pooling_stride for i in range(len(conv2d_channels))]
self.model = CNNVAEModel(
input_width=self.input_width,
input_height=self.input_height,
input_channels=self.input_channels,
conv2d_channels=self.conv2d_channels,
hidden_sizes=self.hidden_sizes,
kernel_sizes=self.kernel_sizes,
strides=self.strides,
paddings=self.paddings,
pooling_kernels=self.pooling_kernels,
pooling_strides=self.pooling_strides,
activation=activation,
activation_out=activation_out,
encoder_params=encoder_params,
pooling=pooling,
)
self.init_weights()
self.model.to(self.device)
self.input_layer = self.model.conv_encoder_layers["conv2d1"]
self.output_layer = self.model.conv_decoder_layers[f"convT2d{len(self.conv2d_channels)}"]
if verbose:
self.summary()
self.log_func(f"Model initialized successfully!\n")
# initialize optimizer
self.optimizer = u.get_optimizer(
optimizer, self.model, self.lr, self.wd, verbose
)
def summary(self):
"""Prints a model summary about itself."""
x = (
torch.randn(self.input_channels, self.input_width, self.input_height)
.view(-1, self.input_channels, self.input_width, self.input_height)
.to(self.device)
)
x_ = self.model.conv_encode(x)
self.log_func(f"Input shape: {x.shape}")
self.log_func(f"Shape after convolution: {x_[0].shape}")
self.log_func(f"Network architecture:")
self.log_func(self.model)
self.log_func(f"Number of trainable parameters: {self.count_parameters()}")
def update_(self, step=0, start=0, end=0.1, rate=0.9999, **kwargs):
try:
self.loss_function.beta = self.loss_function.end + (self.loss_function.start - self.loss_function.end) *\
self.loss_function.rate ** step
except AttributeError:
print("could not change beta parameter for ELBO loss.")
class CNNVAEModel(nn.Module):
"""Creates a CNN model."""
def __init__(
self,
input_width,
input_height,
input_channels,
conv2d_channels,
hidden_sizes,
kernel_sizes,
strides,
paddings,
pooling_kernels,
pooling_strides,
encoder_params={"encoder": "noisy"},
activation="relu",
activation_out="logsoftmax",
pooling="avg",
):
super(CNNVAEModel, self).__init__()
self.input_width = input_width
self.input_height = input_height
self.input_channels = input_channels
self.conv2d_channels = conv2d_channels
self.kernel_sizes = kernel_sizes
self.strides = strides
self.paddings = paddings
self.pooling_kernels = pooling_kernels
self.pooling_strides = pooling_strides
self.hidden_sizes = hidden_sizes
# self.fc_activations = [activation for i in range(len(hidden_sizes))] + [activation_out]
# define convolutional encoder layers
conv_encoder_args = dict(
input_channels=self.input_channels,
channels=self.conv2d_channels,
kernel_sizes=self.kernel_sizes,
strides=self.strides,
paddings=self.paddings,
activations=[activation for i in range(len(self.conv2d_channels))],
pooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
pooling_kernels=self.pooling_kernels,
pooling_strides=self.pooling_strides,
)
conv_encoder_parameters = u.get_conv2d_layers(**conv_encoder_args)
self.conv_encoder_layers = u.build_layers(conv_encoder_parameters)
# get flattened input size and reverse transformation
x = torch.randn(self.input_channels, self.input_width, self.input_height).view(
-1, self.input_channels, self.input_width, self.input_height
)
x_ = self.conv_encode(x)
self._to_linear = x_[0].shape[0] * x_[0].shape[1] * x_[0].shape[2]
self._from_linear = (x_[0].shape[0], x_[0].shape[1], x_[0].shape[2])
# define fully connected encoder layers
self.fc_encoder_activations = [activation for i in range(len(hidden_sizes)-1)] + [None]
fc_encoder_args = dict(
input_size=self._to_linear,
hidden_sizes=self.hidden_sizes[:-1],
output_size=self.hidden_sizes[-1]*2,
activations=self.fc_encoder_activations,
)
fc_encoder_parameters = u.get_fc_layers(**fc_encoder_args)
self.fc_encoder_layers = u.build_layers(fc_encoder_parameters)
# define fully connected decoder layers
self.fc_decoder_activations = [activation for i in range(len(hidden_sizes))]
decoder_sizes = self.hidden_sizes[:-1]
decoder_sizes.reverse()
fc_decoder_args = dict(
input_size=self.hidden_sizes[-1],
hidden_sizes=decoder_sizes,
output_size=self._to_linear,
activations=self.fc_decoder_activations,
)
fc_decoder_parameters = u.get_fc_layers(**fc_decoder_args)
self.fc_decoder_layers = u.build_layers(fc_decoder_parameters)
# define convolutional decoder layers
decoder_kernel_sizes = self.kernel_sizes
decoder_kernel_sizes.reverse()
decoder_convtranspose2d_channels = [self.input_channels] + self.conv2d_channels[:-1]
decoder_convtranspose2d_channels.reverse()
decoder_strides = self.strides
decoder_strides.reverse()
decoder_paddings = self.paddings
decoder_paddings.reverse()
unpooling_kernels = self.pooling_kernels
unpooling_kernels.reverse()
unpooling_strides = self.pooling_strides
unpooling_strides.reverse()
conv_decoder_args = dict(
input_channels=x_[0].shape[0],
channels=decoder_convtranspose2d_channels,
kernel_sizes=decoder_kernel_sizes,
strides=decoder_strides,
paddings=decoder_paddings,
activations=[activation for i in range(len(self.conv2d_channels) - 1)] + [activation_out],
unpooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
unpooling_kernels=unpooling_kernels,
unpooling_strides=unpooling_strides,
)
conv_decoder_parameters = u.get_convtranspose2d_layers(**conv_decoder_args)
self.conv_decoder_layers = u.build_layers(conv_decoder_parameters)
# initialize input encoder
self.input_encoder = get_input_encoder(**encoder_params)
def conv_encode(self, x):
"""Passes data through convolutional layers.
:param x: Tensor with input data.
:return Tensor with output data.
"""
for layer in self.conv_encoder_layers.values():
x = layer(x)
return x
def fc_encode(self, x):
x = x.view(-1, self._to_linear)
for layer in self.fc_encoder_layers.values():
x = layer(x)
return x
def fc_decode(self, x):
for layer in self.fc_decoder_layers.values():
x = layer(x)
return x
def conv_decode(self, x):
x = x.view(-1, *self._from_linear)
for layer in self.conv_decoder_layers.values():
x = layer(x)
return x
def encode(self, x):
x = self.conv_encode(x)
x = self.fc_encode(x)
mu = x[:, :self.hidden_sizes[-1]]
logvar = x[:, self.hidden_sizes[-1]:]
return mu, logvar
def decode(self, x):
x = self.fc_decode(x)
x = self.conv_decode(x)
return x
def forward(self, x):
"""Passes data through the network.
:param x: Tensor with input data.
:return Tensor with output data.
"""
x_ = self.input_encoder.encode(x)
mu, logvar = self.encode(x_)
z = self.reparametrize(mu, logvar)
#print(mu)
#print(logvar)
#print(z)
y = self.decode(z)
result = dict(
input=x,
mu=mu,
logvar=logvar,
output=y,
latent=z,
input_history=self.input_encoder.input_history,)
self.input_encoder.reset()
return result
def reparametrize(self, mu, logvar):
std = logvar.div(2).exp()
eps = torch.randn_like(std, device=std.device)
return mu + std * eps
```
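Two small pieces of logic in the VAE above are easy to check in isolation: the reparametrization trick in `reparametrize()` and the exponential beta schedule applied in `update_()` (beta = end + (start - end) * rate**step). The sketch below reproduces both with plain torch; the schedule constants are assumptions, since the actual values live in the loss object.
```python
import torch

def reparametrize(mu, logvar):
    std = logvar.div(2).exp()        # sigma = exp(logvar / 2)
    eps = torch.randn_like(std)
    return mu + std * eps

mu, logvar = torch.zeros(3, 2), torch.zeros(3, 2)
print(reparametrize(mu, logvar))     # samples from N(mu, exp(logvar))

# Hypothetical annealing constants: beta decays from `start` towards `end`
start, end, rate = 1.0, 0.1, 0.9999
for step in (0, 1_000, 10_000, 100_000):
    beta = end + (start - end) * rate ** step
    print(step, round(beta, 4))
```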
#### File: src/models/sfcn_classifier.py
```python
import torch
import torch.nn as nn
from models.base_model import SpikingBaseModel
from models.spiking_layers import LIF_sNeuron, Pooling_sNeuron, LF_Unit
import utils as u
class SpikingFCNClassifier(SpikingBaseModel):
def __init__(
self,
input_width,
input_height,
input_channels,
conv2d_channels,
hidden_sizes,
dataset,
loss,
optimizer,
learning_rate,
weight_decay,
device,
activation="lif",
activation_out="logsoftmax",
steps=100,
threshold=1,
decay=0.99,
n_out=2,
verbose=True,
log_func=print,
):
super(SpikingFCNClassifier, self).__init__(
input_width=input_width,
input_height=input_height,
input_channels=input_channels,
dataset=dataset,
learning_rate=learning_rate,
weight_decay=weight_decay,
steps=steps,
threshold=threshold,
decay=decay,
device=device,
loss=loss,
verbose=verbose,
log_func=log_func,
)
# initialize model
if verbose:
self.log_func("Initializing spiking convolutional neural network...")
self.n_out = n_out
self.hidden_sizes = hidden_sizes
self.model = SFCNModel(
input_width=self.input_width,
input_height=self.input_height,
input_channels=self.input_channels,
hidden_sizes=self.hidden_sizes,
activation=activation,
activation_out=activation_out,
threshold=threshold,
decay=decay,
n_out=n_out,
device=device,
steps=steps,
)
self.init_weights()
self.model.to(self.device)
if verbose:
self.summary()
self.log_func(f"Model initialized successfully!\n")
# initialize optimizer
self.optimizer = u.get_optimizer(
optimizer, self.model, self.lr, self.wd, verbose
)
# TODO: implement a better solution and delete this line, as done by the base model
# initialize loss function
self.loss_function = u.get_loss_function(loss, verbose, spiking=True)
def summary(self):
"""Prints a model summary about itself."""
x = (
torch.randn(self.input_channels, self.input_width, self.input_height)
.view(-1, self.input_channels, self.input_width, self.input_height)
.to(self.device)
)
self.log_func(f"Input shape: {x.shape}")
self.log_func(f"Output shape: {self.n_out}")
self.log_func(f"Network architecture:")
self.log_func(self.model)
self.log_func(f"Number of trainable parameters: {self.count_parameters()}")
class SFCNModel(nn.Module):
"""Creates a CNN model."""
def __init__(
self,
input_width,
input_height,
input_channels,
hidden_sizes,
activation="lif",
activation_out="lif",
steps=100,
threshold=1,
decay=0.99,
n_out=2,
device="cuda",
):
super(SFCNModel, self).__init__()
self.input_width = input_width
self.input_height = input_height
self.input_channels = input_channels
self.hidden_sizes = hidden_sizes
self.fc_activations = [activation for i in range(len(hidden_sizes))] + [activation_out]
self.n_out = n_out
self.device = device
self.steps = steps
# get flattened input size
x = torch.randn(self.input_channels, self.input_width, self.input_height).view(
-1, self.input_channels, self.input_width, self.input_height
)
self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
# define fully connected layers
fc_args = dict(
input_size=self._to_linear,
hidden_sizes=self.hidden_sizes,
output_size=self.n_out,
activations=self.fc_activations,
thresholds=[threshold for i in range(len(self.fc_activations))],
decays=[decay for i in range(len(self.fc_activations))],
)
fc_parameters = u.get_sfc_layers(**fc_args)
self.fc_layers = u.build_layers(fc_parameters)
def forward(self, x, steps=100):
"""Passes data through the network.
:param x: Tensor with input data.
:return Tensor with output data.
"""
modules = [m for m in self.modules() if not issubclass(type(m), nn.modules.container.ModuleDict)]
out_temps = [[] for m in modules if type(m) == LIF_sNeuron]
membrane_potentials = []
total_outs = []
LF_outs = []
with torch.no_grad():
p = x.clone()
p = p.view(p.size(0), -1)
for i, (name, layer) in enumerate(self.fc_layers.items()):
if type(layer) == LIF_sNeuron:
LF_outs.append(torch.zeros(p.size(), requires_grad=False, device=self.device))
total_outs.append(torch.zeros(p.size(), requires_grad=False, device=self.device))
if type(layer) not in [LIF_sNeuron, Pooling_sNeuron]:
p = layer(p)
last = i == len(self.fc_layers.items())-1
membrane_potentials.append(torch.zeros(p.size(), requires_grad=last, device=self.device))
for t in range(self.steps):
rand_num = torch.rand(x.size(0), x.size(1), x.size(2), x.size(3)).to(self.device)
Poisson_d_input = ((torch.abs(x)/2) > rand_num).type(torch.cuda.FloatTensor)
out = torch.mul(Poisson_d_input, torch.sign(x))
out = out.view(out.size(0), -1)
i = 0
for name, layer in self.fc_layers.items():
if type(layer) == LIF_sNeuron:
out, membrane_potentials[i] = layer(membrane_potentials[i])
LF_outs[i], total_outs[i], out = LF_Unit(layer.decay, LF_outs[i], total_outs[i], out, out_temps[i], t)
i += 1
else:
membrane_potentials[i] = membrane_potentials[i] + layer(out)
result = dict(output=membrane_potentials[-1], total_outs=total_outs, lf_outs=LF_outs, out_temps=out_temps)
return result
``` |
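The forward pass above converts analog inputs into spike trains by Poisson-like rate coding: at every time step a spike is emitted with probability |x|/2 and carries the sign of x (see the `Poisson_d_input` lines). A CPU-only sketch of that encoding, detached from the rest of the model, is shown below.
```python
import torch

torch.manual_seed(0)
x = torch.tensor([[0.8, -0.4, 0.1]])  # toy analog input roughly in [-1, 1]
steps = 1000

spike_sum = torch.zeros_like(x)
for _ in range(steps):
    rand_num = torch.rand_like(x)
    spikes = ((torch.abs(x) / 2) > rand_num).float()  # fire with probability |x|/2
    spike_sum += spikes * torch.sign(x)               # signed spike, as in the model

print(spike_sum / steps)  # average spike rate approximates x/2
```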
{
"source": "jhueckelheim/ParAD",
"score": 3
} |
#### File: jhueckelheim/ParAD/pyscopad.py
```python
import ompparser
import loopinspector
import ompdiff
import sys
from fparser.common.readfortran import FortranFileReader, FortranStringReader
from fparser.two.parser import ParserFactory
import fparser.two.Fortran2003 as f2003
def __getparloops_walker__(node):
'''
Recursive helper function for getparloops
'''
local_list = []
children = []
if hasattr(node, "content"):
children = node.content
elif hasattr(node, "items"):
children = node.items
for child in children:
if(type(child) == f2003.Comment and ompparser.ispragma(child.tostr())):
local_list.append(node)
local_list += __getparloops_walker__(child)
return local_list
def getparloops_file(filename):
'''
Find do-loops with OpenMP pragma
'''
reader = FortranFileReader(filename, ignore_comments=False)
f_parser = ParserFactory().create(std="f2003")
ast = f_parser(reader)
return __getparloops_walker__(ast)
def getparloops_string(string):
'''
Find do-loops with OpenMP pragma
'''
reader = FortranStringReader(string, ignore_comments=False)
f_parser = ParserFactory().create(std="f2003")
ast = f_parser(reader)
return __getparloops_walker__(ast)
def diffparloop(parloop):
'''
Analyse and differentiate an OpenMP pragma in reverse mode.
The argument must be the root node of an OpenMP-parallel loop.
Returns the scopes for all primal and adjoint variables.
'''
omppragmastr = parloop.content[0].tostr()
inspector = loopinspector.ReadWriteInspector()
inspector.visitNode(parloop)
# hack to determine loop counter, which is default private
counter_name = parloop.content[1].items[1].items[1][0].tostr()
inspector.vars[counter_name].makeLoopCounter()
varset = inspector.vars
scopes = ompparser.getscopes(omppragmastr, varset)
scopes_b = ompdiff.scope_reverse(scopes, varset, inspector)
return scopes, scopes_b
if __name__ == "__main__":
if(len(sys.argv)<2):
raise Exception("Input f90 file must be specified")
parloops = getparloops_file(sys.argv[1])
# For each parloop, diff the pragma separately from the actual program. For
# now we'll not do any loop transformations, can just call Tapenade for the
# rest. omp simd should be roughly the same, with additional mechanics for
# aligned push/pop and TF-MAD, maybe polyhedral transformations.
for parloop in parloops:
scopes, scopes_b = diffparloop(parloop)
print(parloop)
print("Original scopes:")
for varname,scope in scopes.items():
print("%s: %s"%(varname,scope))
print("Diff scopes:")
for varname,scope in scopes_b.items():
print("%s: %s"%(varname,scope))
print("\n")
``` |
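A minimal way to exercise the helpers above is to feed `getparloops_string` a small Fortran kernel whose loop carries an OpenMP pragma. This is only a sketch: it assumes `fparser2` is installed, that `pyscopad.py` and its companion modules are importable, and that `ompparser.ispragma` accepts the `!$omp` comment shown here.
```python
from pyscopad import getparloops_string

src = """
subroutine smooth(u, v, n)
integer :: i, n
real :: u(n), v(n)
!$omp parallel do private(i) shared(u, v)
do i = 2, n-1
v(i) = 0.5*(u(i-1) + u(i+1))
end do
end subroutine smooth
"""

parloops = getparloops_string(src)
print(len(parloops), "OpenMP-parallel loop(s) found")
```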
{
"source": "jhueckelheim/PerforAD",
"score": 3
} |
#### File: jhueckelheim/PerforAD/perforad.py
```python
import sympy as sp
import textwrap
from operator import itemgetter
verbose = False
verboseprint = print if verbose else lambda *a, **k: None
class LoopNest:
def __init__(self,body,bounds,counters,arrays,scalars,ints):
self.body = body
self.bounds = bounds
self.counters = counters
self.arrays = arrays
self.scalars = scalars
self.ints = ints
def __str__(self):
body = ""
try:
# Try to join the list of statements in the loop body together.
# If this fails, there is only one statement (no list).
outlist = []
for stmt in self.body:
outlist.append(str(stmt))
body = "\n".join(outlist)
except TypeError:
body = str(self.body)
rcounters = self.counters.copy()
rcounters.reverse()
loops = {}
outermost = None # keep track which is the outermost loop so we can place an OpenMP pragma in front
counters_within = [] # keep track of counters that are set within the OpenMP parallel loop, which need to be privatised.
for counter in rcounters:
# this builds the loop nest from the innermost to the outermost loop
counters_within.append(counter)
start = self.bounds[counter][0]
end = self.bounds[counter][1]
try:
# Try and remove loop nests where any dimension has 0 iterations.
if(end-start<0):
return ""
except TypeError:
# If start or end contain sympy.Symbols, the relation can not be known
# and this throws an error. We ignore this and assume that the loop may
# have more than 0 iterations (it does not matter if this turns out to
# be false at runtime, just adds dead code).
pass
try:
# Try and simplify loops with only one iteration (print the loop
# body, and assign the correct value to the counter variable).
if(end-start==0):
loops[counter] = False
continue
except TypeError:
# If that failed, perhaps the loop bounds are again symbolic. Print the
# loop, everything will be fine at runtime.
pass
loops[counter] = True
# whenever we find another nontrivial loop (more than one iteration),
# save it as the current outermost loop. By the end of this process,
# this variable will contain the actual outermost loop, and whatever
# counters we had found within that loop.
outermost = (counter,counters_within.copy())
for counter in rcounters:
start = self.bounds[counter][0]
end = self.bounds[counter][1]
is_a_loop = loops[counter]
# nontrivial loops get a for(...){...} construct.
if(is_a_loop):
# the outermost nontrivial loop also gets an OpenMP pragma
omp = ""
if(outermost[0] == counter):
omp = "#pragma omp parallel for private(%s)\n"%",".join(map(str,outermost[1]))
body = "%sfor ( %s=%s; %s<=%s; %s++ ) {\n%s\n}"%(omp,counter,start,counter,end,counter,textwrap.indent(str(body),4*" "))
# trivial loops (those with exactly one iteration) just get a statement.
else:
body = "%s=%s;\n%s"%(counter,start,body)
return body
def diff(self, diffvars):
body_b = self.body.diff(diffvars)
# A nest is a tuple that contains
# - a list containing (offset, statement) tuples
# - a dict with {counter: loop bounds}
# This method takes a nest, and splits it up into several nests
# such that each nest will iterate over a subset of the original domain,
# and contains only the statements that are valid in that subset.
nestlist = [(body_b,self.bounds)]
# This loop goes over all dimensions. In each dimension, nestlist is replaced
# with a new nestlist that has been split in that dimension.
for counter in self.counters:
verboseprint("counter %s"%counter)
newnestlist = []
# This loop goes over all nests. Each nest may get split into several new
# nests, which are appended to newnestlist.
for nest,nestbound in nestlist:
verboseprint(" nest %s"%str(nestbound))
nest.sort(key=lambda x: x[0][counter])
verboseprint(" -with offs %s"%list(map(lambda x: x[0],nest)))
# Multiple terms may have the same offset in the current dimension. We need to find
# the positions in the array where the offset in this dimension changes.
offsets = list(map(lambda x: x[0][counter],nest))
uniqueoffsets = list(set(offsets))
uniqueoffsets.sort()
chunklimits = [offsets.index(i) for i in uniqueoffsets]
for i in range(len(chunklimits)-1):
# Get the range of offsets that this loop nest will contain
offs_0 = offsets[chunklimits[i]]
offs_1 = offsets[chunklimits[i+1]]
# Get all statements that need to be part of the body of this prequel loop
stmts_pre = nest[slice(0,chunklimits[i+1])]
# Get all statements that need to be part of the body of this sequel loop
stmts_post = nest[slice(chunklimits[i+1],len(nest))]
# Compute the new loop bounds after applying offsets
bounds_pre = nestbound.copy()
bounds_post = nestbound.copy()
bounds_pre[counter] = nestbound[counter][0]+offs_1-1,nestbound[counter][0]+offs_0
bounds_post[counter] = nestbound[counter][1]+offs_1,nestbound[counter][1]+offs_0+1
verboseprint(" pre %s"%bounds_pre)
verboseprint(" post %s"%bounds_post)
# Append the nest to the new list of nests
newnestlist.append((stmts_pre,bounds_pre))
newnestlist.append((stmts_post,bounds_post))
# Finally, create the core loop and append it to the new list of nests
stmts_core = nest
bounds_core = nestbound.copy()
bounds_core[counter] = nestbound[counter][0]+nest[-1][0][counter],nestbound[counter][1]+nest[0][0][counter]
verboseprint(" core %s"%bounds_core)
newnestlist.append((stmts_core,bounds_core))
# Replace the old nest list with the refined one, ready for the next iteration
nestlist = newnestlist
# Finally, take all nests and turn them into actual LoopNest objects.
loops = []
arrays = []
for v in self.arrays:
arrays.append(v)
if(v in diffvars):
arrays.append(diffvars[v])
for body_b,nestbound in nestlist:
statements = map(itemgetter(1),body_b)
verboseprint(nestbound)
loops.append(LoopNest(statements,nestbound,self.counters,arrays,self.scalars,self.ints))
return loops
class SympyFuncStencil:
def __init__(self, name, args):
self.name = name
self.args = args
def __str__(self):
return str(self.name)
def at(self,inputs):
argstr = ", ".join(map(lambda arg: "%s=%s"%(arg[0],arg[1]), zip(self.args,inputs)))
return "%s[%s]"%(self.name,argstr)
def diff(self,wrt):
diffname = "%s_d"%self.name
diffargs = self.args + ["%s_d"%wrt]
return SympyFuncStencil(diffname,diffargs)
class SympyExprStencil:
def __init__(self, expr, args):
self.expr = expr
self.args = args
def __str__(self):
        return str(self.expr)
def at(self,inputs):
subs = dict(zip(self.args,inputs))
return self.expr.subs(subs)
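    # For illustration, using the symbols defined further down in this file:
    # SympyExprStencil(l + c + r, [l, c, r]).at([inv(i-1), inv(i), inv(i+1)]) substitutes the
    # inputs for the formal arguments and returns the expression inv(i - 1) + inv(i) + inv(i + 1).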
def args(self):
return self.args
def diff(self,wrt):
wrtb = sp.Symbol('wrtb')
return SympyExprStencil(self.expr.diff(wrt)*wrtb,self.args+[wrtb])
# TODO separate presentation/API from logic.
# StencilExpression should only deal with whatever is necessary for the logic,
# and can be extended by a FortranStencilExpression / SympyStencilExpression that
# adds a layer of sugar.
class StencilExpression:
def __init__(self,outvar,invar,idx_out,offset_in,func):
self.outvar = outvar
self.invar = invar
self.idx_out = idx_out # should be a loop counter (e.g. i)
self.offset_in = offset_in # should be a list of constant offsets (e.g. [i-1,i,i+1])
self.func = func
def __str__(self):
# Go through the list of input vars and their corresponding list of offsets
args = []
for (var,offsets) in list(zip(self.invar,self.offset_in)):
# Print the var with each offset as an index
for ofs in offsets:
# The offset and the array index can have multiple dimensions
idxlist = []
for dim,of in list(zip(self.idx_out,ofs)):
idxlist.append(dim+of)
args.append(var(*idxlist))
lhsargs = ",".join(map(lambda x: str(x),self.idx_out))
lhs = "%s(%s)"%(self.outvar,lhsargs)
return "%s += %s;"%(lhs,self.func.at(args))
def diff(self,diffvars):
# All invars and offsets given to the StencilExpression are
# zipped so that each offset has the correct invar, like so:
# [(invar, [(i, -1)]), (invar, [(i, 0)]), (invar, [(i, 1)])]
inputs = []
for (var,offsets) in list(zip(self.invar,self.offset_in)):
# Print the var with each offset as an index
for ofs in offsets:
# The offset and the array index can have multiple dimensions
idxlist = list(zip(self.idx_out,ofs))
inputs.append((var,idxlist))
exprs = []
# zip the list of function arguments and input variables
for arg,inp in list(zip(self.func.args,inputs)):
if(inp[0] in diffvars):
# Differentiate the function wrt. the current input
func_d = self.func.diff(arg)
# inpvar is the name of the current input variable.
# inpidx is a tuple of counter variable (e.g. i) and offset (e.g. -1)
inpvar, inpidx = inp
# The output index of the diff'ed expression will be the same as that of
# the primal expression (that's the whole point of this transformation)
outidx = self.idx_out
# We shift all other indices by the offset in inpidx to make this correct
shifted_idx_in = []
for (var,offsets) in list(zip(self.invar,self.offset_in)):
idxlist = []
for ofs in offsets:
idxlist.append(list(map(lambda x: (x[1]-x[2][1]),zip(self.idx_out,ofs,inpidx))))
shifted_idx_in.append(idxlist)
shifted_idx_in.append([list(map(lambda x: (-x[1]),inpidx))])
expr_d = StencilExpression(outvar = diffvars[inp[0]], invar = self.invar+[diffvars[self.outvar]], idx_out = outidx, offset_in = shifted_idx_in, func = func_d)
exprs.append((dict(inpidx),expr_d))
return exprs
i, j, n, l, c, r, t, b, lb, rt, lt, rb, a = sp.symbols('i, j, n, l, c, r, t, b, lb, rt, lt, rb, a')
outv = sp.Function('outv')
inv = sp.Function('inv')
vel = sp.Function('vel')
outv_b = sp.Function('outv_b')
inv_b = sp.Function('inv_b')
vel_b = sp.Function('vel_b')
def printfunction(name, loopnestlist):
counters = loopnestlist[0].counters
arrays = loopnestlist[0].arrays
scalars = loopnestlist[0].scalars
ints = loopnestlist[0].ints
funcdefs = """
#ifndef TAPENADE
#include <math.h>
#endif
#define Max(x,y) fmax(x,y)
#define Min(x,y) fmin(x,y)
#define Heaviside(x) ((x>=0)?1.0:0.0)
"""
arrtransformlist = []
for varname in arrays:
arglist = list(map(lambda x: x*"x",range(1,len(counters)+1)))
arrtransformlist.append("#define %s(%s) %s[%s]"%(varname,",".join(arglist),varname,"][".join(arglist)))
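        # For a 2-D array named "u" (illustrative), the line above emits:
        #   #define u(x,xx) u[x][xx]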
cpp = "%s\n%s\n"%(funcdefs,"\n".join(arrtransformlist))
args = list(map(lambda x: "double%s %s"%(len(counters)*"*",x),arrays))
args = args + list(map(lambda x: "double %s"%(x),scalars))
args = args + list(map(lambda x: "int %s"%(x),ints))
declarations = "\n".join(list(map(lambda x: "int %s;"%x, counters)))
body = [textwrap.indent(declarations,4*" ")]
for loopnest in loopnestlist:
body.append(textwrap.indent(str(loopnest),4*" "))
filename = "generated/%s.c"%name
print("Writing to %s"%filename)
file = open(filename,"w")
file.write("%svoid %s(%s) {\n%s\n}"%(cpp,name, ", ".join(args), "\n".join(body)))
file.close()
#f = SympyFuncStencil("foo",[l,c,r])
#stexpr = StencilExpression(outv, [inv], [i], [[[-1],[0],[1]]],f)
#loop1d = LoopNest(body=stexpr, bounds={i:[2,n-1]})
#print(loop1d)
#for lp in (loop1d.diff({inv:inv_b, outv:outv_b})):
# print(lp)
def makeLoopNest(lhs, rhs, counters, bounds):
functions = list(rhs.atoms(sp.Function))
functions.sort(key=lambda x: x.func)
scalars = [s for s in rhs.atoms(sp.Symbol) if s not in counters]
ints = []
for b in bounds:
try:
ints = ints + list(bounds[b][0].atoms(sp.Symbol))
except AttributeError:
pass
try:
ints = ints + list(bounds[b][1].atoms(sp.Symbol))
except AttributeError:
pass
ints = list(set(ints))
funcNames = []
offsets = []
funcID = 0
subs = []
for func in functions:
funcName = func.func
if not (funcName in funcNames):
funcNames.append(funcName)
offsets.append([])
funcArgs = list(map(lambda x: x[0]-x[1], zip(func.args, counters)))
offsets[-1].append(funcArgs)
subs.append([func, "perforad_arg_%d"%funcID])
funcID = funcID + 1
# TODO check that offsets are const
# TODO check that lhs args are in correct order
exprvars = list(map(itemgetter(1),subs))
f2d = SympyExprStencil(rhs.subs(subs),exprvars)
stexpr = StencilExpression(lhs.func, funcNames, counters, offsets, f2d)
loop = LoopNest(body=stexpr, bounds = bounds, counters = counters, arrays = [lhs.func]+funcNames, scalars = scalars, ints = ints)
return loop
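# Usage sketch (hypothetical, mirroring the commented-out 1-D example above; printfunction
# assumes a "generated/" directory already exists):
#   loop = makeLoopNest(lhs=outv(i), rhs=inv(i-1) + inv(i) + inv(i+1),
#                       counters=[i], bounds={i: [2, n-1]})
#   printfunction("primal_1d", [loop])
#   printfunction("adjoint_1d", loop.diff({inv: inv_b, outv: outv_b}))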
``` |
{
"source": "JHU-Econ-Choice-2018/brock-mirman-etc-jacalin1",
"score": 4
} |
#### File: brock-mirman-etc-jacalin1/notebooks/DSGE-RA-K-Dynamics-Problems.py
```python
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from interpolation import interp
from numba import njit, prange
from quantecon.optimize.scalar_maximization import brent_max
class OptimalGrowthModel:
def __init__(self,
f, # Production function
u, # Utility function
β=0.96, # Discount factor
μ=0,
s=0.1,
grid_max=4,
grid_size=200,
shock_size=250):
self.β, self.μ, self.s = β, μ, s
self.f, self.u = f, u
self.y_grid = np.linspace(1e-5, grid_max, grid_size) # Set up grid
self.shocks = np.exp(μ + s * np.random.randn(shock_size)) # Store shocks
def operator_factory(og, parallel_flag=True):
"""
A function factory for building the Bellman operator, as well as
a function that computes greedy policies.
Here og is an instance of OptimalGrowthModel.
"""
f, u, β = og.f, og.u, og.β
y_grid, shocks = og.y_grid, og.shocks
@njit
def objective(c, v, y):
"""
The right hand side of the Bellman equation
"""
# First turn v into a function via interpolation
v_func = lambda x: interp(y_grid, v, x)
return u(c) + β * np.mean(v_func(f(y - c) * shocks))
@njit(parallel=parallel_flag)
def T(v):
"""
The Bellman operator
"""
v_new = np.empty_like(v)
for i in prange(len(y_grid)):
y = y_grid[i]
# Solve for optimal v at y
v_max = brent_max(objective, 1e-10, y, args=(v, y))[1]
v_new[i] = v_max
return v_new
@njit
def get_greedy(v):
"""
Computes the v-greedy policy of a given function v
"""
σ = np.empty_like(v)
for i in range(len(y_grid)):
y = y_grid[i]
# Solve for optimal c at y
c_max = brent_max(objective, 1e-10, y, args=(v, y))[0]
σ[i] = c_max
return σ
return T, get_greedy
α = 0.4 # Production function parameter
@njit
def f(k):
"""
Cobb-Douglas production function
"""
return k**α
og = OptimalGrowthModel(f=f, u=np.log)
T, get_greedy = operator_factory(og)
def solve_model(og,
use_parallel=True,
tol=1e-4,
max_iter=1000,
verbose=True,
print_skip=25):
T, _ = operator_factory(og, parallel_flag=use_parallel)
# Set up loop
v = np.log(og.y_grid) # Initial condition
i = 0
error = tol + 1
while i < max_iter and error > tol:
v_new = T(v)
error = np.max(np.abs(v - v_new))
i += 1
if verbose and i % print_skip == 0:
print(f"Error at iteration {i} is {error}.")
v = v_new
if i == max_iter:
print("Failed to converge!")
if verbose and i < max_iter:
print(f"\nConverged in {i} iterations.")
return v_new
# %% [markdown]
# ## Solution QuantEcon Q1.
# %%
def simulate_og(σ_func, og, α, y0=0.1, ts_length=100):
'''
Compute a time series given consumption policy σ.
'''
y = np.empty(ts_length)
ξ = np.random.randn(ts_length-1)
y[0] = y0
for t in range(ts_length-1):
y[t+1] = (y[t] - σ_func(y[t]))**α * np.exp(og.μ + og.s * ξ[t])
return y
fig, ax = plt.subplots(figsize=(9, 6))
for β in (0.8, 0.9, 0.98):
og = OptimalGrowthModel(f, np.log, β=β, s=0.05)
y_grid = og.y_grid
v_solution = solve_model(og, verbose=False)
σ_star = get_greedy(v_solution)
σ_func = lambda x: interp(y_grid, σ_star, x) # Define an optimal policy function
y = simulate_og(σ_func, og, α)
ax.plot(y, lw=2, alpha=0.6, label=rf'$\beta = {β}$')
ax.legend(loc='lower right')
plt.show()
# %% [markdown]
# ## Solution
# %%
ts_length=500
ξ = np.random.randn(ts_length-1)
def simulate_og(σ_func, og, α, y0=0.1, ts_length=500):
'''
Compute a time series given consumption policy σ.
'''
y = np.empty(ts_length)
k = np.empty(ts_length)
#ξ = np.random.randn(ts_length-1)
shocks = np.empty(ts_length-1)
y[0] = y0
for t in range(ts_length-1):
k[t+1] = np.log(y[t] - σ_func(y[t]))
y[t+1] = (y[t] - σ_func(y[t]))**α * np.exp(og.μ + og.s * ξ[t])
shocks[t] = np.exp(og.μ + og.s * ξ[t])
return y, k, shocks
β=0.96
og = OptimalGrowthModel(f, np.log, β=β, s=0.05)
y_grid = og.y_grid
v_solution = solve_model(og, verbose=False)
σ_star = get_greedy(v_solution)
σ_func = lambda x: interp(y_grid, σ_star, x) # Define an optimal policy function
y, k, shocks = simulate_og(σ_func, og, α)
vk = round(np.var(k[10:]),5)
vtheoretical = round(np.var(shocks[10:])/(1 - α**2),5)
print(f"\nEmpirical variance is {vk} and theoretical variance is {vtheoretical}.")
print("Thus, simulation results correspond to the theoretical result.")
# %% [markdown] {"hidden": true}
# ## Compare the Results to a linearized approximation
#
# Now numerically confirm the result from the BrockMirman handout that
#
# \begin{eqnarray}
# y_{t+1} & = & \alpha (y_{t} + \log \alpha \beta ) + \phi_{t+1}
# \end{eqnarray}
# %%
x = np.empty(ts_length)
x[0] = 0
for t in range(ts_length-1):
    x[t+1] = round(np.log(y[t+1]) - α *(np.log(y[t]) + np.log(α*β)) - (og.μ + og.s * ξ[t]),0)
x
# %% [markdown]
# # PROBLEM
# ## Suppose $\phi_{t}$ is serially correlated
#
# Now we want to consider a case where the level of productivity $\epsilon$ is serially correlated:
#
# \begin{eqnarray}
# \phi_{t} = \zeta \phi_{t-1} + \nu_{t}
# \end{eqnarray}
#
# for some shock $\nu$ with variance $\sigma^{2}_{\nu}$ and a serial correlation coefficient $0 < \zeta < 1$. Calculate the variance of $k$ under this new assumption.
# %% [markdown]
# ## Solution
#
# We have:
# $$ var(k) = \frac{\sigma ^2_{\phi}}{1 - \alpha^2} $$
#
# As $\phi$ follows an AR(1) process with serial correlation $\zeta$ (written `ρ` in the code below):
# $$ var(\phi) = \frac{var(\nu)}{1 - \zeta^2} $$
#
# Thus:
# $$ var(k) = \frac{var(\nu)}{(1 - \alpha^2)(1 - \zeta^2)} $$
#
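# %%
# A quick numeric check of the formula above, using the values assumed in the simulation
# below (var(ν) = 0.05**2, ζ = 0.2 (written ρ in the code) and α = 0.4):
print(0.05**2 / ((1 - 0.4**2) * (1 - 0.2**2)))  # ≈ 0.0031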
# %% [markdown] {"heading_collapsed": true}
# # PROBLEM
# ## Now Solve and Simulate the Model
#
# Use the tools provided on the QuantEcon website to solve the model. Then, starting with a capital stock equal to the stochastic steady state, simulate the model for 100 periods five separate times. Compare the variances you have calculated numerically to the formulas you derived analytically, and make some remarks about what this means for trying to calibrate the model to data by examining the variance of $k$ in empirical data.
# %% [markdown]
# ## Solution
# %%
ρ = 0.2
def simulate_ogc(σ_func, og, α, y0=0.1, ts_length=500):
'''
Compute a time series given consumption policy σ.
'''
y = np.empty(ts_length)
k = np.empty(ts_length)
ξ = 0.05*np.random.randn(ts_length-1)
shocks = np.empty(ts_length)
y[0] = y0
shocks[0] = ξ[0]/(1-ρ**2)
for t in range(ts_length-1):
shocks[t+1] = ρ*shocks[t] + ξ[t]
k[t+1] = np.log(y[t] - σ_func(y[t]))
y[t+1] = (y[t] - σ_func(y[t]))**α * np.exp(shocks[t+1])
return y, k, shocks
β=0.96
og = OptimalGrowthModel(f, np.log, β=β, s=0.05)
y_grid = og.y_grid
v_solution = solve_model(og, verbose=False)
σ_star = get_greedy(v_solution)
σ_func = lambda x: interp(y_grid, σ_star, x) # Define an optimal policy function
y, k, shocks = simulate_ogc(σ_func, og, α)
vk = round(np.var(k[200:]),5)
vtheoretical = round(0.05**2/((1 - α**2)*(1 - ρ**2)),5)
print(f"\nEmpirical variance is {vk} and theoretical variance is {vtheoretical}.")
print("Thus, simulation results does not necessarily correspond to the theoretical result, especially if ρ is high.")
# %% [markdown] {"heading_collapsed": true, "hidden": true}
# ## Now Do a Similar Exercise for the CRRA utility Model
#
# Use the QuantEcon code to solve the model for a value of relative risk aversion $\rho = 3$. Now calculate the variance of $k$ for this new model in the same way you did for the earlier model.
#
# %%
α=0.4
β=0.96
μ=0
s=0.05
rho=3
@njit
def crra(c):
return c**(1-rho)/(1-rho) # CRRA Utility
og = OptimalGrowthModel(f, crra, β=β, s=0.05)
y_grid = og.y_grid
v_solution = solve_model(og, verbose=False)
σ_star = get_greedy(v_solution)
σ_func = lambda x: interp(y_grid, σ_star, x) # Define an optimal policy function
y, k, shocks = simulate_og(σ_func, og, α)
vk = round(np.var(k[10:]),5)
print(f"\nEmpirical variance is {vk}.")
``` |
{
"source": "j-huff/ion-gui",
"score": 2
} |
#### File: j-huff/ion-gui/ion_config_utils.py
```python
from io import StringIO
import traceback
import logging
logging.basicConfig(filename='example.log',level=logging.DEBUG)
def generate_ionconfig(json_data,node_key,filename):
nodes = json_data["nodes"]
node = nodes[node_key]
# print(node)
f = open(filename,"w")
wmKey = node["wmKey"]
sdrName = node["sdrName"]
wmSize = node["wmSize"]
heapWords = node["heapWords"]
f.write(f"wmKey {wmKey}\n")
f.write(f"sdrName {sdrName}\n")
f.write(f"wmSize {wmSize}\n")
f.write(f"configFlags 1\n")
f.write(f"heapWords {heapWords}\n")
f.close()
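    # The generated .ionconfig is a plain key/value file, e.g. (illustrative values):
    #   wmKey 65281
    #   sdrName ion
    #   wmSize 5000000
    #   configFlags 1
    #   heapWords 75000000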
def generate_ionrc(json_data,node_key,filename):
nodes = json_data["nodes"]
node = nodes[node_key]
# print(node)
f = open(filename,"w")
ipn = node["ipn"]
# print(json_data["links"][0])
links = [link for link in json_data["links"].values() if link["node1_uuid"] == node_key or link["node2_uuid"] == node_key]
f.write(f"1 {ipn} config.ionconfig\n")
for link in links:
for contact in link["contacts"].values():
try:
fromTime = contact["fromTime"]
untilTime = contact["untilTime"]
rate = contact["rate"]
confidence = contact["confidence"]
if (fromTime and untilTime and rate):
f.write(f"a contact +{fromTime} +{untilTime} {rate} {confidence}\n")
except:
pass
f.write(f"s\n")
f.write(f"m horizon +0\n")
f.close()
def generate_bprc(json_data,node_key,filename):
nodes = json_data["nodes"]
node = nodes[node_key]
# print(node)
f = open(filename,"w")
ipn = node["ipn"]
# print(json_data["links"][0])
links = [link for link in json_data["links"].values() if link["node1_uuid"] == node_key or link["node2_uuid"] == node_key]
f.write(f"1\na scheme ipn 'ipnfw' 'ipnadminep'\n")
f.write(f"a endpoint ipn:{ipn}.0 q\n")
for endpointStr in node["endpoints"].split(','):
num = int(endpointStr)
f.write(f"a endpoint ipn:{ipn}.{endpointStr} q\n")
protocols = set()
for link in links:
protocols.add(link["protocol"])
for protocol in protocols:
if(protocol == "TCP"):
payloadBytesPerFrame = node["protocolSettings"]["TCP"]["payloadBytesPerFrame"]
overheadBytesPerFrame = node["protocolSettings"]["TCP"]["overheadBytesPerFrame"]
nominalDataRate = node["protocolSettings"]["TCP"]["nominalDataRate"]
f.write(f"a protocol tcp {payloadBytesPerFrame} {overheadBytesPerFrame} {nominalDataRate}\n")
if(protocol == "UDP"):
payloadBytesPerFrame = node["protocolSettings"]["UDP"]["payloadBytesPerFrame"]
overheadBytesPerFrame = node["protocolSettings"]["UDP"]["overheadBytesPerFrame"]
nominalDataRate = node["protocolSettings"]["UDP"]["nominalDataRate"]
f.write(f"a protocol udp {payloadBytesPerFrame} {overheadBytesPerFrame} {nominalDataRate}\n")
#TODO: fix multiple link bug
processed_uuids = []
for link in links:
logging.debug(link["uuid"])
if link["uuid"] in processed_uuids:
logging.debug("link already processed\n")
logging.debug(link)
continue
processed_uuids.append(link["uuid"])
if(link["protocol"] == "TCP"):
port1 = link["protocolSettings"]["TCP"]["port1"][0]
port2 = link["protocolSettings"]["TCP"]["port2"][0]
other_uuid = link["node2_uuid"]
if(node["uuid"] == link["node2_uuid"]):
other_uuid = link["node1_uuid"]
                port1, port2 = port2, port1
other_node = nodes[other_uuid]
other_machine_uuid = other_node["machine"]
other_addr = json_data["machines"][other_machine_uuid]["address"]
f.write(f"a induct tcp {other_addr}:{port1} tcpcli\n")
f.write(f"a outduct tcp {other_addr}:{port2} ''\n")
f.write("r 'ipnadmin config.ipnrc'\ns\n")
f.close()
def generate_ipnrc(json_data,node_key,filename):
nodes = json_data["nodes"]
node = nodes[node_key]
# print(node)
f = open(filename,"w")
ipn = node["ipn"]
# print(json_data["links"][0])
links = [link for link in json_data["links"].values() if link["node1_uuid"] == node_key or link["node2_uuid"] == node_key]
for link in links:
if(link["protocol"] == "TCP"):
port1 = link["protocolSettings"]["TCP"]["port1"][0]
port2 = link["protocolSettings"]["TCP"]["port2"][0]
other_uuid = link["node2_uuid"]
if(node["uuid"] == link["node2_uuid"]):
other_uuid = link["node1_uuid"]
                port1, port2 = port2, port1
other_node = nodes[other_uuid]
other_machine_uuid = other_node["machine"]
other_addr = json_data["machines"][other_machine_uuid]["address"]
other_ipn = other_node["ipn"]
f.write(f"a plan {other_ipn} tcp/{other_addr}:{port2}\n")
f.close()
```
#### File: j-huff/ion-gui/process_download.py
```python
import json
import sys
import tempfile
import os
import uuid
import zipfile
import time
import shutil
import logging
from ion_config_utils import *
logging.basicConfig(filename='example.log',level=logging.DEBUG)
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
def pretty(obj):
    print(json.dumps(obj, indent=4, separators=(',',' : ')))
#input_str = str(sys.stdin.read())
input_str = str(sys.argv[1])
logging.debug(input_str)
input_json = json.loads(input_str)
# print(input_json['machines'])
tmp_path = os.getcwd() + "/tmp/"
path_uuid = str(uuid.uuid4())
path = tmp_path + path_uuid +"/"
common_path = os.getcwd() + "/config_common/"
# print ("The current working directory is %s" % path)
os.mkdir(path)
machines = input_json['machines']
nodes = input_json['nodes']
# pretty(nodes)
node_paths = {}
for key in nodes:
node = nodes[key]
new_path = path + node["name"] + "/"
node_paths[key] = new_path
os.mkdir(new_path)
cmd = "cp -rp "+common_path+"* "+new_path
os.system(cmd)
generate_ionconfig(input_json,key,new_path+"config.ionconfig")
generate_ionrc(input_json,key,new_path+"config.ionrc")
generate_bprc(input_json,key,new_path+"config.bprc")
generate_ipnrc(input_json,key,new_path+"config.ipnrc")
logging.debug("Outputing to file")
zip_filename = tmp_path+path_uuid
shutil.make_archive(zip_filename, 'zip', path)
# time.sleep(.1)
ret = "{'filename' : '"+zip_filename+'.zip'+"'}"
print(zip_filename+".zip",end="")
shutil.rmtree(tmp_path + path_uuid)
logging.debug("done")
``` |
{
"source": "jhuggins/anti-racist-data",
"score": 3
} |
#### File: US-covid-cases-deaths-by-state-race/python/create-expected-versus-observed-deaths-plots.py
```python
import os
import matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import pandas as pd
import seaborn as sns
def create_plot(group, df):
# make scatter plot and best linear fit of
# expected vs observed proportion of deaths
deaths_col = 'Deaths_' + group + '_proportion'
sns.lmplot(x=group, y=deaths_col, data=df)
# plot x = y line
max_prop = min(df[group].max(), df[deaths_col].max())
l = mlines.Line2D([0, max_prop], [0, max_prop], linestyle=':', color='k')
plt.gca().add_line(l)
plt.title(group)
plt.xlabel('Proportion of state population')
plt.ylabel('Proportion of COVID deaths')
plt.tight_layout()
plt.savefig('output/expected-versus-observed-deaths-{}.png'.format(group.lower()))
plt.close()
def main():
# make figures pretty
sns.set_style('white')
sns.set_context('notebook', font_scale=1.5, rc={'lines.linewidth': 2})
matplotlib.rcParams['legend.frameon'] = False
# load dataset; exclude US
df = pd.read_csv('data/US-covid-cases-deaths-population-by-state-race.csv',
index_col=0)
df.drop('United States', inplace=True)
df = df[pd.notnull(df.Deaths_Black_proportion)]
for group in ['White', 'Black']:
create_plot(group, df)
if __name__ == '__main__':
main()
``` |
{
"source": "jhuggins/viabel",
"score": 2
} |
#### File: viabel/tests/test_objectives.py
```python
import autograd.numpy as anp
import numpy as np
from autograd.scipy.stats import norm
from viabel.approximations import MFGaussian, MFStudentT
from viabel.objectives import AlphaDivergence, DISInclusiveKL, ExclusiveKL
from viabel.optimization import RMSProp
def _test_objective(objective_cls, num_mc_samples, **kwargs):
np.random.seed(851)
# mean = np.random.randn(1,dimension)
# stdev = np.exp(np.random.randn(1,dimension))
mean = np.array([1., -1.])[np.newaxis, :]
stdev = np.array([2., 5.])[np.newaxis, :]
def log_p(x):
return anp.sum(norm.logpdf(x, loc=mean, scale=stdev), axis=1)
approx = MFStudentT(2, 100)
objective = objective_cls(approx, log_p, num_mc_samples, **kwargs)
# large number of MC samples and smaller epsilon and learning rate to ensure accuracy
init_param = np.array([0, 0, 1, 1], dtype=np.float32)
opt = RMSProp(0.1)
opt_results = opt.optimize(1000, objective, init_param)
# iterate averaging introduces some bias, so use last iterate
est_mean, est_cov = approx.mean_and_cov(opt_results['opt_param'])
est_stdev = np.sqrt(np.diag(est_cov))
print(est_stdev, stdev)
np.testing.assert_almost_equal(mean.squeeze(), est_mean, decimal=1)
np.testing.assert_almost_equal(stdev.squeeze(), est_stdev, decimal=1)
def test_ExclusiveKL():
_test_objective(ExclusiveKL, 100)
def test_ExclusiveKL_path_deriv():
_test_objective(ExclusiveKL, 100, use_path_deriv=True)
def test_DISInclusiveKL():
dim = 2
_test_objective(DISInclusiveKL, 100,
temper_prior=MFGaussian(dim),
temper_prior_params=np.concatenate([[0] * dim, [1] * dim]),
ess_target=50)
def test_AlphaDivergence():
_test_objective(AlphaDivergence, 100, alpha=2)
``` |
{
"source": "Jhughes999/Python-Projects",
"score": 4
} |
#### File: Jhughes999/Python-Projects/diceroll.py
```python
import random
reroll = "y"
# Dice Ascii Lines
zero = "| |"
odd = "| O |"
even = "| O O |"
trip = "| O O O |"
# Dice Ascii Print Function
def print_dice(line1, line2, line3):
print("---------")
print(line1)
print(line2)
print(line3)
print("---------")
# Dice Result Dictionary
dice_img = {
1 : [zero, odd, zero],
2 : [zero, even, zero],
3 : [odd, odd, odd],
4 : [even, zero, even],
5 : [even, odd, even],
6 : [even, even, even],
7 : [even, trip, even],
8 : [trip, even, trip],
9 : [trip, trip, trip],
}
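# For example, dice_img[5] is [even, odd, even], so print_dice(*dice_img[5]) draws a
# five-pip face: two rows of two pips around a single centre pip, inside the dashed border.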
# Dice Roll Main Code
while reroll == "y":
min = 1
max = int(input("number of sides?"))
num_of_rolls = int(input("number of dice?"))
results = []
x = 1
while x <= num_of_rolls:
diceroll = random.randint(min, max)
x += 1
#Dice Result with Ascii
if max <= 9:
print_dice(*dice_img[diceroll])
#Dice Result without Ascii
else:
print(diceroll)
results.append(diceroll)
print("Total dice roll is: %d!" %(sum(results)))
# Continue or end statement
    reroll = input("Would you like to reroll? y or n...")
    if reroll == "n":
        print("Goodbye")
``` |
{
"source": "jhughesbiot/daq",
"score": 2
} |
#### File: daq/daq/runner.py
```python
import copy
import os
import re
import threading
import time
import traceback
import uuid
import json
import pathlib
from datetime import datetime, timedelta, timezone
from forch.proto.shared_constants_pb2 import PortBehavior
import configurator
from device_report_client import DeviceReportClient
import faucet_event_client
import container_gateway
import external_gateway
import gcp
import host as connected_host
import network
import report
import stream_monitor
from wrappers import DaqException
import logger
from proto.system_config_pb2 import DhcpMode
LOGGER = logger.get_logger('runner')
class PortInfo:
"""Simple container for device port info"""
active = False
flapping_start = None
port_no = None
class IpInfo:
"""Simple container for device ip info"""
ip_addr = None
state = None
delta_sec = None
class Device:
"""Simple container for device info"""
def __init__(self):
self.mac = None
self.host = None
self.gateway = None
self.group = None
self.port = None
self.dhcp_ready = False
self.dhcp_mode = None
self.ip_info = IpInfo()
self.vlan = None
self.set_id = None
def __repr__(self):
return self.mac.replace(":", "")
class Devices:
"""Container for all devices"""
def __init__(self):
self._devices = {}
self._set_ids = set()
def new_device(self, mac, port_info=None, vlan=None):
"""Adding a new device"""
assert mac not in self._devices, "Device with mac: %s is already added." % mac
device = Device()
device.mac = mac
self._devices[mac] = device
device.port = port_info if port_info else PortInfo()
device.vlan = vlan
port_no = device.port.port_no
set_id = port_no if port_no else self._allocate_set_id()
assert set_id not in self._set_ids, "Duplicate device set id %d" % set_id
self._set_ids.add(set_id)
device.set_id = set_id
return device
def create_if_absent(self, mac, port_info=None, vlan=None):
"""Create a new device if none found, else return the previous one"""
prev_device = self._devices.get(mac)
return prev_device if prev_device else self.new_device(mac, port_info=port_info, vlan=vlan)
def _allocate_set_id(self):
set_id = 1
while set_id in self._set_ids:
set_id += 1
return set_id
def remove(self, device):
"""Removing a device"""
assert self.contains(device), "Device %s not found." % device
del self._devices[device.mac]
self._set_ids.remove(device.set_id)
def get(self, device_mac):
"""Get a device using its mac address"""
return self._devices.get(device_mac)
def get_by_port_info(self, port):
"""Get a device using its port info object"""
for device in self._devices.values():
if device.port == port:
return device
return None
def get_by_gateway(self, gateway):
"""Get devices under specified gateway"""
return [device for device in self._devices.values() if device.gateway == gateway]
def get_by_group(self, group_name):
"""Get devices under a group name"""
return [device for device in self._devices.values() if device.group == group_name]
def get_all_devices(self):
"""Get all devices"""
return list(self._devices.values())
def get_triggered_devices(self):
"""Get devices with hosts"""
return [device for device in self._devices.values() if device.host]
def contains(self, device):
"""Returns true if the device is expected"""
return self._devices.get(device.mac) == device
class DAQRunner:
"""Main runner class controlling DAQ. Primarily mediates between
faucet events, connected hosts (to test), and gcp for logging. This
class owns the main event loop and shards out work to subclasses."""
MAX_GATEWAYS = 9
_DEFAULT_RETENTION_DAYS = 30
_SITE_CONFIG = 'site_config.json'
_RUNNER_CONFIG_PATH = 'runner/setup'
_DEFAULT_TESTS_FILE = 'config/modules/host.conf'
_RESULT_LOG_FILE = 'inst/result.log'
def __init__(self, config):
self.configurator = configurator.Configurator()
self.gateway_sets = set(range(1, self.MAX_GATEWAYS+1))
self.config = config
self._result_sets = {}
self._devices = Devices()
self._ports = {}
self._callback_queue = []
self._callback_lock = threading.Lock()
self.gcp = gcp.GcpManager(self.config, self._queue_callback)
self._base_config = self._load_base_config()
self.description = config.get('site_description', '').strip('\"')
self._daq_version = os.environ['DAQ_VERSION']
self._lsb_release = os.environ['DAQ_LSB_RELEASE']
self._sys_uname = os.environ['DAQ_SYS_UNAME']
self.network = network.TestNetwork(config)
self.result_linger = config.get('result_linger', False)
self._linger_exit = 0
self.faucet_events = None
self.single_shot = config.get('single_shot', False)
self.fail_mode = config.get('fail_mode', False)
self.run_trigger = config.get('run_trigger', {})
self.run_tests = True
self.stream_monitor = None
self.exception = None
self.run_count = 0
self.run_limit = int(config.get('run_limit', 0))
self._default_port_flap_timeout = int(config.get('port_flap_timeout_sec', 0))
self.result_log = self._open_result_log()
self._system_active = False
logging_client = self.gcp.get_logging_client()
self.daq_run_id = self._init_daq_run_id()
self._device_result_client = self._init_device_result_client()
if logging_client:
logger.set_stackdriver_client(logging_client,
labels={"daq_run_id": self.daq_run_id})
test_list = self._get_test_list(config.get('host_tests', self._DEFAULT_TESTS_FILE))
if self.config.get('keep_hold'):
LOGGER.info('Appending test_hold to master test list')
if 'hold' not in test_list:
test_list.append('hold')
config['test_list'] = test_list
config['test_metadata'] = self._get_test_metadata()
LOGGER.info('DAQ RUN id: %s' % self.daq_run_id)
LOGGER.info('Configured with tests %s' % ', '.join(config['test_list']))
LOGGER.info('DAQ version %s' % self._daq_version)
LOGGER.info('LSB release %s' % self._lsb_release)
LOGGER.info('system uname %s' % self._sys_uname)
def _open_result_log(self):
return open(self._RESULT_LOG_FILE, 'w')
def _get_states(self):
states = connected_host.pre_states() + self.config['test_list']
return states + connected_host.post_states()
def _init_daq_run_id(self):
daq_run_id = str(uuid.uuid4())
with open('inst/daq_run_id.txt', 'w') as output_stream:
output_stream.write(daq_run_id + '\n')
return daq_run_id
def _init_device_result_client(self):
server_port = self.config.get('device_reporting', {}).get('server_port')
if server_port:
return DeviceReportClient(server_port=server_port)
return None
def _send_heartbeat(self):
message = {
'name': 'status',
'states': self._get_states(),
'ports': self._get_active_ports(),
'description': self.description,
'timestamp': time.time()
}
message.update(self.get_run_info())
self.gcp.publish_message('daq_runner', 'heartbeat', message)
def get_run_info(self):
"""Return basic run info dict"""
info = {
'version': self._daq_version,
'lsb': self._lsb_release,
'uname': self._sys_uname,
'daq_run_id': self.daq_run_id
}
data_retention_days = self.config.get('run_data_retention_days',
self._DEFAULT_RETENTION_DAYS)
if data_retention_days:
expiration = datetime.now(timezone.utc) + timedelta(days=float(data_retention_days))
info['expiration'] = gcp.to_timestamp(expiration)
return info
def initialize(self):
"""Initialize DAQ instance"""
self._send_heartbeat()
self._publish_runner_config(self._base_config)
self.network.initialize()
LOGGER.debug('Attaching event channel...')
self.faucet_events = faucet_event_client.FaucetEventClient(self.config)
self.faucet_events.connect()
LOGGER.info('Waiting for system to settle...')
time.sleep(3)
LOGGER.debug('Done with initialization')
def cleanup(self):
"""Cleanup instance"""
try:
LOGGER.info('Stopping network...')
self.network.stop()
except Exception as e:
LOGGER.error('Cleanup exception: %s', e)
if self.result_log:
self.result_log.close()
self.result_log = None
LOGGER.info('Done with runner.')
def add_host(self, *args, **kwargs):
"""Add a host with the given parameters"""
return self.network.add_host(*args, **kwargs)
def remove_host(self, host):
"""Remove the given host"""
return self.network.remove_host(host)
def get_host_interface(self, host):
"""Get the internal interface for the host"""
return self.network.get_host_interface(host)
def _handle_faucet_events(self):
while self.faucet_events:
event = self.faucet_events.next_event()
if not event:
break
(dpid, port, active) = self.faucet_events.as_port_state(event)
if dpid and port:
LOGGER.debug('port_state: %s %s', dpid, port)
self._handle_port_state(dpid, port, active)
return
(dpid, port, target_mac, vid) = self.faucet_events.as_port_learn(event)
if dpid and port and vid:
is_vlan = self.run_trigger.get("vlan_start") and self.run_trigger.get("vlan_end")
if is_vlan:
if self.network.is_system_port(dpid, port):
self._handle_device_learn(vid, target_mac)
else:
self._handle_port_learn(dpid, port, vid, target_mac)
return
(dpid, restart_type) = self.faucet_events.as_config_change(event)
if dpid is not None:
LOGGER.debug('dp_id %d restart %s', dpid, restart_type)
def _handle_port_state(self, dpid, port, active):
if self.network.is_system_port(dpid, port):
LOGGER.info('System port %s on dpid %s is active %s', port, dpid, active)
if self._system_active and not active:
LOGGER.error('System port became inactive, terminating.')
self.exception = DaqException('System port inactive')
self.shutdown()
self._system_active = active
return
if not self.network.is_device_port(dpid, port):
LOGGER.debug('Unknown port %s on dpid %s is active %s', port, dpid, active)
return
if active != self._is_port_active(port):
LOGGER.info('Port %s dpid %s is now %s', port, dpid, "active" if active else "inactive")
if active:
self._activate_port(port)
elif port in self._ports:
port_info = self._ports[port]
device = self._devices.get_by_port_info(port_info)
if device and device.host and not port_info.flapping_start:
port_info.flapping_start = time.time()
if port_info.active:
if device and not port_info.flapping_start:
self._direct_port_traffic(device.mac, port, None)
self._deactivate_port(port)
self._send_heartbeat()
def _handle_remote_port_state(self, device, port_event):
if not device.host:
return
if port_event.event == PortBehavior.PortEvent.down:
if not device.port.flapping_start:
device.port.flapping_start = time.time()
device.port.active = False
else:
device.port.flapping_start = 0
device.port.active = True
def _activate_port(self, port):
if port not in self._ports:
self._ports[port] = PortInfo()
self._ports[port].port_no = port
port_info = self._ports[port]
port_info.flapping_start = 0
port_info.active = True
def _is_port_active(self, port):
return port in self._ports and self._ports[port].active
def _deactivate_port(self, port):
port_info = self._ports[port]
port_info.active = False
def _direct_port_traffic(self, mac, port, target):
self.network.direct_port_traffic(mac, port, target)
def _handle_port_learn(self, dpid, port, vid, target_mac):
if self.network.is_device_port(dpid, port) and self._is_port_active(port):
LOGGER.info('Port %s dpid %s learned %s', port, dpid, target_mac)
device = self._devices.create_if_absent(target_mac, port_info=self._ports[port])
self._target_set_trigger(device)
else:
LOGGER.info('Port %s dpid %s learned %s (ignored)', port, dpid, target_mac)
def _handle_device_learn(self, vid, target_mac):
LOGGER.info('Learned %s on vid %s', target_mac, vid)
if not self._devices.get(target_mac):
device = self._devices.new_device(target_mac, vlan=vid)
else:
device = self._devices.get(target_mac)
device.dhcp_mode = DhcpMode.EXTERNAL
# For keeping track of remote port flap events
if self._device_result_client:
device.port = PortInfo()
device.port.active = True
self._device_result_client.get_port_events(
device.mac, lambda event: self._handle_remote_port_state(device, event))
self._target_set_trigger(device)
def _queue_callback(self, callback):
with self._callback_lock:
LOGGER.debug('Register callback')
self._callback_queue.append(callback)
def _handle_queued_events(self):
with self._callback_lock:
callbacks = self._callback_queue
self._callback_queue = []
if callbacks:
LOGGER.debug('Processing %d callbacks', len(callbacks))
for callback in callbacks:
callback()
def _handle_system_idle(self):
# Some synthetic faucet events don't come in on the socket, so process them here.
self._handle_faucet_events()
all_idle = True
for device in self._devices.get_triggered_devices():
try:
if device.host.is_running():
all_idle = False
device.host.idle_handler()
else:
self.target_set_complete(device, 'target set not active')
except Exception as e:
self.target_set_error(device, e)
for device in self._devices.get_all_devices():
self._target_set_trigger(device)
all_idle = False
if not self._devices.get_triggered_devices() and not self.run_tests:
if self.faucet_events and not self._linger_exit:
self.shutdown()
if self._linger_exit == 1:
self._linger_exit = 2
LOGGER.warning('Result linger on exit.')
all_idle = False
if all_idle:
LOGGER.debug('No active device, waiting for trigger event...')
def _reap_stale_ports(self):
for device in self._devices.get_triggered_devices():
if not device.port.flapping_start:
continue
timeout_sec = device.host.get_port_flap_timeout(device.host.test_name)
if timeout_sec is None:
timeout_sec = self._default_port_flap_timeout
if (device.port.flapping_start + timeout_sec) <= time.time():
exception = DaqException('port not active for %ds' % timeout_sec)
self.target_set_error(device, exception)
device.port.flapping_start = 0
def shutdown(self):
"""Shutdown this runner by closing all active components"""
self._terminate()
self.monitor_forget(self.faucet_events.sock)
self.faucet_events.disconnect()
self.faucet_events = None
count = self.stream_monitor.log_monitors(as_info=True)
LOGGER.warning('No active ports remaining (%d monitors), ending test run.', count)
self._send_heartbeat()
def _loop_hook(self):
self._handle_queued_events()
states = {device.mac: device.host.state for device in self._devices.get_triggered_devices()}
LOGGER.debug('Active target sets/state: %s', states)
def _terminate(self):
for device in self._devices.get_triggered_devices():
self.target_set_error(device, DaqException('terminated'))
def _module_heartbeat(self):
# Should probably be converted to a separate thread to timeout any blocking fn calls
_ = [device.host.heartbeat() for device in self._devices.get_triggered_devices()]
def main_loop(self):
"""Run main loop to execute tests"""
try:
monitor = stream_monitor.StreamMonitor(idle_handler=self._handle_system_idle,
loop_hook=self._loop_hook,
timeout_sec=20) # Polling rate
self.stream_monitor = monitor
self.monitor_stream('faucet', self.faucet_events.sock, self._handle_faucet_events,
priority=10)
LOGGER.info('Entering main event loop.')
LOGGER.info('See docs/troubleshooting.md if this blocks for more than a few minutes.')
while self.stream_monitor.event_loop():
self._reap_stale_ports()
self._module_heartbeat()
except Exception as e:
LOGGER.error('Event loop exception: %s', e)
LOGGER.exception(e)
self.exception = e
except KeyboardInterrupt as e:
LOGGER.error('Keyboard Interrupt')
LOGGER.exception(e)
self.exception = e
if self.config.get('use_console'):
LOGGER.info('Dropping into interactive command line')
self.network.cli()
self._terminate()
def _target_set_trigger(self, device):
assert self._devices.contains(device), 'Target device %s is not expected' % device.mac
port_trigger = device.port.port_no is not None
if not self._system_active:
LOGGER.warning('Target device %s ignored, system is not active', device.mac)
return False
if device.host:
LOGGER.debug('Target device %s already triggered', device.mac)
return False
if port_trigger:
assert device.port.active, 'Target port %d is not active' % device.port.port_no
if not self.run_tests:
LOGGER.debug('Target device %s trigger suppressed', device.mac)
return False
try:
group_name = self.network.device_group_for(device)
device.group = group_name
gateway = self._activate_device_group(device)
if gateway.activated:
LOGGER.debug('Target device %s trigger ignored b/c activated gateway', device.mac)
return False
except Exception as e:
LOGGER.error('Target device %s target trigger error %s', device.mac, str(e))
if self.fail_mode:
LOGGER.warning('Suppressing further tests due to failure.')
self.run_tests = False
return False
# Stops all DHCP response initially
# Selectively enables dhcp response at ipaddr stage based on dhcp mode
if device.dhcp_mode != DhcpMode.EXTERNAL:
gateway.stop_dhcp_response(device.mac)
gateway.attach_target(device)
device.gateway = gateway
try:
self.run_count += 1
new_host = connected_host.ConnectedHost(self, device, self.config)
device.host = new_host
new_host.register_dhcp_ready_listener(self._dhcp_ready_listener)
new_host.initialize()
if port_trigger:
target = {
'port': device.port.port_no,
'group': group_name,
'fake': gateway.fake_target,
'port_set': gateway.port_set,
'mac': device.mac
}
self._direct_port_traffic(device.mac, device.port.port_no, target)
else:
self._direct_device_traffic(device, gateway.port_set)
return True
except Exception as e:
self.target_set_error(device, e)
def _direct_device_traffic(self, device, port_set):
self.network.direct_device_traffic(device, port_set)
def _get_test_list(self, test_file):
no_test = self.config.get('no_test', False)
if no_test:
LOGGER.warning('Suppressing configured tests because no_test')
return ['hold']
head = []
body = []
tail = []
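        # A test definition file is a list of simple commands, e.g. (hypothetical content):
        #   add pass first
        #   add ping
        #   include config/modules/extra.conf
        #   remove nmap
        #   add hold last
        # where the optional third token ("first"/"last") controls ordering in the final list.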
def get_test_list(test_file):
LOGGER.info('Reading test definition file %s', test_file)
with open(test_file) as file:
line = file.readline()
while line:
cmd = re.sub(r'#.*', '', line).strip().split()
cmd_name = cmd[0] if cmd else None
argument = cmd[1] if len(cmd) > 1 else None
ordering = cmd[2] if len(cmd) > 2 else None
if cmd_name == 'add':
LOGGER.debug('Adding test %s from %s', argument, test_file)
if ordering == "first":
head.append(argument)
elif ordering == "last":
tail.append(argument)
else:
body.append(argument)
elif cmd_name == 'remove':
LOGGER.debug('Removing test %s from %s', argument, test_file)
for section in (head, body, tail):
if argument in section:
section.remove(argument)
elif cmd_name == 'include':
get_test_list(argument)
elif cmd_name == 'build' or not cmd_name:
pass
else:
LOGGER.warning('Unknown test list command %s', cmd_name)
line = file.readline()
get_test_list(test_file)
return [*head, *body, *tail]
def _get_test_metadata(self, extension=".daqmodule", root="."):
metadata = {}
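        # Each *.daqmodule file is a small JSON descriptor, e.g. (hypothetical):
        #   {"name": "mytest", "startup_cmd": "start_mytest test/mytest"}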
for meta_file in pathlib.Path(root).glob('**/*%s' % extension):
if str(meta_file).startswith('inst') or str(meta_file).startswith('local'):
continue
with open(meta_file) as fd:
metadatum = json.loads(fd.read())
assert "name" in metadatum and "startup_cmd" in metadatum
module = metadatum["name"]
assert module not in metadata, "Duplicate module definition for %s" % module
metadata[module] = {
"startup_cmd": metadatum["startup_cmd"],
"basedir": meta_file.parent
}
return metadata
def _activate_device_group(self, device):
group_name = device.group
group_devices = self._devices.get_by_group(group_name)
existing_gateways = {device.gateway for device in group_devices if device.gateway}
if existing_gateways:
existing = existing_gateways.pop()
LOGGER.info('Gateway for existing device group %s is %s', group_name, existing)
return existing
set_num = self._find_gateway_set(device)
LOGGER.info('Gateway for device group %s not found, initializing base %d...',
device.group, set_num)
if device.dhcp_mode == DhcpMode.EXTERNAL:
# Under vlan trigger, start a external gateway that doesn't utilize a DHCP server.
gateway = external_gateway.ExternalGateway(self, group_name, set_num)
gateway.set_tap_intf(self.network.tap_intf)
else:
gateway = container_gateway.ContainerGateway(self, group_name, set_num)
try:
gateway.initialize()
except Exception:
LOGGER.error('Cleaning up from failed gateway initialization')
LOGGER.debug('Clearing %s gateway group %s for %s',
device, set_num, group_name)
self.gateway_sets.add(set_num)
raise
return gateway
def ip_notify(self, state, target, gateway, exception=None):
"""Handle a DHCP / Static IP notification"""
if exception:
assert not target, 'unexpected exception with target'
LOGGER.error('IP exception for %s: %s', gateway, exception)
LOGGER.exception(exception)
self._terminate_gateway_set(gateway)
return
target_type = target['type']
target_mac, target_ip, delta_sec = target['mac'], target['ip'], target['delta']
LOGGER.info('IP notify %s %s is %s on %s (%s/%d)', target_type, target_mac,
target_ip, gateway, state, delta_sec)
assert target_mac
assert target_ip
assert delta_sec is not None
device = self._devices.get(target_mac)
device.ip_info.ip_addr = target_ip
device.ip_info.state = state
device.ip_info.delta_sec = delta_sec
if device and device.host and target_type in ('ACK', 'STATIC'):
device.host.ip_notify(target_ip, state, delta_sec)
self._check_and_activate_gateway(device)
def _get_active_ports(self):
return [p.port_no for p in self._ports.values() if p.active]
def _check_and_activate_gateway(self, device):
# Host ready to be activated and DHCP happened / Static IP
ip_info = device.ip_info
if not ip_info.ip_addr or not device.dhcp_ready:
return
(gateway, ready_devices) = self._should_activate_target(device)
if not ready_devices:
return
if ready_devices is True:
device.host.trigger(ip_info.state, target_ip=ip_info.ip_addr,
delta_sec=ip_info.delta_sec)
else:
self._activate_gateway(ip_info.state, gateway, ready_devices, ip_info.delta_sec)
def _dhcp_ready_listener(self, device):
device.dhcp_ready = True
self._check_and_activate_gateway(device)
def _activate_gateway(self, state, gateway, ready_devices, delta_sec):
gateway.activate()
if len(ready_devices) > 1:
state = 'group'
delta_sec = -1
for device in ready_devices:
LOGGER.info('IP activating target %s', device)
target_ip, delta_sec = device.ip_info.ip_addr, device.ip_info.delta_sec
triggered = device.host.trigger(state, target_ip=target_ip, delta_sec=delta_sec)
assert triggered, 'Device %s not triggered' % device
def _should_activate_target(self, device):
if not device.host:
LOGGER.warning('DHCP targets missing %s', device)
return False, False
gateway, group_name = device.gateway, device.group
if gateway.activated:
LOGGER.info('DHCP activation group %s already activated', group_name)
return gateway, True
if not device.host.notify_activate():
LOGGER.info('DHCP device %s ignoring spurious notify', device)
return gateway, False
ready_devices = gateway.target_ready(device)
group_size = self.network.device_group_size(group_name)
remaining = group_size - len(ready_devices)
if remaining and self.run_tests:
LOGGER.info('DHCP waiting for %d additional members of group %s', remaining, group_name)
return gateway, False
        ready_trigger = all(map(lambda d: d.host.trigger_ready(), ready_devices))
if not ready_trigger:
LOGGER.info('DHCP device group %s not ready to trigger', group_name)
return gateway, False
return gateway, ready_devices
def _terminate_gateway_set(self, gateway):
gateway_devices = self._devices.get_by_gateway(gateway)
assert gateway_devices, '%s not found' % gateway
LOGGER.info('Terminating %s', gateway)
for device in gateway_devices:
self.target_set_error(device, DaqException('terminated'))
def _find_gateway_set(self, device):
if not self.gateway_sets:
raise Exception('Could not allocate open gateway set')
if device.port.port_no in self.gateway_sets:
self.gateway_sets.remove(device.port.port_no)
return device.port.port_no
return self.gateway_sets.pop()
@staticmethod
def ping_test(src, dst, src_addr=None):
"""Test ping between hosts"""
dst_name = dst if isinstance(dst, str) else dst.name
dst_ip = dst if isinstance(dst, str) else dst.IP()
from_msg = ' from %s' % src_addr if src_addr else ''
LOGGER.info('Test ping %s->%s%s', src.name, dst_name, from_msg)
failure = "ping FAILED"
assert dst_ip != "0.0.0.0", "IP address not assigned, can't ping"
ping_opt = '-I %s' % src_addr if src_addr else ''
try:
output = src.cmd('ping -c2', ping_opt, dst_ip, '> /dev/null 2>&1 || echo ', failure)
return output.strip() != failure
except Exception as e:
LOGGER.info('Test ping failure: %s', e)
return False
def target_set_error(self, device, exception):
"""Handle an error in the target set"""
running = bool(device.host)
LOGGER.error('Target device %s running %s exception: %s', device, running, exception)
LOGGER.exception(exception)
if running:
device.host.record_result(device.host.test_name, exception=exception)
self.target_set_complete(device, str(exception))
else:
stack = ''.join(
traceback.format_exception(etype=type(exception), value=exception,
tb=exception.__traceback__))
self._target_set_finalize(device,
{'exception': {'exception': str(exception),
'traceback': stack}},
str(exception))
self._send_device_result(device.mac, None)
self._detach_gateway(device)
def target_set_complete(self, device, reason):
"""Handle completion of a target_set"""
self._target_set_finalize(device, device.host.results, reason)
self._target_set_cancel(device)
def _target_set_finalize(self, device, result_set, reason):
results = self._combine_result_set(device, result_set)
LOGGER.info('Target device %s finalize: %s (%s)', device, results, reason)
if self.result_log:
self.result_log.write('%s: %s\n' % (device, results))
self.result_log.flush()
suppress_tests = self.fail_mode or self.result_linger
if results and suppress_tests:
LOGGER.warning('Suppressing further tests due to failure.')
self.run_tests = False
if self.result_linger:
self._linger_exit = 1
self._result_sets[device] = result_set
def _target_set_cancel(self, device):
target_host = device.host
if target_host:
target_gateway = device.gateway
target_port = device.port.port_no
LOGGER.info('Target device %s cancel (#%d/%s).', device.mac, self.run_count,
self.run_limit)
results = self._combine_result_set(device, self._result_sets.get(device))
this_result_linger = results and self.result_linger
target_gateway_linger = target_gateway and target_gateway.result_linger
if target_gateway_linger or this_result_linger:
LOGGER.warning('Target device %s result_linger: %s', device.mac, results)
if target_port:
self._activate_port(target_port)
target_gateway.result_linger = True
else:
if target_port:
self._direct_port_traffic(device.mac, target_port, None)
test_results = target_host.terminate('_target_set_cancel', trigger=False)
self._send_device_result(device.mac, test_results)
if target_gateway:
self._detach_gateway(device)
if self.run_limit and self.run_count >= self.run_limit and self.run_tests:
LOGGER.warning('Suppressing future tests because run limit reached.')
self.run_tests = False
if self.single_shot and self.run_tests:
LOGGER.warning('Suppressing future tests because test done in single shot.')
self._handle_faucet_events() # Process remaining queued faucet events
self.run_tests = False
device.host = None
self._devices.remove(device)
LOGGER.info('Remaining target sets: %s', self._devices.get_triggered_devices())
def _detach_gateway(self, device):
target_gateway = device.gateway
if not target_gateway:
return
if not target_gateway.detach_target(device):
LOGGER.info('Retiring %s. Last device: %s', target_gateway, device)
target_gateway.terminate()
self.gateway_sets.add(target_gateway.port_set)
if device.vlan:
self._direct_device_traffic(device, None)
device.gateway = None
def monitor_stream(self, *args, **kwargs):
"""Monitor a stream"""
return self.stream_monitor.monitor(*args, **kwargs)
def monitor_forget(self, stream):
"""Forget monitoring a stream"""
return self.stream_monitor.forget(stream)
def _combine_results(self):
results = []
for result_set_key in self._result_sets:
result_set = self._result_sets[result_set_key]
results.extend(self._combine_result_set(result_set_key, result_set))
return results
def _combine_result_set(self, set_key, result_sets):
results = []
if not result_sets:
return results
result_set_keys = list(result_sets)
result_set_keys.sort()
for result_set_key in result_set_keys:
result = result_sets[result_set_key]
code_string = result['code'] if 'code' in result else None
code = int(code_string) if code_string else 0
name = result['name'] if 'name' in result else result_set_key
exp_msg = result.get('exception')
status = exp_msg if exp_msg else code if name != 'fail' else not code
if status != 0:
results.append('%s:%s:%s' % (set_key, name, status))
return results
def _send_device_result(self, mac, test_results):
if not self._device_result_client:
return
if test_results is None:
device_result = PortBehavior.failed
else:
device_result = self._calculate_device_result(test_results)
LOGGER.info('Sending device result for device %s: %s', mac, device_result)
self._device_result_client.send_device_result(mac, device_result)
def _calculate_device_result(self, test_results):
for module_result in test_results.get('modules', {}).values():
if report.ResultType.EXCEPTION in module_result:
return PortBehavior.failed
if module_result.get(report.ResultType.RETURN_CODE):
return PortBehavior.failed
for test_result in module_result.get('tests', {}).values():
if test_result.get('result') == 'fail':
return PortBehavior.failed
return PortBehavior.passed
def finalize(self):
"""Finalize this instance, returning error result code"""
self.gcp.release_config(self._RUNNER_CONFIG_PATH)
exception = self.exception
failures = self._combine_results()
if failures:
LOGGER.error('Test failures: %s', failures)
if exception:
LOGGER.error('Exiting b/c of exception: %s', exception)
if failures or exception:
return 1
return 0
def _base_config_changed(self, new_config):
LOGGER.info('Base config changed: %s', new_config)
config_file = os.path.join(self.config.get('site_path'), self._SITE_CONFIG)
self.configurator.write_config(new_config, config_file)
self._base_config = self._load_base_config(register=False)
self._publish_runner_config(self._base_config)
_ = [device.host.reload_config() for device in self._devices.get_triggered_devices()]
def _load_base_config(self, register=True):
base_conf = self.config.get('base_conf')
LOGGER.info('Loading base config from %s', base_conf)
base = self.configurator.load_config(base_conf)
site_path = self.config.get('site_path')
site_config_file = os.path.join(site_path, self._SITE_CONFIG)
LOGGER.info('Loading site config from %s', site_config_file)
site_config = self.configurator.load_config(site_config_file, optional=True)
if register:
self.gcp.register_config(self._RUNNER_CONFIG_PATH, site_config,
self._base_config_changed)
if site_config:
return self.configurator.merge_config(base, site_config_file)
return base
def get_base_config(self):
"""Get the base configuration for this install"""
return copy.deepcopy(self._base_config)
def _publish_runner_config(self, loaded_config):
result = {
'timestamp': gcp.get_timestamp(),
'config': loaded_config
}
self.gcp.publish_message('daq_runner', 'runner_config', result)
```
#### File: daq/test_modules/ipaddr_module.py
```python
from __future__ import absolute_import
import logging
import time
from datetime import datetime, timedelta
import os
import copy
import logger
from .docker_module import DockerModule
from .base_module import HostModule
from proto.system_config_pb2 import DhcpMode
_LOG_FORMAT = "%(asctime)s %(levelname)-7s %(message)s"
LEASE_TIME_UNITS_CONVERTER = {
's': 1,
'm': 60,
'h': 60 ** 2,
'd': 24 * 60 ** 2
}
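# e.g. a configured dhcp_lease_time of "30m" converts to 30 * 60 = 1800 seconds.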
class IpAddrModule(HostModule):
"""Module for inline ipaddr tests"""
_TIMEOUT_EXCEPTION = TimeoutError('DHCP analysis step timeout expired')
def __init__(self, host, tmpdir, test_name, module_config):
super().__init__(host, tmpdir, test_name, module_config)
self.docker_host = DockerModule(host, tmpdir, test_name, module_config)
self.test_dhcp_ranges = copy.copy(self.test_config.get('dhcp_ranges', []))
self._ip_callback = None
self._lease_time_seconds = self._get_lease_time()
self.tests = [
('dhcp port_toggle test', self._dhcp_port_toggle_test),
('dhcp multi subnet test', self._multi_subnet_test),
('ip change test', self._ip_change_test),
('dhcp change test', self._dhcp_change_test),
('analyze results', self._analyze)
]
self._logger = logger.get_logger('ipaddr_%s' % self.host_name)
log_folder = os.path.join(self.tmpdir, 'nodes', self.host_name, 'tmp')
os.makedirs(log_folder)
log_path = os.path.join(log_folder, 'activate.log')
self._file_handler = logging.FileHandler(log_path)
formatter = logging.Formatter(_LOG_FORMAT)
self._file_handler.setFormatter(formatter)
self._logger.addHandler(self._file_handler)
self._force_terminated = False
self._timeout = None
def start(self, port, params, callback, finish_hook):
"""Start the ip-addr tests"""
super().start(port, params, callback, finish_hook)
assert self.host.device.dhcp_mode != DhcpMode.EXTERNAL, "device DHCP is not enabled."
self._logger.info('Target device %s starting ipaddr test %s', self.device, self.test_name)
# Wait for initial ip before beginning test.
self._ip_callback = self._next_test
def _get_lease_time(self):
lease_time = self.host.config.get("dhcp_lease_time")
if not lease_time or lease_time[-1] not in LEASE_TIME_UNITS_CONVERTER:
return None
return float(lease_time[:-1]) * LEASE_TIME_UNITS_CONVERTER[lease_time[-1]]
def _set_timeout(self):
if not self._lease_time_seconds:
return
self._timeout = datetime.now() + timedelta(seconds=self._lease_time_seconds)
self._logger.info('Setting DHCP timeout at %s' % self._timeout)
def _next_test(self):
try:
self._timeout = None
name, func = self.tests.pop(0)
self._logger.info('Running ' + name)
func()
except Exception as e:
self._logger.error(str(e))
self._finalize(exception=e)
def _dhcp_port_toggle_test(self):
self._set_timeout()
if not self.host.connect_port(False):
self._logger.error('disconnect port not enabled')
return
time.sleep(self.host.config.get("port_debounce_sec", 0) + 1)
self.host.connect_port(True)
self._ip_callback = self._next_test
def _multi_subnet_test(self):
self._set_timeout()
if not self.test_dhcp_ranges:
self._next_test()
return
dhcp_range = self.test_dhcp_ranges.pop(0)
self._logger.info('Testing dhcp range: ' + str(dhcp_range))
args = (dhcp_range["start"], dhcp_range["end"], dhcp_range["prefix_length"])
self.host.gateway.change_dhcp_range(*args)
self._ip_callback = self._multi_subnet_test if self.test_dhcp_ranges else self._next_test
def _ip_change_test(self):
self._set_timeout()
self.host.gateway.request_new_ip(self.host.target_mac)
self._ip_callback = self._next_test
def _dhcp_change_test(self):
self._set_timeout()
if not self.host.connect_port(False):
self._logger.error('disconnect port not enabled')
return
self.host.gateway.request_new_ip(self.host.target_mac)
self.host.connect_port(True)
self._ip_callback = self._next_test
def _analyze(self):
self._set_timeout()
self._ip_callback = None
self.docker_host.start(self.port, self.params,
self._finalize, self._finish_hook)
def _finalize(self, return_code=None, exception=None):
self._logger.info('Module finalizing')
self._ip_callback = None
self._file_handler.close()
if not self._force_terminated:
self.callback(return_code=return_code, exception=exception)
def terminate(self):
"""Terminate this set of tests"""
self._logger.info('Module terminating')
self._force_terminated = True
if self.docker_host.start_time:
self.docker_host.terminate()
self._finalize()
def ip_listener(self, target_ip):
"""Respond to a ip notification event"""
self._logger.info('ip notification %s' % target_ip)
self.host.runner.ping_test(self.host.gateway.host, self.host.target_ip)
if self._ip_callback:
self._ip_callback()
def heartbeat(self):
if self._timeout and datetime.now() >= self._timeout:
if self.docker_host.start_time:
self.terminate()
self.callback(exception=self._TIMEOUT_EXCEPTION)
else:
self._logger.error('DHCP timed out after %ds lease time' % self._lease_time_seconds)
self.tests = self.tests[-1:]
self._next_test()
``` |
{
"source": "jhughes/gsutil",
"score": 2
} |
#### File: gslib/commands/rewrite.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import textwrap
import time
from apitools.base.py import encoding
from boto import config
from gslib.cloud_api import EncryptionException
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.progress_callback import FileProgressCallbackHandler
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.thread_message import FileMessage
from gslib.utils.cloud_api_helper import GetCloudApiInstance
from gslib.utils.constants import NO_MAX
from gslib.utils.constants import UTF8
from gslib.utils.encryption_helper import CryptoKeyType
from gslib.utils.encryption_helper import CryptoKeyWrapperFromKey
from gslib.utils.encryption_helper import GetEncryptionKeyWrapper
from gslib.utils.encryption_helper import MAX_DECRYPTION_KEYS
from gslib.utils.system_util import StdinIterator
from gslib.utils.text_util import ConvertRecursiveToFlatWildcard
from gslib.utils.text_util import NormalizeStorageClass
from gslib.utils import text_util
from gslib.utils.translation_helper import PreconditionsFromHeaders
MAX_PROGRESS_INDICATOR_COLUMNS = 65
_SYNOPSIS = """
gsutil rewrite -k [-f] [-r] url...
gsutil rewrite -k [-f] [-r] -I
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The gsutil rewrite command rewrites cloud objects, applying the specified
transformations to them. The transformation(s) are atomic for each affected
object and applied based on the input transformation flags. Object metadata
values are preserved unless altered by a transformation.
The -k flag is supported to add, rotate, or remove encryption keys on
objects. For example, the command:
gsutil rewrite -k gs://bucket/**
will update all objects in gs://bucket with the current encryption key
from your boto config file, which may either be a base64-encoded CSEK or the
fully-qualified name of a Cloud KMS key.
You can also use the -r option to specify recursive object transform; this is
synonymous with the ** wildcard. Thus, either of the following two commands
will perform encryption key transforms on gs://bucket/subdir and all objects
and subdirectories under it:
gsutil rewrite -k gs://bucket/subdir**
gsutil rewrite -k -r gs://bucket/subdir
The rewrite command acts only on live object versions, so specifying a
URL with a generation number fails. If you want to rewrite a noncurrent
version, first copy it to the live version, then rewrite it, for example:
gsutil cp gs://bucket/object#123 gs://bucket/object
gsutil rewrite -k gs://bucket/object
You can use the -s option to specify a new storage class for objects. For
example, the command:
gsutil rewrite -s nearline gs://bucket/foo
will rewrite the object, changing its storage class to nearline.
If you specify the -k option and you have an encryption key set in your boto
configuration file, the rewrite command will skip objects that are already
encrypted with the specified key. For example, if you run:
gsutil rewrite -k gs://bucket/**
and gs://bucket contains objects encrypted with the key specified in your boto
configuration file, gsutil will skip rewriting those objects and only rewrite
objects that are not encrypted with the specified key. This avoids the cost of
performing redundant rewrite operations.
If you specify the -k option and you do not have an encryption key set in your
boto configuration file, gsutil will always rewrite each object, without
explicitly specifying an encryption key. This results in rewritten objects
being encrypted with either the bucket's default KMS key (if one is set) or
Google-managed encryption (no CSEK or CMEK). Gsutil does not attempt to
determine whether the operation is redundant (and thus skippable) because
gsutil cannot be sure how the object will be encrypted after the rewrite. Note
that if your goal is to encrypt objects with a bucket's default KMS key, you
can avoid redundant rewrite costs by specifying the bucket's default KMS key
in your boto configuration file; this allows gsutil to perform an accurate
comparison of the objects' current and desired encryption configurations and
skip rewrites for objects already encrypted with that key.
If you have an encryption key set in your boto configuration file and specify
multiple transformations, gsutil will only skip those that would not change
the object's state. For example, if you run:
gsutil rewrite -s nearline -k gs://bucket/**
and gs://bucket contains objects that already match the encryption
configuration but have a storage class of standard, the only transformation
applied to those objects would be the change in storage class.
You can pass a list of URLs (one per line) to rewrite on stdin instead of as
command line arguments by using the -I option. This allows you to use gsutil
in a pipeline to rewrite objects identified by a program, such as:
some_program | gsutil -m rewrite -k -I
The contents of stdin can name cloud URLs and wildcards of cloud URLs.
The rewrite command requires OWNER permissions on each object to preserve
object ACLs. You can bypass this by using the -O flag, which will cause
gsutil not to read the object's ACL and instead apply the default object ACL
to the rewritten object:
gsutil rewrite -k -O gs://bucket/**
<B>OPTIONS</B>
-f Continues silently (without printing error messages) despite
errors when rewriting multiple objects. If some of the objects
could not be rewritten, gsutil's exit status will be non-zero
even if this flag is set. This option is implicitly set when
running "gsutil -m rewrite ...".
-I Causes gsutil to read the list of objects to rewrite from stdin.
This allows you to run a program that generates the list of
objects to rewrite.
-k Rewrite objects with the current encryption key specified in
your boto configuration file. The value for encryption_key may
be either a base64-encoded CSEK or a fully-qualified KMS key
name. If encryption_key is specified, encrypt all objects with
this key. If encryption_key is unspecified, customer-managed or
customer-supplied encryption keys that were used on the original
objects aren't used for the rewritten objects. Instead,
rewritten objects are encrypted with either the bucket's default
KMS key (if one is set) or Google-managed encryption (no CSEK
or CMEK). See 'gsutil help encryption' for details on encryption
configuration.
-O When a bucket has uniform bucket-level access (UBLA) enabled,
the -O flag is required and will skip all ACL checks. When a
bucket has UBLA disabled, the -O flag rewrites objects with the
bucket's default object ACL instead of the existing object ACL.
This is needed if you do not have OWNER permission on the
object.
-R, -r The -R and -r options are synonymous. Causes bucket or bucket
subdirectory contents to be rewritten recursively.
-s <class> Rewrite objects using the specified storage class.
""")
def _RewriteExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
if not cls.continue_on_error:
cls.logger.error(str(e))
cls.op_failure_count += 1
def _RewriteFuncWrapper(cls, name_expansion_result, thread_state=None):
cls.RewriteFunc(name_expansion_result, thread_state=thread_state)
def GenerationCheckGenerator(url_strs):
"""Generator function that ensures generation-less (live) arguments."""
for url_str in url_strs:
if StorageUrlFromString(url_str).generation is not None:
raise CommandException('"rewrite" called on URL with generation (%s).' %
url_str)
yield url_str
class _TransformTypes(object):
"""Enum class for valid transforms."""
CRYPTO_KEY = 'crypto_key'
STORAGE_CLASS = 'storage_class'
class RewriteCommand(Command):
"""Implementation of gsutil rewrite command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'rewrite',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='fkIrROs:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[CommandArgument.MakeZeroOrMoreCloudURLsArgument()])
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='rewrite',
help_name_aliases=['rekey', 'rotate'],
help_type='command_help',
help_one_line_summary='Rewrite objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def CheckProvider(self, url):
if url.scheme != 'gs':
raise CommandException(
'"rewrite" called on URL with unsupported provider: %s' % str(url))
def RunCommand(self):
"""Command entry point for the rewrite command."""
self.continue_on_error = self.parallel_operations
self.csek_hash_to_keywrapper = {}
self.dest_storage_class = None
self.no_preserve_acl = False
self.read_args_from_stdin = False
self.supported_transformation_flags = ['-k', '-s']
self.transform_types = set()
self.op_failure_count = 0
self.boto_file_encryption_keywrapper = GetEncryptionKeyWrapper(config)
self.boto_file_encryption_sha256 = (
self.boto_file_encryption_keywrapper.crypto_key_sha256
if self.boto_file_encryption_keywrapper else None)
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-f':
self.continue_on_error = True
elif o == '-k':
self.transform_types.add(_TransformTypes.CRYPTO_KEY)
elif o == '-I':
self.read_args_from_stdin = True
elif o == '-O':
self.no_preserve_acl = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
self.all_versions = True
elif o == '-s':
self.transform_types.add(_TransformTypes.STORAGE_CLASS)
self.dest_storage_class = NormalizeStorageClass(a)
if self.read_args_from_stdin:
if self.args:
raise CommandException('No arguments allowed with the -I flag.')
url_strs = StdinIterator()
else:
if not self.args:
raise CommandException('The rewrite command (without -I) expects at '
'least one URL.')
url_strs = self.args
if not self.transform_types:
raise CommandException(
'rewrite command requires at least one transformation flag. '
'Currently supported transformation flags: %s' %
self.supported_transformation_flags)
self.preconditions = PreconditionsFromHeaders(self.headers or {})
url_strs_generator = GenerationCheckGenerator(url_strs)
# Convert recursive flag to flat wildcard to avoid performing multiple
# listings.
if self.recursion_requested:
url_strs_generator = ConvertRecursiveToFlatWildcard(url_strs_generator)
# Expand the source argument(s).
name_expansion_iterator = NameExpansionIterator(
self.command_name,
self.debug,
self.logger,
self.gsutil_api,
url_strs_generator,
self.recursion_requested,
project_id=self.project_id,
continue_on_error=self.continue_on_error or self.parallel_operations,
bucket_listing_fields=['name', 'size'])
seek_ahead_iterator = None
# Cannot seek ahead with stdin args, since we can only iterate them
# once without buffering in memory.
if not self.read_args_from_stdin:
# Perform the same recursive-to-flat conversion on original url_strs so
# that it is as true to the original iterator as possible.
seek_ahead_url_strs = ConvertRecursiveToFlatWildcard(url_strs)
seek_ahead_iterator = SeekAheadNameExpansionIterator(
self.command_name,
self.debug,
self.GetSeekAheadGsutilApi(),
seek_ahead_url_strs,
self.recursion_requested,
all_versions=self.all_versions,
project_id=self.project_id)
# Rather than have each worker repeatedly calculate the sha256 hash for each
# decryption_key in the boto config, do this once now and cache the results.
for i in range(0, MAX_DECRYPTION_KEYS):
key_number = i + 1
keywrapper = CryptoKeyWrapperFromKey(
config.get('GSUtil', 'decryption_key%s' % str(key_number), None))
if keywrapper is None:
# Stop at first attribute absence in lexicographical iteration.
break
if keywrapper.crypto_type == CryptoKeyType.CSEK:
self.csek_hash_to_keywrapper[keywrapper.crypto_key_sha256] = keywrapper
# Also include the encryption_key, since it should be used to decrypt and
# then encrypt if the object's CSEK should remain the same.
if self.boto_file_encryption_sha256 is not None:
self.csek_hash_to_keywrapper[self.boto_file_encryption_sha256] = (
self.boto_file_encryption_keywrapper)
if self.boto_file_encryption_keywrapper is None:
msg = '\n'.join(
textwrap.wrap(
'NOTE: No encryption_key was specified in the boto configuration '
'file, so gsutil will not provide an encryption key in its rewrite '
'API requests. This will decrypt the objects unless they are in '
'buckets with a default KMS key set, in which case the service '
'will automatically encrypt the rewritten objects with that key.')
)
print('%s\n' % msg, file=sys.stderr)
# Perform rewrite requests in parallel (-m) mode, if requested.
self.Apply(_RewriteFuncWrapper,
name_expansion_iterator,
_RewriteExceptionHandler,
fail_on_error=(not self.continue_on_error),
shared_attrs=['op_failure_count'],
seek_ahead_iterator=seek_ahead_iterator)
if self.op_failure_count:
plural_str = 's' if self.op_failure_count > 1 else ''
raise CommandException('%d file%s/object%s could not be rewritten.' %
(self.op_failure_count, plural_str, plural_str))
return 0
def RewriteFunc(self, name_expansion_result, thread_state=None):
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
transform_url = name_expansion_result.expanded_storage_url
self.CheckProvider(transform_url)
# Get all fields so that we can ensure that the target metadata is
# specified correctly.
src_metadata = gsutil_api.GetObjectMetadata(
transform_url.bucket_name,
transform_url.object_name,
generation=transform_url.generation,
provider=transform_url.scheme)
if self.no_preserve_acl:
# Leave ACL unchanged.
src_metadata.acl = []
elif not src_metadata.acl:
raise CommandException(
'No OWNER permission found for object %s. If your bucket has uniform '
'bucket-level access (UBLA) enabled, include the -O option in your '
'command to avoid this error. If your bucket does not use UBLA, you '
'can use the -O option to apply the bucket\'s default object ACL '
'when rewriting.' % transform_url)
# Note: If other transform types are added, they must ensure that the
# encryption key configuration matches the boto configuration, because
# gsutil maintains an invariant that all objects it writes use the
# encryption_key value (including decrypting if no key is present).
# Store metadata about src encryption to make logic below easier to read.
src_encryption_kms_key = (src_metadata.kmsKeyName
if src_metadata.kmsKeyName else None)
src_encryption_sha256 = None
if (src_metadata.customerEncryption and
src_metadata.customerEncryption.keySha256):
src_encryption_sha256 = src_metadata.customerEncryption.keySha256
# In python3, hashes are bytes, use ascii since it should be ascii
src_encryption_sha256 = src_encryption_sha256.encode('ascii')
src_was_encrypted = (src_encryption_sha256 is not None or
src_encryption_kms_key is not None)
# Also store metadata about dest encryption.
dest_encryption_kms_key = None
if (self.boto_file_encryption_keywrapper is not None and
self.boto_file_encryption_keywrapper.crypto_type == CryptoKeyType.CMEK):
dest_encryption_kms_key = self.boto_file_encryption_keywrapper.crypto_key
dest_encryption_sha256 = None
if (self.boto_file_encryption_keywrapper is not None and
self.boto_file_encryption_keywrapper.crypto_type == CryptoKeyType.CSEK):
dest_encryption_sha256 = (
self.boto_file_encryption_keywrapper.crypto_key_sha256)
should_encrypt_dest = self.boto_file_encryption_keywrapper is not None
encryption_unchanged = (src_encryption_sha256 == dest_encryption_sha256 and
src_encryption_kms_key == dest_encryption_kms_key)
# Prevent accidental key rotation.
if (_TransformTypes.CRYPTO_KEY not in self.transform_types and
not encryption_unchanged):
raise EncryptionException(
'The "-k" flag was not passed to the rewrite command, but the '
'encryption_key value in your boto config file did not match the key '
'used to encrypt the object "%s" (hash: %s). To encrypt the object '
'using a different key, you must specify the "-k" flag.' %
(transform_url, src_encryption_sha256))
# Determine if we can skip this rewrite operation (this should only be done
# when ALL of the specified transformations are redundant).
redundant_transforms = []
# STORAGE_CLASS transform is redundant if the target storage class matches
# the existing storage class.
if (_TransformTypes.STORAGE_CLASS in self.transform_types and
self.dest_storage_class == NormalizeStorageClass(
src_metadata.storageClass)):
redundant_transforms.append('storage class')
# CRYPTO_KEY transform is redundant if we're using the same encryption
# key that was used to encrypt the source. However, if no encryption key was
# specified, we should still perform the rewrite. This results in the
# rewritten object either being encrypted with its bucket's default KMS key
# or having no CSEK/CMEK encryption applied. While we could attempt fetching
# the bucket's metadata and checking its default KMS key before performing
# the rewrite (in the case where we appear to be transitioning from
# no key to no key), that is vulnerable to the race condition where the
# default KMS key is changed between when we check it and when we rewrite
# the object.
if (_TransformTypes.CRYPTO_KEY in self.transform_types and
should_encrypt_dest and encryption_unchanged):
redundant_transforms.append('encryption key')
if len(redundant_transforms) == len(self.transform_types):
self.logger.info('Skipping %s, all transformations were redundant: %s' %
(transform_url, redundant_transforms))
return
# First make a deep copy of the source metadata, then overwrite any
# requested attributes (e.g. if a storage class change was specified).
dest_metadata = encoding.PyValueToMessage(
apitools_messages.Object, encoding.MessageToPyValue(src_metadata))
# Remove some unnecessary/invalid fields.
dest_metadata.generation = None
# Service has problems if we supply an ID, but it is responsible for
# generating one, so it is not necessary to include it here.
dest_metadata.id = None
# Ensure we don't copy over the KMS key name or CSEK key info from the
# source object; those should only come from the boto config's
# encryption_key value.
dest_metadata.customerEncryption = None
dest_metadata.kmsKeyName = None
# Both a storage class change and CMEK encryption should be set as part of
# the dest object's metadata. CSEK encryption, if specified, is added to the
# request later via headers obtained from the keywrapper value passed to
# encryption_tuple.
if _TransformTypes.STORAGE_CLASS in self.transform_types:
dest_metadata.storageClass = self.dest_storage_class
if dest_encryption_kms_key is not None:
dest_metadata.kmsKeyName = dest_encryption_kms_key
# Make sure we have the CSEK key necessary to decrypt.
decryption_keywrapper = None
if src_encryption_sha256 is not None:
if src_encryption_sha256 in self.csek_hash_to_keywrapper:
decryption_keywrapper = (
self.csek_hash_to_keywrapper[src_encryption_sha256])
else:
raise EncryptionException(
'Missing decryption key with SHA256 hash %s. No decryption key '
'matches object %s' % (src_encryption_sha256, transform_url))
operation_name = 'Rewriting'
if _TransformTypes.CRYPTO_KEY in self.transform_types:
if src_was_encrypted and should_encrypt_dest:
if not encryption_unchanged:
operation_name = 'Rotating'
# Else, keep "Rewriting". This might occur when -k was specified and was
# redundant, but we're performing the operation anyway because some
# other transformation was not redundant.
elif src_was_encrypted and not should_encrypt_dest:
operation_name = 'Decrypting'
elif not src_was_encrypted and should_encrypt_dest:
operation_name = 'Encrypting'
# TODO: Remove this call (used to verify tests) and make it processed by
# the UIThread.
sys.stderr.write(
_ConstructAnnounceText(operation_name, transform_url.url_string))
sys.stderr.flush()
# Message indicating beginning of operation.
gsutil_api.status_queue.put(
FileMessage(transform_url,
None,
time.time(),
finished=False,
size=src_metadata.size,
message_type=FileMessage.FILE_REWRITE))
progress_callback = FileProgressCallbackHandler(
gsutil_api.status_queue,
src_url=transform_url,
operation_name=operation_name).call
gsutil_api.CopyObject(src_metadata,
dest_metadata,
src_generation=transform_url.generation,
preconditions=self.preconditions,
progress_callback=progress_callback,
decryption_tuple=decryption_keywrapper,
encryption_tuple=self.boto_file_encryption_keywrapper,
provider=transform_url.scheme,
fields=[])
# Message indicating end of operation.
gsutil_api.status_queue.put(
FileMessage(transform_url,
None,
time.time(),
finished=True,
size=src_metadata.size,
message_type=FileMessage.FILE_REWRITE))
def _ConstructAnnounceText(operation_name, url_string):
"""Constructs announce text for ongoing operations on url_string.
This truncates the text to a maximum of MAX_PROGRESS_INDICATOR_COLUMNS, and
informs the rewrite-related operation ('Encrypting', 'Rotating', or
'Decrypting').
Args:
operation_name: String describing the operation, i.e.
'Rotating' or 'Encrypting'.
url_string: String describing the file/object being processed.
Returns:
Formatted announce text for outputting operation progress.
"""
# Operation name occupies 10 characters (enough for 'Encrypting'), plus a
# space. The rest is used for url_string. If a longer operation name is
# used, it will be truncated. We can revisit this size if we need to support
# a longer operation, but want to make sure the terminal output is meaningful.
justified_op_string = operation_name[:10].ljust(11)
start_len = len(justified_op_string)
end_len = len(': ')
if (start_len + len(url_string) + end_len > MAX_PROGRESS_INDICATOR_COLUMNS):
ellipsis_len = len('...')
url_string = '...%s' % url_string[-(MAX_PROGRESS_INDICATOR_COLUMNS -
start_len - end_len - ellipsis_len):]
base_announce_text = '%s%s:' % (justified_op_string, url_string)
format_str = '{0:%ds}' % MAX_PROGRESS_INDICATOR_COLUMNS
return format_str.format(base_announce_text)
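# Example (hypothetical URL): _ConstructAnnounceText('Encrypting', 'gs://bucket/obj')
# returns 'Encrypting gs://bucket/obj:' right-padded with spaces to 65 columns;
# URLs too long to fit are left-truncated and prefixed with '...'.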
```
#### File: gsutil/gslib/context_config.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import atexit
import os
import subprocess
from boto import config
import gslib
# Maintain a single context configuration.
_singleton_config = None
class CertProvisionError(Exception):
"""Represents errors when provisioning a client certificate."""
pass
class ContextConfigSingletonAlreadyExistsError(Exception):
"""Error for when create_context_config is called multiple times."""
pass
def _IsPemSectionMarker(line):
"""Returns (begin:bool, end:bool, name:str)."""
if line.startswith('-----BEGIN ') and line.endswith('-----'):
return True, False, line[11:-5]
elif line.startswith('-----END ') and line.endswith('-----'):
return False, True, line[9:-5]
else:
return False, False, ''
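# For example: '-----BEGIN CERTIFICATE-----' -> (True, False, 'CERTIFICATE'),
# '-----END CERTIFICATE-----' -> (False, True, 'CERTIFICATE'), and any other
# line -> (False, False, '').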
def _SplitPemIntoSections(contents, logger):
"""Returns dict with {name: section} by parsing contents in PEM format.
A simple parser for PEM file. Please see RFC 7468 for the format of PEM
file. Not using regex to improve performance catching nested matches.
Note: This parser requires the post-encapsulation label of a section to
match its pre-encapsulation label. It ignores a section without a
matching label.
Args:
contents (str): Contents of a PEM file.
logger (logging.logger): gsutil logger.
Returns:
A dict of the PEM file sections.
"""
result = {}
pem_lines = []
pem_section_name = None
for line in contents.splitlines():
line = line.strip()
if not line:
continue
begin, end, name = _IsPemSectionMarker(line)
if begin:
if pem_section_name:
logger.warning('Section %s missing end line and will be ignored.' %
pem_section_name)
if name in result.keys():
logger.warning('Section %s already exists, and the older section will '
'be ignored.' % name)
pem_section_name = name
pem_lines = []
elif end:
if not pem_section_name:
logger.warning(
'Section %s missing a beginning line and will be ignored.' % name)
elif pem_section_name != name:
logger.warning('Section %s missing a matching end line. Found: %s' %
(pem_section_name, name))
pem_section_name = None
if pem_section_name:
pem_lines.append(line)
if end:
result[name] = '\n'.join(pem_lines) + '\n'
pem_section_name = None
if pem_section_name:
logger.warning('Section %s missing an end line.' % pem_section_name)
return result
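# Illustration (hypothetical provider output): input containing CERTIFICATE,
# ENCRYPTED PRIVATE KEY and PASSPHRASE sections yields a dict keyed by those
# labels, each value being the full section text (including its BEGIN/END
# marker lines) terminated by a newline; a section whose END label does not
# match the preceding BEGIN label is dropped with a warning.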
class _ContextConfig(object):
"""Represents the configurations associated with context aware access.
Only one instance of Config can be created for the program.
"""
def __init__(self, logger):
"""Initializes config.
Args:
logger (logging.logger): gsutil logger.
"""
self.logger = logger
self.use_client_certificate = config.getbool('Credentials',
'use_client_certificate')
self.client_cert_path = None
self.client_cert_password = None
if not self.use_client_certificate:
# Don't spend time generating values gsutil won't need.
return
# Generates certificate and deletes it afterwards.
atexit.register(self._UnprovisionClientCert)
command_string = config.get('Credentials', 'cert_provider_command', None)
if not command_string:
raise CertProvisionError('No cert provider detected.')
self.client_cert_path = os.path.join(gslib.GSUTIL_DIR, 'caa_cert.pem')
try:
# Certs provisioned using endpoint verification are stored as a
# single file holding both the public certificate and the private key.
self._ProvisionClientCert(command_string, self.client_cert_path)
except CertProvisionError as e:
self.logger.error('Failed to provision client certificate: %s' % e)
def _ProvisionClientCert(self, command_string, cert_path):
"""Executes certificate provider to obtain client certificate and keys."""
# Monkey-patch command line args to get password-protected certificate.
# Adds password flag if it's not already there.
password_arg = ' <PASSWORD>pass<PASSWORD>'
if ('--print_certificate' in command_string and
password_arg not in command_string):
command_string += password_arg
try:
command_process = subprocess.Popen(command_string.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
command_stdout, command_stderr = command_process.communicate()
if command_process.returncode != 0:
raise CertProvisionError(command_stderr)
# Python 3 outputs bytes from communicate() by default.
command_stdout_string = command_stdout.decode()
sections = _SplitPemIntoSections(command_stdout_string, self.logger)
with open(cert_path, 'w+') as f:
f.write(sections['CERTIFICATE'])
f.write(sections['ENCRYPTED PRIVATE KEY'])
self.client_cert_password = sections['PASSPHRASE'].splitlines()[1]
except OSError as e:
raise CertProvisionError(e)
except KeyError as e:
raise CertProvisionError(
'Invalid output format from certificate provider, no %s' % e)
def _UnprovisionClientCert(self):
"""Cleans up any files or resources provisioned during config init."""
if self.client_cert_path is not None:
try:
os.remove(self.client_cert_path)
self.logger.debug('Unprovisioned client cert: %s' %
self.client_cert_path)
except OSError as e:
self.logger.error('Failed to remove client certificate: %s' % e)
def create_context_config(logger):
"""Should be run once at gsutil startup. Creates global singleton.
Args:
logger (logging.logger): For logging during config functions.
Returns:
New ContextConfig singleton.
Raises:
Exception if singleton already exists.
"""
global _singleton_config
if not _singleton_config:
_singleton_config = _ContextConfig(logger)
return _singleton_config
raise ContextConfigSingletonAlreadyExistsError
def get_context_config():
"""Retrieves ContextConfig global singleton.
Returns:
ContextConfig or None if global singleton doesn't exist.
"""
global _singleton_config
return _singleton_config
``` |
{
"source": "j-hugo/DLD1",
"score": 3
} |
#### File: j-hugo/DLD1/architecture.py
```python
import torch
import torch.nn as nn
from torchsummary import summary
'''
The unet and resnetunet architectures are adapted from https://github.com/usuyama/pytorch-unet
'''
# convolution blocks for contracting path of unet
def double_conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
# convolution blocks for expansive path of unet
def double_conv_up(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.ReLU(inplace=True),
)
# Unet architecture
class UNet(nn.Module):
"""
A unet architecture implementation
http://lmb.informatik.uni-freiburg.de/Publications/2015/RFB15a
"""
def __init__(self, n_channel, n_class):
super().__init__()
self.dconv_down1 = double_conv(n_channel, 64)
self.dconv_down2 = double_conv(64, 128)
self.dconv_down3 = double_conv(128, 256)
self.dconv_down4 = double_conv(256, 512)
self.dconv_down5 = double_conv(512, 1024)
self.maxpool = nn.MaxPool2d(2)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.dconv_up4 = double_conv_up(512 + 1024, 512)
self.dconv_up3 = double_conv_up(256 + 512, 256)
self.dconv_up2 = double_conv_up(128 + 256, 128)
self.dconv_up1 = double_conv_up(128 + 64, 64)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, x):
conv1 = self.dconv_down1(x)
x = self.maxpool(conv1)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
conv3 = self.dconv_down3(x)
x = self.maxpool(conv3)
conv4 = self.dconv_down4(x)
x = self.maxpool(conv4)
x = self.dconv_down5(x)
x = self.upsample(x)
x = torch.cat([x, conv4], dim=1)
x = self.dconv_up4(x)
x = self.upsample(x)
x = torch.cat([x, conv3], dim=1)
x = self.dconv_up3(x)
x = self.upsample(x)
x = torch.cat([x, conv2], dim=1)
x = self.dconv_up2(x)
x = self.upsample(x)
x = torch.cat([x, conv1], dim=1)
x = self.dconv_up1(x)
out = self.conv_last(x)
return out
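# Shape note: the forward pass expects input of shape (N, n_channel, H, W)
# with H and W divisible by 16 (four 2x2 max-pools) and returns per-pixel
# logits of shape (N, n_class, H, W).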
# convolution blocks for resnetunet
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
# ResNetUnet architecture
class ResNetUNet(nn.Module):
def __init__(self, base_model, n_class):
super().__init__()
self.base_model = base_model
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(*self.base_layers[:3])
self.layer0_1x1 = convrelu(64, 64, 1, 0)
self.layer1 = nn.Sequential(*self.base_layers[3:5])
self.layer1_1x1 = convrelu(64, 64, 1, 0)
self.layer2 = self.base_layers[5]
self.layer2_1x1 = convrelu(128, 128, 1, 0)
self.layer3 = self.base_layers[6]
self.layer3_1x1 = convrelu(256, 256, 1, 0)
self.layer4 = self.base_layers[7]
self.layer4_1x1 = convrelu(512, 512, 1, 0)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
self.conv_original_size0 = convrelu(1, 64, 3, 1)
self.conv_original_size1 = convrelu(64, 64, 3, 1)
self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, input):
x_original = self.conv_original_size0(input)
x_original = self.conv_original_size1(x_original)
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer4 = self.layer4_1x1(layer4)
x = self.upsample(layer4)
layer3 = self.layer3_1x1(layer3)
x = torch.cat([x, layer3], dim=1)
x = self.conv_up3(x)
x = self.upsample(x)
layer2 = self.layer2_1x1(layer2)
x = torch.cat([x, layer2], dim=1)
x = self.conv_up2(x)
x = self.upsample(x)
layer1 = self.layer1_1x1(layer1)
x = torch.cat([x, layer1], dim=1)
x = self.conv_up1(x)
x = self.upsample(x)
layer0 = self.layer0_1x1(layer0)
x = torch.cat([x, layer0], dim=1)
x = self.conv_up0(x)
x = self.upsample(x)
x = torch.cat([x, x_original], dim=1)
x = self.conv_original_size2(x)
out = self.conv_last(x)
return out
if __name__ == "__main__":
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda:0") # set device to GPU if available
u_model = UNet(1, 2).to(device)
summary(u_model, input_size=(1, 256, 256))
```
#### File: j-hugo/DLD1/utils.py
```python
import skimage.io as io
import json
import argparse
import numpy as np
import os
def get_subset_stats(json_path):
"""
Calculate the statistics of subset dataset
Args:
json_path: A path to subset json file
"""
with open(json_path) as json_file:
data_index = json.load(json_file)
stats = {}
for subset in ['train','test']:
stats[subset] = {'Cancer': len([k for k,v in data_index.items() if (v['subset'] == subset) and (v['cancer'] == True)]),
'No Cancer': len([k for k,v in data_index.items() if (v['subset'] == subset) and (v['cancer'] == False)])}
print("{:<8} {:<8} {:<10} {:<8}".format('Subset', 'Total', 'Cancerous', 'Non-cancerous'))
for k, v in stats.items():
cancer = v['Cancer']
non_cancer = v['No Cancer']
print("{:<8} {:<8} {:<10} {:<8}".format(k, cancer+non_cancer,cancer, non_cancer))
def metrics_summary(metric_path):
"""
Calculate the statistics of evaluation metrics
Args:
metric_path: A path to metric json file directory
"""
print(
"{:<12} {:<15} {:<5} {:<5} {:<5} {:<5} {:<5} {:<5} {:<5} {:<5}".format('Model', 'Dataset', 'avg_dice', 'c_dice',
'n_dice', 'precision', 'recall',
'overlap', 'FPR', 'FNR'))
for file in os.listdir(metric_path):
file_path = os.path.join(metric_path, file)
with open(file_path) as json_file:
metrics = json.load(json_file)
dataset = metrics['train']['dataset']
if dataset is None:
dataset = "original"
model = metrics['train']['model']
image_size = metrics['train']['image_size']
avg_dice = metrics['test']['average_dice_score']
cancer_dice = metrics['test']['average_cancer_dice_score']
no_cancer_dice = metrics['test']['average_non_cancer_dice_score']
FP = metrics['test']['gt_n_pd_c']
TP = metrics['test']['gt_c_pd_c_overlap'] + metrics['test']['gt_c_pd_c_no_overlap']
FN = metrics['test']['gt_c_pd_no_c']
TN = metrics['test']['gt_n_pd_n']
if TP + FP == 0:
precision = 0
else:
precision = TP / (TP + FP)
recall = TP / (TP + FN)
specificity = TN / (TN + FP)
if TP == 0:
TP_with_overlap = 0
else:
TP_with_overlap = metrics['test']['gt_c_pd_c_overlap'] / TP
false_positive = FP / (FP + TN)
false_negative = FN / (FN + TP)
print("{:<12} {:<15} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".format(model, dataset,
avg_dice,
cancer_dice,
no_cancer_dice,
precision,
recall,
TP_with_overlap,
false_positive,
false_negative
))
# outline, gray2rgb, overlay_plot are adapted from: https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/utils.py
def outline(image, mask, color):
"""
Give a color to the outline of the mask
Args:
image: an image
mask: a label
color: a RGB color for outline
Return:
image: the image which is drawn outline
"""
mask = np.round(mask)
yy, xx = np.nonzero(mask)
for y, x in zip(yy, xx):
if 0.0 < np.mean(mask[max(0, y - 1) : y + 2, max(0, x - 1) : x + 2]) < 1.0:
image[max(0, y) : y + 1, max(0, x) : x + 1] = color
return image
def gray2rgb(image):
"""
Convert a one-channel image to an RGB image
Args:
image: an image which has one channel
Return:
image: the converted image which has a RGB channel
"""
w, h = image.shape
image += np.abs(np.min(image))
image_max = np.abs(np.max(image))
if image_max > 0:
image /= image_max
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = image * 255
return ret
def overlay_plot(img, y_true, y_pred, index, args, save_plot=False):
"""
Overlay the ground-truth and predicted label outlines on an image
Args:
img: an image which has one channel
y_pred: the predicted label from the model
y_true: the ground truth label
index: the index number
args: arguments from the parser
save_plot: if True, it saves plots
Return:
image: the overlay image
"""
image = gray2rgb(img[0])
image = outline(image, y_true[0], color=[255, 0, 0])
image = outline(image, y_pred[0], color=[0, 255, 0])
if save_plot == True:
filename = "img-{}.png".format(index)
filepath = os.path.join(args.plot_path, filename)
io.imsave(filepath, image)
return image
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Utility funcitons for statistics on the dataset or analysis of metrics"
)
parser.add_argument(
"--method",
type=str,
default=None,
help="util function to be executed",
)
parser.add_argument(
"--jsonfile", type=str, default="./data/data_index_subsets.json",
help="root folder with json with assigned subsets"
)
parser.add_argument(
"--metric_path", type=str, default="./save/metrics",
help="root folder with json with assigned subsets"
)
parser.add_argument(
"--plot-path", type=str, default="./save/plots",
help="root folder to save plots"
)
args = parser.parse_args()
if args.method == 'subset_stats':
get_subset_stats(args.jsonfile)
elif args.method == 'metrics_summary':
metrics_summary(args.metric_path)
``` |
{
"source": "jhuguetn/nisnap",
"score": 3
} |
#### File: nisnap/utils/slices.py
```python
from collections.abc import Iterable
import numpy as np
def _chunks_(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
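# e.g. list(_chunks_([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]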
def __get_lambdas__(data):
if len(data.shape) == 4: # RGB mode (4D volume)
lambdas = {'x': lambda x: data[:, :, x, :],
'y': lambda x: data[:, x, :, :],
'z': lambda x: data[x, :, :, :]}
else: # standard 3D label volume
lambdas = {'x': lambda x: data[:, :, x],
'y': lambda x: data[:, x, :],
'z': lambda x: data[x, :, :]}
return lambdas
def __get_abs_minmax(data, axis, slices, margin=5):
bb = {}
lambdas = __get_lambdas__(data)
for a, chunk in enumerate(slices):
bb[a] = []
for i, slice_index in enumerate(chunk):
test = np.flip(np.swapaxes(np.abs(lambdas[axis](int(slice_index))),
0, 1), 0)
xs, ys = np.where(test != 0)
bb[a].append((xs, ys))
min_xs, max_xs = 9999, 0
min_ys, max_ys = 9999, 0
for a, bba in bb.items():
for xs, ys in bba:
min_xs = min(min_xs, min(xs))
max_xs = max(max_xs, max(xs))
min_ys = min(min_ys, min(ys))
max_ys = max(max_ys, max(ys))
# Create mock bounding-box
res = {}
for a, bba in bb.items():
res[a] = []
for each in bba:
i = [max(int(min_xs - margin), 0), int(max_xs + margin)],\
[max(int(min_ys - margin), 0), int(max_ys + margin)]
res[a].append(i)
return res
def __maxsize__(data):
d = []
lambdas = __get_lambdas__(data)
maxsize = 0
slice_index = 0
for slice_index in range(0, data.shape[2]):
test = np.flip(np.swapaxes(np.abs(lambdas['x'](int(slice_index))),
0, 1), 0)
if len(data.shape) == 4:
black_pixels_mask = np.all(test == [0, 0, 0], axis=-1)
else:
black_pixels_mask = np.all(test == 0, axis=-1)
size = len(test) - len(black_pixels_mask[black_pixels_mask])
maxsize = max(size, maxsize)
d.append((slice_index, size))
return maxsize
def remove_empty_slices(data, slices, threshold=0):
lambdas = __get_lambdas__(data)
new_slices = {}
for axis, slice_indices in slices.items():
new_slices[axis] = []
for slice_index in slice_indices:
test = np.flip(np.swapaxes(np.abs(lambdas[axis](int(slice_index))), 0, 1), 0)
if len(data.shape) == 4:
black_pixels_mask = np.all(test == [0, 0, 0], axis=-1)
else:
black_pixels_mask = np.all(test == 0, axis=-1)
size = len(test) - len(black_pixels_mask[black_pixels_mask])
if size > threshold:
new_slices[axis].append(slice_index)
else:
import logging as log
log.info('Removing empty slice %s-%s' % (axis, slice_index))
return new_slices
def _fix_rowsize_(axes, rowsize=None):
default_rowsize = {'x': 9, 'y': 9, 'z': 6}
if rowsize is None:
rs = {e: default_rowsize[e] for e in axes}
elif isinstance(rowsize, int):
rs = {e: {'x': rowsize, 'y': rowsize, 'z': rowsize}[e] for e in axes}
elif isinstance(rowsize, dict):
from nisnap.utils.parse import __check_axes__
rs = {__check_axes__(e)[0]: rowsize[e] for e in axes}
else:
raise TypeError('rowsize should be an int or a dict')
return rs
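# Examples: _fix_rowsize_(['x', 'z']) -> {'x': 9, 'z': 6};
# _fix_rowsize_(['x', 'z'], 5) -> {'x': 5, 'z': 5}.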
def _fix_figsize_(axes, figsize=None):
default_figsize = {'x': (10, 5), 'y': (10, 5), 'z': (10, 5)}
if figsize is None:
fs = {e: default_figsize[e] for e in axes}
elif isinstance(figsize, (list, tuple)) and len(figsize) == 2:
fs = {each: figsize for each in axes}
elif isinstance(figsize, dict):
from nisnap.utils.parse import __check_axes__
fs = {__check_axes__(e)[0]: figsize[e] for e in axes}
else:
raise TypeError('figsize should be a tuple/list of size 2 or a dict')
return fs
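# Examples: _fix_figsize_(['x']) -> {'x': (10, 5)};
# _fix_figsize_(['x', 'z'], (8, 4)) -> {'x': (8, 4), 'z': (8, 4)}.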
def cut_slices(data, axes, rowsize, slices=None, step=3, threshold=75):
default_slices = {'x': list(range(0, data.shape[2], step)),
'y': list(range(0, data.shape[1], step)),
'z': list(range(0, data.shape[0], step))}
if slices is not None:
if isinstance(slices, dict):
sl = {e: list(slices[e]) for e in axes}
elif isinstance(slices, Iterable):
sl = {e: slices for e in axes}
else:
sl = {e: default_slices[e] for e in axes}
sl = remove_empty_slices(data, sl, threshold=threshold)
sl = remove_empty_slices(data, sl)
sl = {each: list(_chunks_(sl[each], rowsize[each])) for each in axes}
return sl
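# With the defaults (slices=None, step=3) this takes every 3rd index along each
# requested axis, drops slices that are empty (or at/below `threshold` non-black
# pixels), and groups the survivors into rows of `rowsize[axis]`, e.g.
# {'x': [[0, 3, 6, ...], [27, 30, ...]]} (hypothetical indices).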
``` |
{
"source": "jhuiac/cocktail-party-Visually-derived-Speech-",
"score": 3
} |
#### File: jhuiac/cocktail-party-Visually-derived-Speech-/speech_enhancer.py
```python
import argparse
import os
import shutil
import random
from datetime import datetime
import numpy as np
from mediaio.audio_io import AudioSignal, AudioMixer
from mediaio.dsp.spectrogram import MelConverter
from dataset import AudioVisualDataset
def enhance_speech(speaker_file_path, noise_file_path, speech_prediction_path, speech_profile):
print("enhancing mix of %s, %s" % (speaker_file_path, noise_file_path))
speaker_source_signal = AudioSignal.from_wav_file(speaker_file_path)
noise_source_signal = AudioSignal.from_wav_file(noise_file_path)
while noise_source_signal.get_number_of_samples() < speaker_source_signal.get_number_of_samples():
noise_source_signal = AudioSignal.concat([noise_source_signal, noise_source_signal])
noise_source_signal = noise_source_signal.slice(0, speaker_source_signal.get_number_of_samples())
mixed_signal = AudioMixer.mix([speaker_source_signal, noise_source_signal])
predicted_speech_signal = AudioSignal.from_wav_file(speech_prediction_path)
signals = [mixed_signal, predicted_speech_signal]
max_length = max([signal.get_number_of_samples() for signal in signals])
for signal in signals:
signal.pad_with_zeros(max_length)
mel_converter = MelConverter(mixed_signal.get_sample_rate(), n_mel_freqs=128, freq_min_hz=0, freq_max_hz=4000)
mixed_spectrogram, original_phase = mel_converter.signal_to_mel_spectrogram(mixed_signal, get_phase=True)
predicted_speech_spectrogram = mel_converter.signal_to_mel_spectrogram(predicted_speech_signal)
speech_enhancement_mask = np.zeros(shape=mixed_spectrogram.shape)
thresholds = np.zeros(shape=(speech_enhancement_mask.shape[0]))
for f in range(speech_enhancement_mask.shape[0]):
thresholds[f] = np.percentile(speech_profile[f, :], 85)
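# Binary masking: a time-frequency bin of the mixture is kept only where the
# predicted (video-derived) speech spectrogram exceeds the per-frequency 85th
# percentile of the speaker's clean-speech profile.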
for f in range(speech_enhancement_mask.shape[0]):
for t in range(speech_enhancement_mask.shape[1]):
if predicted_speech_spectrogram[f, t] > thresholds[f]:
speech_enhancement_mask[f, t] = 1
continue
enhanced_speech_spectrogram = mixed_spectrogram * speech_enhancement_mask
enhanced_speech_signal = mel_converter.reconstruct_signal_from_mel_spectrogram(enhanced_speech_spectrogram, original_phase)
return mixed_signal, enhanced_speech_signal
def build_speech_profile(speaker_speech_dir, max_files=50):
print("building speech profile...")
speech_file_paths = [os.path.join(speaker_speech_dir, f) for f in os.listdir(speaker_speech_dir)][:max_files]
speech_signals = [AudioSignal.from_wav_file(f) for f in speech_file_paths]
mel_converter = MelConverter(speech_signals[0].get_sample_rate(), n_mel_freqs=128, freq_min_hz=0, freq_max_hz=4000)
speech_spectrograms = [mel_converter.signal_to_mel_spectrogram(signal) for signal in speech_signals]
speech_profile = np.concatenate(speech_spectrograms, axis=1)
return speech_profile
def apply_speech_enhancement(dataset_dir, speaker_id, noise_dir, prediction_input_dir, enhancement_output_dir):
enhancement_output_dir = os.path.join(enhancement_output_dir, '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.now()))
os.mkdir(enhancement_output_dir)
speech_profile = build_speech_profile(os.path.join(prediction_input_dir, speaker_id))
for speaker_file_path, noise_file_path in list_source_pairs(dataset_dir, speaker_id, noise_dir):
try:
speaker_file_name = os.path.splitext(os.path.basename(speaker_file_path))[0]
noise_file_name = os.path.splitext(os.path.basename(noise_file_path))[0]
speech_enhancement_dir_path = os.path.join(enhancement_output_dir, speaker_file_name + "_" + noise_file_name)
os.mkdir(speech_enhancement_dir_path)
speech_prediction_path = os.path.join(prediction_input_dir, speaker_id, speaker_file_name + ".wav")
mixed_signal, enhanced_speech_signal = enhance_speech(
speaker_file_path, noise_file_path, speech_prediction_path, speech_profile
)
shutil.copy(speaker_file_path, os.path.join(speech_enhancement_dir_path, "source.wav"))
enhanced_speech_signal.save_to_wav_file(os.path.join(speech_enhancement_dir_path, "enhanced.wav"))
mixed_signal.save_to_wav_file(os.path.join(speech_enhancement_dir_path, "mixture.wav"))
except Exception as e:
print("failed to enhance (%s). skipping" % e)
def list_source_pairs(dataset_dir, speaker_id, noise_dir):
dataset = AudioVisualDataset(dataset_dir)
speaker_file_paths = dataset.subset([speaker_id], max_files=20, shuffle=True).audio_paths()
noise_file_paths = [os.path.join(noise_dir, f) for f in os.listdir(noise_dir)]
random.shuffle(speaker_file_paths)
random.shuffle(noise_file_paths)
return zip(speaker_file_paths, noise_file_paths)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("dataset_dir", type=str)
parser.add_argument("speaker", type=str)
parser.add_argument("noise_dir", type=str)
parser.add_argument("prediction_input_dir", type=str)
parser.add_argument("enhancement_output_dir", type=str)
args = parser.parse_args()
apply_speech_enhancement(
args.dataset_dir, args.speaker, args.noise_dir, args.prediction_input_dir, args.enhancement_output_dir
)
if __name__ == "__main__":
main()
```
#### File: cocktail-party-Visually-derived-Speech-/video2speech_vggface/data_processor.py
```python
import math
import multiprocessing
import numpy as np
import cv2
from keras_vggface.vggface import VGGFace
from facedetection.face_detection import FaceDetector
from mediaio.audio_io import AudioSignal
from mediaio.video_io import VideoFileReader
from mediaio.dsp.spectrogram import MelConverter
vgg_model = VGGFace(weights="vggface", include_top=False, pooling="avg")
def preprocess_video_sample(video_file_path, slice_duration_ms=330):
print("preprocessing %s" % video_file_path)
face_detector = FaceDetector()
with VideoFileReader(video_file_path) as reader:
features = np.zeros(shape=(reader.get_frame_count(), 512), dtype=np.float32)
for i in range(reader.get_frame_count()):
frame = reader.read_next_frame()
face = face_detector.crop_face(frame)
face = cv2.resize(face, (224, 224))
x = np.expand_dims(face, axis=0)
x = x.astype(np.float32)
x[:, :, :, 0] -= 93.5940
x[:, :, :, 1] -= 104.7624
x[:, :, :, 2] -= 129.1863
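# These per-channel offsets are presumably the mean pixel values used for
# VGGFace input normalization (an assumption based on the constants).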
features[i, :] = vgg_model.predict(x)
frames_per_slice = int(math.ceil((float(slice_duration_ms) / 1000) * reader.get_frame_rate()))
n_slices = int(float(reader.get_frame_count()) / frames_per_slice)
slices = [
features[(i * frames_per_slice):((i + 1) * frames_per_slice)]
for i in range(n_slices)
]
return np.stack(slices)
def try_preprocess_video_sample(video_file_path):
try:
return preprocess_video_sample(video_file_path)
except Exception as e:
print("failed to preprocess %s (%s)" % (video_file_path, e))
return None
def preprocess_audio_sample(audio_file_path, slice_duration_ms=330):
print("preprocessing %s" % audio_file_path)
audio_signal = AudioSignal.from_wav_file(audio_file_path)
mel_converter = MelConverter(audio_signal.get_sample_rate(), n_mel_freqs=128, freq_min_hz=0, freq_max_hz=4000)
new_signal_length = int(math.ceil(
float(audio_signal.get_number_of_samples()) / mel_converter.get_hop_length()
)) * mel_converter.get_hop_length()
audio_signal.pad_with_zeros(new_signal_length)
mel_spectrogram = mel_converter.signal_to_mel_spectrogram(audio_signal)
samples_per_slice = int((float(slice_duration_ms) / 1000) * audio_signal.get_sample_rate())
spectrogram_samples_per_slice = int(samples_per_slice / mel_converter.get_hop_length())
n_slices = int(mel_spectrogram.shape[1] / spectrogram_samples_per_slice)
slices = [
mel_spectrogram[:, (i * spectrogram_samples_per_slice):((i + 1) * spectrogram_samples_per_slice)].flatten()
for i in range(n_slices)
]
return np.stack(slices)
def reconstruct_audio_signal(audio_sample, sample_rate):
mel_converter = MelConverter(sample_rate, n_mel_freqs=128, freq_min_hz=0, freq_max_hz=4000)
slice_mel_spectrograms = [audio_sample[i, :].reshape((mel_converter.get_n_mel_freqs(), -1)) for i in range(audio_sample.shape[0])]
full_mel_spectrogram = np.concatenate(slice_mel_spectrograms, axis=1)
return mel_converter.reconstruct_signal_from_mel_spectrogram(full_mel_spectrogram)
def preprocess_data(data):
print("reading dataset...")
thread_pool = multiprocessing.Pool(8)
video_samples = list(map(try_preprocess_video_sample, data.video_paths()))  # materialize so it can be enumerated twice below
audio_samples = thread_pool.map(preprocess_audio_sample, data.audio_paths())
invalid_sample_ids = [i for i, sample in enumerate(video_samples) if sample is None]
video_samples = [sample for i, sample in enumerate(video_samples) if i not in invalid_sample_ids]
audio_samples = [sample for i, sample in enumerate(audio_samples) if i not in invalid_sample_ids]
return np.concatenate(video_samples), np.concatenate(audio_samples)
def normalize(video_samples, normalization_cache):
return
def apply_normalization(video_samples, normalization_cache):
return
``` |
{
"source": "Jhuighuy/fortiel",
"score": 2
} |
#### File: fortiel/src/fortiel.py
```python
import re
import argparse
import sys
from os import path
from abc import ABC
from dataclasses import dataclass, field
from keyword import iskeyword as is_reserved
from typing import (
cast, final,
Iterable, List, Set, Dict, Tuple, Any, Union,
Final, Optional, Callable, Literal, Pattern, Match)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Helper Routines =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _make_name(name: str) -> str:
"""Compile a single-word lower case identifier."""
return re.sub(r'\s+', '', name).lower()
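# e.g. _make_name('End If') -> 'endif'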
def _compile_re(pattern: str, dotall: bool = False) -> Pattern[str]:
"""Compile regular expression."""
flags = re.IGNORECASE | re.MULTILINE | re.VERBOSE
if dotall:
flags |= re.DOTALL
return re.compile(pattern, flags)
def _find_duplicate(strings: Iterable[str]) -> Optional[str]:
"""Find first duplicate in the list."""
strings_set: Set[str] = set()
for string in strings:
if string in strings_set:
return string
strings_set.add(string)
return None
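# e.g. _find_duplicate(['a', 'b', 'a']) -> 'a'; returns None when all strings are unique.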
def _find_file(file_path: str, dir_paths: List[str]) -> Optional[str]:
"""Find file in the directory list."""
file_path = path.expanduser(file_path)
if path.exists(file_path):
return path.abspath(file_path)
for dir_path in dir_paths:
rel_file_path = path.expanduser(path.join(dir_path, file_path))
if path.exists(rel_file_path):
return path.abspath(rel_file_path)
here = path.abspath(path.dirname(__file__))
rel_file_path = path.join(here, file_path)
if path.exists(rel_file_path):
return rel_file_path
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Exceptions and Messages =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
class FortielError(Exception):
"""Fortiel compilation/execution error."""
def __init__(self, message: str, file_path: str, line_number: int) -> None:
super().__init__()
self.message: str = message
self.file_path: str = file_path
self.line_number: int = line_number
def __str__(self) -> str:
# Format matched GFortran error messages.
return f'{self.file_path}:{self.line_number}:1:\n\nFatal Error: {self.message}'
@final
class FortielSyntaxError(FortielError):
"""Fortiel syntax error."""
def __init__(self, message: str, file_path: str, line_number: int) -> None:
super().__init__(
f'Fortiel syntax error: {message}', file_path, line_number)
@final
class FortielRuntimeError(FortielError):
"""Fortiel runtime error."""
def __init__(self, message: str, file_path: str, line_number: int) -> None:
super().__init__(
f'Fortiel runtime error: {message}', file_path, line_number)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Options =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
@final
class FortielOptions:
"""Preprocessor options."""
# TODO: refactor as data class.
def __init__(self) -> None:
self.defines: List[str] = []
self.include_paths: List[str] = []
self.line_marker_format: Literal['fpp', 'cpp', 'none'] = 'fpp'
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Scanner and Directives Parser =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
@dataclass
class FortielNode(ABC):
"""Fortiel syntax tree node."""
file_path: str
line_number: int
@final
@dataclass
class FortielTree:
"""Fortiel syntax tree."""
file_path: str
root_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielLineListNode(FortielNode):
"""The list of code lines syntax tree node."""
lines: List[str] = field(default_factory=list)
@final
@dataclass
class FortielUseNode(FortielNode):
"""The USE directive syntax tree node."""
imported_file_path: str
@final
@dataclass
class FortielLetNode(FortielNode):
"""The LET directive syntax tree node."""
name: str
arguments: Union[str, List[str], None]
value_expression: str
@final
@dataclass
class FortielDelNode(FortielNode):
"""The DEL directive syntax tree node."""
names: Union[str, List[str]]
@final
@dataclass
class FortielElifNode(FortielNode):
"""The ELSE IF directive syntax tree node."""
condition_expression: str
then_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielIfNode(FortielNode):
"""The IF/ELSE IF/ELSE/END IF directive syntax tree node."""
condition_expression: str
then_nodes: List[FortielNode] = field(default_factory=list)
elif_nodes: List[FortielElifNode] = field(default_factory=list)
else_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielDoNode(FortielNode):
"""The DO/END DO directive syntax tree node."""
index_name: str
ranges_expression: str
loop_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielForNode(FortielNode):
"""The FOR/END FOR directive syntax tree node."""
index_names: Union[str, List[str], None]
iterable_expression: str
loop_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielCallSegmentNode(FortielNode):
"""The call segment syntax tree node."""
spaces_before: str
name: str
argument: str
@final
@dataclass
class FortielPatternNode(FortielNode):
"""The PATTERN directive syntax tree node."""
pattern: Union[str, Pattern[str]]
match_nodes: List[FortielNode] = field(default_factory=list)
@final
@dataclass
class FortielSectionNode(FortielNode):
"""The SECTION directive syntax tree node."""
name: str
once: bool
pattern_nodes: List[FortielPatternNode] = field(default_factory=list)
@final
@dataclass
class FortielMacroNode(FortielNode):
"""The MACRO/END MACRO directive syntax tree node."""
name: str
pattern_nodes: List[FortielPatternNode] = field(default_factory=list)
section_nodes: List[FortielSectionNode] = field(default_factory=list)
finally_nodes: List[FortielNode] = field(default_factory=list)
@property
def is_construct(self) -> bool:
"""Is current macro a construct?"""
return len(self.section_nodes) > 0 or len(self.finally_nodes) > 0
@property
def section_names(self) -> List[str]:
"""List of the section names."""
return [node.name for node in self.section_nodes]
@final
class FortielCallNode(FortielNode):
"""The call directive syntax tree node."""
# TODO: refactor as data class.
def __init__(self, node: FortielCallSegmentNode) -> None:
super().__init__(node.file_path, node.line_number)
self.spaces_before: str = node.spaces_before
self.name: str = node.name
self.argument: str = node.argument
self.captured_nodes: List[FortielNode] = []
self.call_section_nodes: List[FortielCallSectionNode] = []
@final
class FortielCallSectionNode(FortielNode):
"""The call directive section syntax tree node."""
# TODO: refactor as data class.
def __init__(self, node: FortielCallSegmentNode) -> None:
super().__init__(node.file_path, node.line_number)
self.name: str = node.name
self.argument: str = node.argument
self.captured_nodes: List[FortielNode] = []
_FORTIEL_DIRECTIVE: Final = _compile_re(r'^\s*\#[@$]\s*(?P<directive>.*)?$')
_FORTIEL_USE: Final = _compile_re(
r'^USE\s+(?P<path>(?:\"[^\"]+\") | (?:\'[^\']+\') | (?:\<[^\>]+\>))$')
_FORTIEL_LET: Final = _compile_re(r'''
^LET\s+(?P<name>[A-Z_]\w*)\s*
(?: \(\s* (?P<arguments>
(?:\*\s*){0,2}[A-Z_]\w*
(?:\s*,\s*(?:\*\s*){0,2}[A-Z_]\w* )* ) \s*\) )?
\s*=\s*(?P<value_expression>.*)$
''')
_FORTIEL_DEFINE: Final = _compile_re(r'^DEFINE\s+(?P<name>[A-Z_]\w*)(?P<segment>.*)$')
_FORTIEL_DEL: Final = _compile_re(r'^DEL\s+(?P<names>[A-Z_]\w*(?:\s*,\s*[A-Z_]\w*)*)$')
_FORTIEL_IF: Final = _compile_re(r'^IF\s*(?P<condition_expression>.+)$')
_FORTIEL_ELIF: Final = _compile_re(r'^ELSE\s*IF\s*(?P<condition_expression>.+)$')
_FORTIEL_ELSE: Final = _compile_re(r'^ELSE$')
_FORTIEL_END_IF: Final = _compile_re(r'^END\s*IF$')
_FORTIEL_IFDEF: Final = _compile_re(r'^IFDEF\s+(?P<name>[A-Z_]\w*)$')
_FORTIEL_IFNDEF: Final = _compile_re(r'^IFNDEF\s+(?P<name>[A-Z_]\w*)$')
_FORTIEL_DO: Final = _compile_re(
r'^DO\s+(?P<index_name>[A-Z_]\w*)\s*=\s*(?P<ranges_expression>.*)$')
_FORTIEL_END_DO: Final = _compile_re(r'^END\s*DO$')
_FORTIEL_FOR: Final = _compile_re(
r'^FOR\s+(?P<index_names>[A-Z_]\w*(?:\s*,\s*[A-Z_]\w*)*)\s*IN\s*(?P<iterable_expression>.*)$')
_FORTIEL_END_FOR: Final = _compile_re(r'^END\s*FOR$')
_FORTIEL_CALL: Final = _compile_re(
r'^(?P<spaces>\s*)\@(?P<name>(?:END\s*|ELSE\s*)?[A-Z]\w*)\b(?P<argument>[^!]*)(\s*!.*)?$')
_FORTIEL_MACRO: Final = _compile_re(r'^MACRO\s+(?P<name>[A-Z]\w*)(\s+(?P<pattern>.*))?$')
_FORTIEL_PATTERN: Final = _compile_re(r'^PATTERN\s+(?P<pattern>.*)$')
_FORTIEL_SECTION: Final = _compile_re(
r'^SECTION\s+(?P<once>ONCE\s+)?(?P<name>[A-Z]\w*)(?:\s+(?P<pattern>.*))?$')
_FORTIEL_FINALLY: Final = _compile_re(r'^FINALLY$')
_FORTIEL_END_MACRO: Final = _compile_re(r'^END\s*MACRO$')
_BUILTIN_HEADERS = {'.f90': 'tiel/syntax.fd'}
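# Illustrative examples (not part of the original source) of lines the regexes
# above are intended to match, assuming _compile_re compiles them
# case-insensitively, as the lower-cased head handling below suggests:
#   #@ use "macros.fd"        -> _FORTIEL_USE
#   #@ let rank = 3           -> _FORTIEL_LET
#   #@ do i = (1, rank)       -> _FORTIEL_DO ... #@ end do
#   @MyMacro a, b             -> _FORTIEL_CALL (macro call segment)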
class FortielParser:
"""Fortiel syntax tree parser."""
def __init__(self, file_path: str, lines: List[str]) -> None:
self._file_path: str = file_path
self._lines: List[str] = lines
self._line: str = self._lines[0]
self._multiline: str = self._line
self._line_index: int = 0
self._line_number: int = 1
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _matches_end(self) -> bool:
return self._line_index >= len(self._lines)
def _advance_line(self) -> None:
"""Advance to the next line, parsing the line continuations."""
self._line_index += 1
self._line_number += 1
if self._matches_end():
self._line = self._multiline = ''
else:
self._line = self._multiline = self._lines[self._line_index].rstrip()
# Parse line continuations.
while self._line.endswith('&'):
self._line_index += 1
self._line_number += 1
if self._matches_end():
message = 'unexpected end of file in continuation lines'
raise FortielSyntaxError(message, self._file_path, self._line_number)
# Update merged line.
next_line = self._lines[self._line_index].rstrip()
self._multiline += '\n' + next_line
# Update line.
next_line = next_line.lstrip()
if next_line.startswith('&'):
next_line = next_line.removeprefix('&').lstrip()
self._line = self._line.removesuffix('&').rstrip() + ' ' + next_line
def _matches_line(self, *patterns: Pattern[str]) -> Optional[Match[str]]:
if self._matches_end():
message = 'unexpected end of file'
raise FortielSyntaxError(message, self._file_path, self._line_number)
for pattern in patterns:
match = pattern.match(self._line)
if match is not None:
return match
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def parse(self) -> FortielTree:
"""Parse the source lines."""
tree = FortielTree(self._file_path)
# Add builtin headers based on file extension.
_, file_ext = path.splitext(self._file_path)
builtins_path = _BUILTIN_HEADERS.get(file_ext.lower())
if builtins_path is not None:
use_builtins_node = FortielUseNode(self._file_path, 0, builtins_path)
tree.root_nodes.append(use_builtins_node)
# Parse file contents.
while not self._matches_end():
tree.root_nodes.append(self._parse_statement())
return tree
def _parse_statement(self) -> FortielNode:
"""Parse a directive or a line list."""
if self._matches_line(_FORTIEL_DIRECTIVE):
return self._parse_directive()
if self._matches_line(_FORTIEL_CALL):
return self._parse_call_segment()
return self._parse_line_list()
def _parse_line_list(self) -> FortielLineListNode:
"""Parse a line list."""
node = FortielLineListNode(self._file_path, self._line_number)
while True:
node.lines.append(self._multiline)
self._advance_line()
if self._matches_end() or self._matches_line(_FORTIEL_DIRECTIVE, _FORTIEL_CALL):
break
return node
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _parse_directive(self) -> FortielNode:
"""Parse a directive."""
# Parse directive head and proceed to the specific parse function.
directive = self._matches_line(_FORTIEL_DIRECTIVE)['directive']
head = type(self)._parse_head(directive)
if head is None:
message = 'empty directive'
raise FortielSyntaxError(message, self._file_path, self._line_number)
if (func := {'use': self._parse_use_directive,
'let': self._parse_let_directive,
'define': self._parse_define_directive,
'del': self._parse_del_directive,
'if': self._parse_if_directive,
'ifdef': self._parse_ifdef_directive,
'ifndef': self._parse_ifndef_directive,
'do': self._parse_do_directive,
'for': self._parse_for_directive,
'macro': self._parse_macro_directive}.get(head)) is not None:
return func()
# Determine the error type:
        # either a known directive is misplaced, or the directive is unknown.
if head in map(_make_name, {'else', 'else if', 'end if', 'end do',
'section', 'finally', 'pattern', 'end macro'}):
message = f'misplaced directive <{head}>'
raise FortielSyntaxError(message, self._file_path, self._line_number)
message = f'unknown or mistyped directive <{head}>'
raise FortielSyntaxError(message, self._file_path, self._line_number)
@staticmethod
def _parse_head(directive: Optional[str]) -> Optional[str]:
        # Empty directives do not have a head.
if directive is None or directive == '':
return None
# ELSE is merged with IF, END is merged with any following word.
head_words = directive.split(' ', 2)
head = head_words[0].lower()
if len(head_words) > 1:
second_word = head_words[1].lower()
if head == 'end' or (head == 'else' and second_word == 'if'):
head += second_word
return head
def _matches_directive(self, *expected_heads: str) -> Optional[str]:
match = self._matches_line(_FORTIEL_DIRECTIVE)
if match is not None:
directive = match['directive'].lower()
head = type(self)._parse_head(directive)
if head in map(_make_name, expected_heads):
return head
return None
def _match_directive_syntax(
self, pattern: Pattern[str], *groups: str) -> Union[str, Tuple[str, ...]]:
directive = self._matches_line(_FORTIEL_DIRECTIVE)['directive'].rstrip()
if (match := pattern.match(directive)) is None:
head = type(self)._parse_head(directive)
message = f'invalid <{head}> directive syntax'
raise FortielSyntaxError(message, self._file_path, self._line_number)
self._advance_line()
return match.group(*groups)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _parse_use_directive(self) -> FortielUseNode:
"""Parse USE directive."""
node = FortielUseNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_USE, 'path'))
# Remove quotes.
node.imported_file_path = node.imported_file_path[1:-1]
return node
def _parse_let_directive(self) -> FortielLetNode:
"""Parse LET directive."""
# Note that we are not evaluating or
# validating define arguments and body here.
node = FortielLetNode(
self._file_path, self._line_number,
*self._match_directive_syntax(_FORTIEL_LET, 'name', 'arguments', 'value_expression'))
if is_reserved(node.name):
message = f'name `{node.name}` is a reserved word'
raise FortielSyntaxError(message, node.file_path, node.line_number)
# Split and verify arguments.
if node.arguments is not None:
node.arguments = list(map(
(lambda arg: re.sub(r'\s', '', arg)), node.arguments.split(',')))
naked_arguments = map((lambda arg: arg.replace('*', '')), node.arguments)
if (dup := _find_duplicate(naked_arguments)) is not None:
message = f'duplicate argument `{dup}` of the functional <let>'
raise FortielSyntaxError(message, node.file_path, node.line_number)
if len(bad_arguments := list(filter(is_reserved, naked_arguments))) != 0:
message = f'<let> arguments `{"`, `".join(bad_arguments)}` are reserved words'
raise FortielSyntaxError(message, node.file_path, node.line_number)
return node
def _parse_define_directive(self) -> FortielLetNode:
"""Parse DEFINE directive."""
# Note that we are not evaluating or validating define segment here.
name, segment = \
self._match_directive_syntax(_FORTIEL_DEFINE, 'name', 'segment')
node = FortielLetNode(
self._file_path, self._line_number,
name, arguments=None, value_expression=f"'{segment}'")
if is_reserved(node.name):
message = f'name `{node.name}` is a reserved word'
raise FortielSyntaxError(message, node.file_path, node.line_number)
return node
def _parse_del_directive(self) -> FortielDelNode:
"""Parse DEL directive."""
# Note that we are not evaluating or validating define name here.
node = FortielDelNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_DEL, 'names'))
# Split names.
node.names = list(map(str.strip, node.names.split(',')))
return node
def _parse_if_directive(self) -> FortielIfNode:
"""Parse IF/ELSE IF/ELSE/END IF directive."""
# Note that we are not evaluating or validating condition expressions here.
node = FortielIfNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_IF, 'condition_expression'))
while not self._matches_directive('else if', 'else', 'end if'):
node.then_nodes.append(self._parse_statement())
if self._matches_directive('else if'):
while not self._matches_directive('else', 'end if'):
elif_node = FortielElifNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_ELIF, 'condition_expression'))
while not self._matches_directive('else if', 'else', 'end if'):
elif_node.then_nodes.append(self._parse_statement())
node.elif_nodes.append(elif_node)
if self._matches_directive('else'):
self._match_directive_syntax(_FORTIEL_ELSE)
while not self._matches_directive('end if'):
node.else_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_IF)
return node
def _parse_ifdef_directive(self) -> FortielIfNode:
"""Parse IFDEF/ELSE/END IF directive."""
node = FortielIfNode(
self._file_path, self._line_number,
f'defined("{self._match_directive_syntax(_FORTIEL_IFDEF, "name")}")')
while not self._matches_directive('else', 'end if'):
node.then_nodes.append(self._parse_statement())
if self._matches_directive('else'):
self._match_directive_syntax(_FORTIEL_ELSE)
while not self._matches_directive('end if'):
node.else_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_IF)
return node
def _parse_ifndef_directive(self) -> FortielIfNode:
"""Parse IFNDEF/ELSE/END IF directive."""
node = FortielIfNode(
self._file_path, self._line_number,
f'not defined("{self._match_directive_syntax(_FORTIEL_IFNDEF, "name")}")')
while not self._matches_directive('else', 'end if'):
node.then_nodes.append(self._parse_statement())
if self._matches_directive('else'):
self._match_directive_syntax(_FORTIEL_ELSE)
while not self._matches_directive('end if'):
node.else_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_IF)
return node
def _parse_do_directive(self) -> FortielDoNode:
"""Parse DO/END DO directive."""
# Note that we are not evaluating or validating loop bound expression here.
node = FortielDoNode(
self._file_path, self._line_number,
*self._match_directive_syntax(_FORTIEL_DO, 'index_name', 'ranges_expression'))
if is_reserved(node.index_name):
message = f'<do> loop index name `{node.index_name}` is a reserved word'
raise FortielSyntaxError(message, node.file_path, node.line_number)
while not self._matches_directive('end do'):
node.loop_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_DO)
return node
def _parse_for_directive(self) -> FortielForNode:
"""Parse FOR/END FOR directive."""
# Note that we are not evaluating or validating loop expressions here.
node = FortielForNode(
self._file_path, self._line_number,
*self._match_directive_syntax(_FORTIEL_FOR, 'index_names', 'iterable_expression'))
node.index_names = list(map(str.strip, node.index_names.split(',')))
if len(bad_names := list(filter(is_reserved, node.index_names))) != 0:
message = f'<for> loop index names `{"`, `".join(bad_names)}` are reserved words'
raise FortielSyntaxError(message, node.file_path, node.line_number)
while not self._matches_directive('end for'):
node.loop_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_FOR)
return node
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _parse_call_segment(self) -> FortielCallSegmentNode:
"""Parse call segment."""
        # The call directive uses a different syntax, so it cannot be parsed with the common routines.
if (match := self._matches_line(_FORTIEL_CALL)) is None:
message = 'invalid call segment syntax'
raise FortielSyntaxError(message, self._file_path, self._line_number)
# Note that we are not evaluating or matching call arguments and sections here.
node = FortielCallSegmentNode(
self._file_path, self._line_number, *match.group('spaces', 'name', 'argument'))
node.name = _make_name(node.name)
node.argument = node.argument.strip()
self._advance_line()
return node
def _parse_macro_directive(self) -> FortielMacroNode:
"""Parse MACRO/END MACRO directive."""
node = FortielMacroNode(
self._file_path, self._line_number,
(match := self._match_directive_syntax(_FORTIEL_MACRO, 'name', 'pattern'))[0])
node.name = _make_name(node.name)
node.pattern_nodes = self._parse_pattern_directives_list(node, pattern=match[1])
if self._matches_directive('section'):
while not self._matches_directive('finally', 'end macro'):
section_node = FortielSectionNode(
self._file_path, self._line_number,
*(match := self._match_directive_syntax(
_FORTIEL_SECTION, 'name', 'once', 'pattern'))[0:2])
section_node.name = _make_name(section_node.name)
section_node.once = section_node.once is not None
section_node.pattern_nodes = \
self._parse_pattern_directives_list(section_node, pattern=match[2])
node.section_nodes.append(section_node)
if self._matches_directive('finally'):
self._match_directive_syntax(_FORTIEL_FINALLY)
while not self._matches_directive('end macro'):
node.finally_nodes.append(self._parse_statement())
self._match_directive_syntax(_FORTIEL_END_MACRO)
return node
def _parse_pattern_directives_list(
self, node: Union[FortielMacroNode, FortielSectionNode],
pattern: Optional[str]) -> List[FortielPatternNode]:
"""Parse PATTERN directive list."""
pattern_nodes: List[FortielPatternNode] = []
if pattern is not None:
pattern_node = FortielPatternNode(node.file_path, node.line_number, pattern)
while not self._matches_directive('pattern', 'section', 'finally', 'end macro'):
pattern_node.match_nodes.append(self._parse_statement())
pattern_nodes.append(pattern_node)
elif not self._matches_directive('pattern'):
message = 'expected <pattern> directive'
raise FortielSyntaxError(message, self._file_path, self._line_number)
if self._matches_directive('pattern'):
while not self._matches_directive('section', 'finally', 'end macro'):
pattern_node = FortielPatternNode(
self._file_path, self._line_number,
self._match_directive_syntax(_FORTIEL_PATTERN, 'pattern'))
while not self._matches_directive('pattern', 'section', 'finally', 'end macro'):
pattern_node.match_nodes.append(self._parse_statement())
pattern_nodes.append(pattern_node)
# Compile the patterns.
for pattern_node in pattern_nodes:
try:
pattern_node.pattern = _compile_re(pattern_node.pattern)
except re.error as error:
message = f'invalid pattern regular expression `{pattern_node.pattern}`'
raise FortielSyntaxError(
message, pattern_node.file_path, pattern_node.line_number) from error
return pattern_nodes
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel Directives Executor =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
FortielPrintFunc = Callable[[str], None]
_FORTIEL_INLINE_EVAL: Final = _compile_re(r'\${(?P<expression>.+?)}\$', True)
_FORTIEL_INLINE_SHORT_EVAL: Final = _compile_re(r'[$@](?P<expression>\w+)\b', True)
_FORTIEL_INLINE_SHORT_LOOP: Final = _compile_re(r'''
(?P<comma_before>,\s*)?
[\^@](?P<expression>:|\w+) (?P<comma_after>\s*,)?''', True)
_FORTIEL_INLINE_LOOP: Final = _compile_re(r'''
(?P<comma_before>,\s*)?
[\^@]{ (?P<expression>.*?) ([\^@]\|[\^@] (?P<ranges_expression>.*?) )? }[\^@]
(?P<comma_after>\s*,)?''', True)
_FORTIEL_CMDARG_DEFINE: Final = _compile_re(r'(?P<name>\w+)(?:\s*=\s*(?P<value>.*))')
# TODO: implement builtins correctly.
_FORTIEL_BUILTINS_NAMES = [
'__INDEX__', '__FILE__', '__LINE__', '__DATE__', '__TIME__']
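# Illustrative note (assumption, based on the regexes above): `${expr}$` and the
# short forms `$name` / `@name` substitute evaluated Python expressions into a
# line, while `^{expr}^` / `^{expr^|^ranges}^` expand into comma-separated lists,
# replacing `$$` inside `expr` with the loop index.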
class FortielExecutor:
"""Fortiel syntax tree executor."""
def __init__(self, options: FortielOptions):
self._scope: Dict[str, Any] = {}
self._macros: Dict[str, FortielMacroNode] = {}
self._imported_files_paths: Set[str] = set()
self._options: FortielOptions = options
self._scope['defined'] = self._defined
for define in self._options.defines:
define_name, define_value = \
_FORTIEL_CMDARG_DEFINE.match(define).group('name', 'value')
define_value = self._evaluate_expression(define_value, '<shell>', 1)
self._scope[define_name] = define_value
def _defined(self, name: str) -> bool:
return name in self._scope
@property
def _loop_index(self) -> Optional[int]:
return self._scope.get('__LOOP_INDEX__')
@_loop_index.setter
def _loop_index(self, index: Optional[int]) -> None:
self._scope['__LOOP_INDEX__'] = index
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _evaluate_expression(self, expression: str, file_path: str, line_number: int) -> Any:
"""Evaluate Python expression."""
try:
            # TODO: when should we correctly remove the line continuations?
expression = expression.replace('&\n', '\n')
self._scope.update(__FILE__=file_path, __LINE__=line_number)
value = eval(expression, self._scope)
return value
except Exception as error:
error_text = str(error)
error_text = error_text.replace('<head>', f'expression `{expression}`')
error_text = error_text.replace('<string>', f'expression `{expression}`')
message = f'Python expression evaluation error: {error_text}'
raise FortielRuntimeError(message, file_path, line_number) from error
def _evaluate_ranges_expression(
self, expression: str, file_path: str, line_number: int) -> range:
"""Evaluate Python ranges expression"""
ranges = self._evaluate_expression(expression, file_path, line_number)
if not (isinstance(ranges, tuple) and (2 <= len(ranges) <= 3) and
list(map(type, ranges)) == len(ranges) * [int]):
message = \
'tuple of two or three integers inside the <do> ' + \
f'directive ranges is expected, got `{expression}`'
raise FortielRuntimeError(message, file_path, line_number)
(start, stop), step = ranges[0:2], (ranges[2] if len(ranges) == 3 else 1)
return range(start, stop + step, step)
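        # Worked example (illustrative): a ranges tuple `(1, 5)` yields 1..5
        # inclusive, and `(1, 10, 3)` yields 1, 4, 7, 10.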
def _evaluate_line(self, line: str, file_path: str, line_number: int) -> str:
"""Execute in-line substitutions."""
def _evaluate_inline_loop_expression_sub(match: Match[str]) -> str:
# Evaluate <^..>, <^{..}^> and <^{..^|^..}^> substitutions.
expression, comma_before, comma_after = \
match.group('expression', 'comma_before', 'comma_after')
ranges_expression = match.groupdict().get('ranges_expression')
if ranges_expression is not None:
ranges = self._evaluate_ranges_expression(
ranges_expression, file_path, line_number)
else:
if (index := self._loop_index) is None:
message = '<^{..}^> rangeless substitution outside of the <do> loop body'
raise FortielRuntimeError(message, file_path, line_number)
ranges = range(1, max(0, index) + 1)
sub = ','.join([expression.replace('$$', str(i)) for i in ranges])
if len(sub) > 0:
if comma_before is not None:
sub = comma_before + sub
if comma_after is not None:
sub += comma_after
else:
sub = ',' if (comma_before is not None) and (comma_after is not None) else ''
# Recursively evaluate inner substitutions.
return self._evaluate_line(sub, file_path, line_number)
line = _FORTIEL_INLINE_LOOP.sub(_evaluate_inline_loop_expression_sub, line)
line = _FORTIEL_INLINE_SHORT_LOOP.sub(_evaluate_inline_loop_expression_sub, line)
def _evaluate_inline_eval_expression_sub(match: Match[str]) -> str:
# Evaluate <$..> and <${..}$> substitutions.
expression = match['expression']
value = self._evaluate_expression(expression, file_path, line_number)
# Put negative number into parentheses.
if isinstance(value, (int, float)) and (value < 0):
sub = f'({value})'
else:
sub = str(value)
# Recursively evaluate inner substitutions.
return self._evaluate_line(sub, file_path, line_number)
line = _FORTIEL_INLINE_EVAL.sub(_evaluate_inline_eval_expression_sub, line)
# Special case for OpenMP/OpenACC directives:
if line.lstrip().startswith('!$'):
processed_lines = []
for pragma_line in line.splitlines():
cut = len(pragma_line) - len(pragma_line.lstrip().removeprefix('!$'))
processed_lines.append(
pragma_line[:cut] +
_FORTIEL_INLINE_SHORT_EVAL.sub(
_evaluate_inline_eval_expression_sub, pragma_line[cut:]))
line = '\n'.join(processed_lines)
else:
line = _FORTIEL_INLINE_SHORT_EVAL.sub(_evaluate_inline_eval_expression_sub, line)
# Output the processed line.
return line
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def execute_tree(self, tree: FortielTree, print_func: FortielPrintFunc) -> None:
"""Execute the syntax tree or the syntax tree node."""
# Print primary line marker.
if self._options.line_marker_format == 'fpp':
print_func(f'# 1 "{tree.file_path}" 1')
elif self._options.line_marker_format == 'cpp':
print_func(f'#line 1 "{tree.file_path}" 1')
# Execute tree nodes.
self._execute_node_list(tree.root_nodes, print_func)
def _execute_node(self, node: FortielNode, print_func: FortielPrintFunc) -> None:
"""Execute a node."""
for nodeType, func in {
FortielUseNode: self._execute_use_node,
FortielLetNode: self._execute_let_node,
FortielDelNode: self._execute_del_node,
FortielIfNode: self._execute_if_node,
FortielDoNode: self._execute_do_node,
FortielForNode: self._execute_for_node,
FortielMacroNode: self._execute_macro_node,
FortielCallNode: self._execute_call_node,
FortielLineListNode: self._execute_line_list_node}.items():
if isinstance(node, nodeType):
func = cast(Callable[[FortielNode, FortielPrintFunc], None], func)
return func(node, print_func)
node_type = type(node).__name__
raise RuntimeError(f'internal error: no evaluator for directive type {node_type}')
def _execute_node_list(self, nodes: List[FortielNode], print_func: FortielPrintFunc) -> None:
"""Execute the node list."""
index = 0
while index < len(nodes):
if isinstance(nodes[index], FortielCallSegmentNode):
# List of nodes could be modified during the call.
self._resolve_call_segment(index, nodes)
self._execute_call_node(cast(FortielCallNode, nodes[index]), print_func)
else:
self._execute_node(nodes[index], print_func)
index += 1
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _execute_line_list_node(
self, node: FortielLineListNode, print_func: FortielPrintFunc) -> None:
"""Execute line block."""
# Print line marker.
if self._options.line_marker_format == 'fpp':
print_func(f'# {node.line_number} "{node.file_path}"')
elif self._options.line_marker_format == 'cpp':
print_func(f'#line {node.line_number} "{node.file_path}"')
# Print lines.
for line_number, line in enumerate(node.lines, node.line_number):
print_func(self._evaluate_line(line, node.file_path, line_number))
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _execute_use_node(self, node: FortielUseNode, _: FortielPrintFunc) -> None:
"""Execute USE node."""
# Resolve file path.
node_dir_path = path.dirname(node.file_path)
imported_file_path = _find_file(
node.imported_file_path, self._options.include_paths + [node_dir_path])
if imported_file_path is None:
message = f'`{node.imported_file_path}` was not found in the include paths'
raise FortielRuntimeError(message, node.file_path, node.line_number)
# Ensure that file is used only once.
if imported_file_path not in self._imported_files_paths:
self._imported_files_paths.add(imported_file_path)
try:
with open(imported_file_path, mode='r', encoding='utf-8') as imported_file:
imported_file_lines = imported_file.read().splitlines()
except IsADirectoryError as error:
message = f'`{node.imported_file_path}` is a directory'
raise FortielRuntimeError(message, node.file_path, node.line_number) from error
except IOError as error:
message = f'unable to read file `{node.imported_file_path}`'
raise FortielRuntimeError(message, node.file_path, node.line_number) from error
# Parse and execute the dependency.
# ( Use a dummy print_func in order to skip code lines. )
imported_tree = FortielParser(node.imported_file_path, imported_file_lines).parse()
self.execute_tree(imported_tree, lambda _: None)
def _execute_let_node(self, node: FortielLetNode, _: FortielPrintFunc) -> None:
"""Execute LET node."""
        # Check that the variable name is not a built-in name.
if node.name in _FORTIEL_BUILTINS_NAMES:
message = f'builtin name <{node.name}> can not be redefined'
raise FortielRuntimeError(message, node.file_path, node.line_number)
if node.arguments is None:
# Evaluate variable.
self._scope[node.name] = self._evaluate_expression(
node.value_expression, node.file_path, node.line_number)
else:
# Evaluate variable as lambda function.
function_expression = f'lambda {",".join(node.arguments)}: {node.value_expression}'
function = self._evaluate_expression(
function_expression, node.file_path, node.line_number)
self._scope[node.name] = function
def _execute_del_node(self, node: FortielDelNode, _: FortielPrintFunc) -> None:
"""Execute DEL node."""
for name in node.names:
if name not in self._scope:
message = f'name `{name}` was not previously defined'
raise FortielRuntimeError(message, node.file_path, node.line_number)
if name in _FORTIEL_BUILTINS_NAMES:
message = f'builtin name <{name}> can not be undefined'
raise FortielRuntimeError(message, node.file_path, node.line_number)
del self._scope[name]
def _execute_if_node(self, node: FortielIfNode, print_func: FortielPrintFunc) -> None:
"""Execute IF/ELSE IF/ELSE/END IF node."""
# Evaluate condition and execute THEN branch.
condition = self._evaluate_expression(
node.condition_expression, node.file_path, node.line_number)
if condition:
self._execute_node_list(node.then_nodes, print_func)
else:
# Evaluate condition and execute ELSE IF branches.
for elif_node in node.elif_nodes:
condition = self._evaluate_expression(
elif_node.condition_expression, node.file_path, node.line_number)
if condition:
self._execute_node_list(elif_node.then_nodes, print_func)
break
else:
# Execute ELSE branch.
self._execute_node_list(node.else_nodes, print_func)
def _execute_do_node(self, node: FortielDoNode, print_func: FortielPrintFunc) -> None:
"""Execute DO/END DO node."""
# Evaluate loop ranges.
ranges = self._evaluate_ranges_expression(
node.ranges_expression, node.file_path, node.line_number)
if len(ranges) > 0:
            # Save the previous index value
            # in case we are inside a nested loop.
prev_index = self._loop_index
for index in ranges:
# Execute loop body.
self._loop_index = self._scope[node.index_name] = index
self._execute_node_list(node.loop_nodes, print_func)
del self._scope[node.index_name]
# Restore previous index value.
self._loop_index = prev_index
def _execute_for_node(self, node: FortielForNode, print_func: FortielPrintFunc) -> None:
"""Execute FOR/END FOR node."""
# Evaluate loop.
iterable: Iterable[Any] = self._evaluate_expression(
node.iterable_expression, node.file_path, node.line_number)
for index_values in iterable:
if len(node.index_names) == 1:
self._scope[node.index_names[0]] = index_values
else:
for index_name, index_value in zip(node.index_names, index_values):
self._scope[index_name] = index_value
self._execute_node_list(node.loop_nodes, print_func)
for index_name in node.index_names:
del self._scope[index_name]
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def _execute_macro_node(self, node: FortielMacroNode, _: FortielPrintFunc) -> None:
"""Execute MACRO/END MACRO node."""
if node.name in self._macros:
message = f'macro `{node.name}` is already defined'
raise FortielRuntimeError(message, node.file_path, node.line_number)
if len(node.section_nodes) > 0:
# TODO: refactor me
section_names = node.section_names
if node.name in section_names:
message = f'section name cannot be the same with macro `{node.name}` name'
raise FortielRuntimeError(message, node.file_path, node.line_number)
if (dup := _find_duplicate(section_names)) is not None:
message = f'duplicate section `{dup}` of the macro construct `{node.name}`'
raise FortielRuntimeError(message, node.file_path, node.line_number)
# Add macro to the scope.
self._macros[node.name] = node
def _resolve_call_segment(self, index: int, nodes: List[FortielNode]) -> None:
"""Resolve call segments."""
node = cast(FortielCallSegmentNode, nodes[index])
if (macro_node := self._macros.get(node.name)) is None:
message = f'macro `{node.name}` was not previously defined'
raise FortielRuntimeError(message, node.file_path, node.line_number)
# Convert current node to call node and replace it in the node list.
node = nodes[index] = FortielCallNode(node)
end_name = 'end' + node.name
if macro_node.is_construct:
# Pop and process nodes until the end of macro construct call is reached.
next_index = index + 1
while len(nodes) > next_index:
next_node = nodes[next_index]
if isinstance(next_node, FortielCallSegmentNode):
if next_node.name == end_name:
nodes.pop(next_index)
break
if next_node.name in macro_node.section_names:
call_section_node = FortielCallSectionNode(next_node)
node.call_section_nodes.append(call_section_node)
nodes.pop(next_index)
continue
# Resolve the scoped call.
self._resolve_call_segment(next_index, nodes)
# Append the current node to the most recent section of the call node.
next_node = nodes.pop(next_index)
if len(node.call_section_nodes) == 0:
node.captured_nodes.append(next_node)
else:
section_node = node.call_section_nodes[-1]
section_node.captured_nodes.append(next_node)
else:
message = f'expected `@{end_name}` call segment'
raise FortielRuntimeError(message, node.file_path, node.line_number)
def _execute_call_node(self, node: FortielCallNode, print_func: FortielPrintFunc) -> None:
"""Execute CALL node."""
# Use a special print function
# in order to keep indentations from the original source.
        # ( Note that line markers must stay unindented. )
def _spaced_print_func(line: str):
print_func(line if line.lstrip().startswith('#') else node.spaces_before + line)
macro_node = self._macros[node.name]
self._execute_pattern_list_node(node, macro_node, _spaced_print_func)
# Match and evaluate macro sections.
if macro_node.is_construct:
self._execute_node_list(node.captured_nodes, print_func)
section_iterator = iter(macro_node.section_nodes)
section_node = next(section_iterator, None)
for call_section_node in node.call_section_nodes:
# Find a section node match.
while section_node is not None and \
section_node.name != call_section_node.name:
section_node = next(section_iterator, None)
if section_node is None:
message = f'unexpected call section `{call_section_node.name}`'
raise FortielRuntimeError(
message, call_section_node.file_path, call_section_node.line_number)
# Execute the section.
self._execute_pattern_list_node(
call_section_node, section_node, _spaced_print_func)
self._execute_node_list(call_section_node.captured_nodes, print_func)
# Advance a section for sections with 'once' attribute.
if section_node.once:
section_node = next(section_iterator, None)
# Execute finally section.
self._execute_node_list(macro_node.finally_nodes, _spaced_print_func)
def _execute_pattern_list_node(
self, node: Union[FortielCallNode, FortielCallSectionNode],
macro_node: Union[FortielMacroNode, FortielSectionNode],
print_func: FortielPrintFunc) -> None:
# Find a match in macro or section patterns and
# execute macro primary section or current section.
for pattern_node in macro_node.pattern_nodes:
if (match := pattern_node.pattern.match(node.argument)) is not None:
self._scope |= match.groupdict()
self._execute_node_list(pattern_node.match_nodes, print_func)
break
else:
message = f'macro `{macro_node.name}` call does not match any pattern'
raise FortielRuntimeError(message, node.file_path, node.line_number)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-= Fortiel API and Entry Point =-=-=-=-= #
# =-=-=-=-=-=-=-= =-=-=-=-=-=-=-= #
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= #
def fortiel_preprocess(
file_path: str, output_file_path: Optional[str],
options: FortielOptions = FortielOptions()) -> None:
"""Preprocess the source file."""
# Read the input file and parse it.
with open(file_path, mode='r', encoding='utf-8') as file:
lines = file.read().splitlines()
tree = FortielParser(file_path, lines).parse()
# Execute parse tree and print to output file.
executor = FortielExecutor(options)
if output_file_path is None:
executor.execute_tree(tree, print)
else:
with open(output_file_path, mode='w', encoding='utf-8') as output_file:
executor.execute_tree(tree, lambda line: print(line, file=output_file))
def main() -> None:
"""Fortiel entry point."""
# Make CLI description and parse it.
arg_parser = argparse.ArgumentParser()
# Preprocessor definitions.
arg_parser.add_argument(
'-D', '--define', metavar='name[=value]', action='append', dest='defines', default=[],
help='define a named variable')
# Preprocessor include directories.
arg_parser.add_argument(
'-I', '--include', metavar='include_dir', action='append', dest='include_dirs', default=[],
help='add an include directory path')
# Line marker format.
arg_parser.add_argument(
'-M', '--line_markers', choices=['fpp', 'cpp', 'none'],
default=FortielOptions().line_marker_format, help='line markers format')
# Input and output file paths.
arg_parser.add_argument(
'file_path', help='input file path')
arg_parser.add_argument(
'-o', '--output_file_path', metavar='output_file_path', default=None, help='output file path')
args = arg_parser.parse_args()
# Get input and output file paths.
file_path = args.file_path
output_file_path = args.output_file_path
# Get other options.
options = FortielOptions()
options.defines += args.defines
options.include_paths += args.include_dirs
options.line_marker_format = args.line_markers
# Execute the compiler.
fortiel_preprocess(file_path, output_file_path, options)
if __name__ == '__main__':
main()
``` |
{
"source": "jhu-lcsr/sp_segmenter",
"score": 2
} |
#### File: sp_segmenter/scripts/republisher.py
```python
import rospy
import tf
import tf_conversions.posemath as pm
from geometry_msgs.msg import PoseArray
last_pose = None
def callback(msg):
if len(msg.poses) > 0:
global last_pose
last_pose = msg.poses[0]
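# Note (added for clarity): the loop below latches the most recent pose received
# on `poses_out` and re-broadcasts it at 10 Hz as the TF transform from the
# 'world' frame to the 'drill' frame.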
if __name__ == '__main__':
rospy.init_node('posearray_tf_republisher')
sub = rospy.Subscriber('poses_out',PoseArray,callback)
br = tf.TransformBroadcaster()
try:
rate = rospy.Rate(10)
while not rospy.is_shutdown():
if not (last_pose is None):
(trans, rot) = pm.toTf(pm.fromMsg(last_pose))
br.sendTransform(trans, rot, rospy.Time.now(), 'drill', 'world')
rate.sleep()
    except rospy.ROSInterruptException as e:
        print(e)
``` |
{
"source": "jhult/compose_format",
"score": 2
} |
#### File: jhult/compose_format/setup.py
```python
from setuptools import setup
def readme():
with open('README.rst') as file:
return file.read()
setup(
name='compose_format',
version='1.1.0',
description='format docker-compose files',
long_description=readme(),
url='http://github.com/funkwerk/compose_format',
author='<NAME>',
license='MIT',
packages=['compose_format'],
install_requires=['ruamel.yaml'],
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Environment :: Console',
'Operating System :: OS Independent',
],
keywords='docker-compose format docker yml',
include_package_data=True,
scripts=['bin/compose_format'])
``` |
{
"source": "jhultgre/one-shot-bot",
"score": 2
} |
#### File: one-shot-bot/episodeparsers/firstwatchparser.py
```python
from __future__ import unicode_literals
import logging
import wikiatools
from episode_tracker import EpisodeManager
from .baseparser import Parser
logger = logging.getLogger(__name__)
class FirstWatchParser(Parser):
"""Parser for FirstWatch"""
def __init__(self, f):
super(FirstWatchParser, self).__init__(f)
        # don't set variables until parse
def parse_episode(self):
title = self.values['$title']
if 'Second' in title:
logger.info("Second Watch Episode")
self.template_name = 'templates/second-watch.template'
with EpisodeManager('second-watch') as em:
em.add_episode(title, self.guid)
number = em.get_episode_number(self.guid)
self.values['$prev'] = '[[Second Watch %s]]' % (number - 1)
self.values['$next'] = '[[Second Watch %s]]' % (number + 1)
self.wiki_page = 'Second Watch %s' % number
self.commands.append(wikiatools.update_episode_list(
'First Watch',
self.wiki_page,
'{}: {}'.format(self.wiki_page, title),
self.link,
'-second-watch'))
else:
logger.info("First Watch Episode")
self.template_name = 'templates/first-watch.template'
with EpisodeManager('first-watch') as em:
em.add_episode(title, self.guid)
number = em.get_episode_number(self.guid)
self.values['$prev'] = '[[First Watch %s]]' % (number - 1)
self.values['$next'] = '[[First Watch %s]]' % (number + 1)
self.wiki_page = 'First Watch %s' % number
self.commands.append(wikiatools.update_episode_list(
'First Watch',
self.wiki_page,
'{}: {}'.format(self.wiki_page, title),
self.link,
'-first-watch'))
```
#### File: jhultgre/one-shot-bot/episode_tracker.py
```python
import json
import os
import logging
import sys
import codecs
reload(sys)
sys.setdefaultencoding('utf8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
logger = logging.getLogger(__name__)
class EpisodeManager(object):
"""Keeps track of the episdoe number of complicated episode feeds"""
def __init__(self, podcast, offset=0):
super(EpisodeManager, self).__init__()
        logger.info('creating episode manager for %s' % podcast)
self.podcast = podcast
self.modified = False
if os.path.exists('episode_tracker/%s' % podcast):
logger.debug('loading existing data')
with open('episode_tracker/%s' % podcast) as f:
self.data = json.load(f)
else:
logger.debug('no existing data file')
self.data = {}
if offset:
for e in xrange(offset):
self.add_episode('filler', e)
def __enter__(self):
return self
def __exit__(self, e, ev, trace):
logger.info('saving episode data for %s' % self.podcast)
if self.modified:
with open('episode_tracker/%s' % self.podcast, 'w') as f:
                json.dump(self.data, f, indent=4)  # json.dump returns None, so do not reassign self.data
def add_episode(self, title, guid):
logger.debug('add episode %s' % title)
self.modified = True
if guid in self.data:
return
else:
self.data[guid] = {'title': title,
'number': len(self.data) + 1}
def get_episode_number(self, guid):
return self.data[guid]['number']
# def main():
# with EpisodeManager('oneshot-bonus', 6) as em:
# pass
# if __name__ == '__main__':
# main()
``` |
{
"source": "jhultman/force-program",
"score": 2
} |
#### File: force-program/src/force_program.py
```python
import numpy as np
import cvxpy as cp
import numpy.linalg as la
import matplotlib.pyplot as plt
from animator import plot_and_animate
def lower_triangular_ones(N):
A = np.zeros((N, N))
inds = np.tril_indices_from(A)
A[inds] = 1
return A
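# Worked example (illustrative): lower_triangular_ones(3) returns
#   [[1., 0., 0.],
#    [1., 1., 0.],
#    [1., 1., 1.]]
# so multiplying a force/velocity sequence by it acts as a discrete cumulative sum.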
def build_vel_pos_matrices(N, mass):
A_vf = 1 / mass * lower_triangular_ones(N)
A_xv = lower_triangular_ones(N)
rng = np.arange(N)
A_xv[rng, rng] = 1 / 2
A_xf = A_xv @ A_vf
return A_xf, A_vf
def build_state_matrix(A_xf, A_vf):
a_xf = A_xf[-1, :]
a_vf = A_vf[-1, :]
A = np.vstack((a_xf, a_vf))
return A
def solve_force_program_analytically(A, y_d):
mats = (A.T, la.inv(A @ A.T), y_d)
f = la.multi_dot(mats)
return f
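# Note (added for clarity): f = A^T (A A^T)^{-1} y_d is the minimum Euclidean-norm
# solution of A f = y_d, so it should agree with the CVXPY result for norm=2.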
def solve_force_program_cvxpy(N, A, y_d, norm=2):
f = cp.Variable(N)
y_d = np.array([1, 0])
constraints = [(A @ f) == y_d]
J = cp.norm(f, p=norm)
obj = cp.Minimize(J)
cp.Problem(obj, constraints).solve()
return f.value
def construct_x_v(f, A_xf, A_vf):
x = A_xf @ f
v = A_vf @ f
return x, v
def prep_xvf_for_plot(x, v, f):
vals = ([0], x, [0], v, f[:1], f)
pos, vel, force = np.concatenate(vals).reshape(3, -1)
return pos, vel, force
def main():
N, mass = 10, 1
y_d = np.array([1, 0])
A_xf, A_vf = build_vel_pos_matrices(N, mass)
A = build_state_matrix(A_xf, A_vf)
for norm in [1, 2, 'inf']:
f = solve_force_program_cvxpy(N, A, y_d, norm)
x, v = construct_x_v(f, A_xf, A_vf)
pos, vel, force = prep_xvf_for_plot(x, v, f)
plot_and_animate(pos, vel, force, fname=f'l_{norm}')
if __name__ == '__main__':
main()
``` |
{
"source": "jhultman/iterated-function-systems",
"score": 2
} |
#### File: iterated-function-systems/src/ifs.py
```python
import numpy as np
from plotter import Plotter, plot_ifs
def get_random_transform(transforms):
choice = np.random.choice(transforms.shape[0], p=transforms[:, -1])
transform = transforms[choice, :-1]
abcd = transform[:4].reshape((2, 2))
ef = transform[4:6]
return abcd, ef
def ifs(x0, y0, transforms, num_iters):
xy = np.hstack((x0, y0))
XY = [xy]
for i in range(num_iters):
abcd, ef = get_random_transform(transforms)
xy = np.matmul(abcd, xy) + ef
XY.append(xy)
return np.array(XY)
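# Assumed row layout of the transforms files: [a, b, c, d, e, f, p], i.e. the
# 2x2 affine matrix [[a, b], [c, d]], the translation [e, f], and the probability
# p of choosing that map; each iteration applies xy <- abcd @ xy + ef with the map
# drawn according to p.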
def load_transforms():
barnsley = np.loadtxt('../transforms/barnsley.csv')
von_koch = np.loadtxt('../transforms/von_koch.csv')
transforms = [barnsley, von_koch]
return transforms
def get_savepath(title):
fname = title.lower().replace(' ', '_')
savepath = f'../images/{fname}.gif'
return savepath
def main():
transforms = load_transforms()
titles = ['Barnsley', '<NAME>']
N = [3000, 2000]
for transform, title, n in zip(transforms, titles, N):
xy = ifs(0, 0, transform, n)
plotter = Plotter(xy, title, incr=20)
savepath = get_savepath(title)
plotter.animate(savepath)
if __name__ == '__main__':
main()
``` |
{
"source": "jhultman/jacob-utils",
"score": 3
} |
#### File: jacob-utils/jacob_utils/to_script.py
```python
import sys
import json
def _keep(cell):
is_code = cell['cell_type'] == 'code'
return is_code
def write_to_file(fpath_in, fpath_out):
with open(fpath_in, 'r') as f:
notebook = json.load(f)
cells = filter(_keep, notebook['cells'])
with open(fpath_out, 'w') as f:
for cell in cells:
source = ''.join(cell['source']) + 3 * '\n'
f.write(source)
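# Example usage (illustrative): `python to_script.py notebook.ipynb notebook.py`
# keeps only the notebook's code cells and writes them to the target .py file.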
def main():
f1, f2 = sys.argv[1:3]
write_to_file(f1, f2)
if __name__ == '__main__':
main()
``` |
{
"source": "jhultman/pfilter",
"score": 2
} |
#### File: pfilter/src/make_airplane_marker.py
```python
import numpy as np
from imageio import imread
from skimage import feature
def find_verts(edges):
is_edge = edges[::-1, ::-1].T > 0
verts = np.stack(np.where(is_edge), -1)
return verts
def _order_vertices(verts, ccw=+1):
assert ccw in (-1, +1)
vectors = verts - np.median(verts, 0)
angles = np.arctan2(*vectors[:, ::-1].T)
order = np.argsort(ccw * angles)
verts_ccw = verts[order][::ccw]
return verts_ccw
def order_vertices(verts):
"Approximate CCW ordering."
verts_a = _order_vertices(verts, +1)
verts_b = _order_vertices(verts, -1)
agree = (verts_a == verts_b).all(1)
verts_ccw = verts_a[agree]
return verts_ccw
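# Note (added for clarity): vertices are sorted by angle around the median point;
# keeping only positions where the forward and reversed orderings agree acts as a
# stability check that discards ambiguous edge pixels, leaving an approximately
# counter-clockwise outline.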
def main():
img = imread('../data/airplane.png').mean(-1)
edges = feature.canny(img, sigma=4)
verts = order_vertices(find_verts(edges))
verts = verts[::20] / verts.max()
np.savetxt('../data/airplane_verts.txt', verts)
if __name__ == '__main__':
main()
``` |
{
"source": "jhultman/PV-RCNN",
"score": 2
} |
#### File: PV-RCNN/vision3d/inference.py
```python
import numpy as np
import torch
import matplotlib.pyplot as plt
from vision3d.core import cfg, Preprocessor, AnchorGenerator
from vision3d.core.bev_drawer import Drawer
from vision3d.detector import Second
def viz_detections(points, boxes):
boxes = boxes.cpu().numpy()
bev_map = Drawer(points, [boxes]).image
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(bev_map.transpose(1, 0, 2)[::-1])
ax.set_axis_off()
fig.tight_layout()
plt.show()
def get_model(cfg):
cfg.merge_from_file('../configs/second/car.yaml')
anchors = AnchorGenerator(cfg).anchors
preprocessor = Preprocessor(cfg)
model = Second(cfg).cuda().eval()
ckpt = torch.load('../vision3d/ckpts/epoch_12.pth')['state_dict']
model.load_state_dict(ckpt, strict=True)
return model, preprocessor, anchors
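# Note (added for clarity): get_model merges the car config, builds the anchor
# grid and preprocessor, and loads the checkpoint into an eval-mode Second
# detector; the config/checkpoint paths follow this repository's layout.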
def main():
model, preprocessor, anchors = get_model(cfg)
fpath = '../data/kitti/training/velodyne_reduced/000032.bin'
points = np.fromfile(fpath, np.float32).reshape(-1, 4)
with torch.no_grad():
item = preprocessor(dict(points=[points], anchors=anchors))
for key in ['points', 'features', 'coordinates', 'occupancy', 'anchors']:
item[key] = item[key].cuda()
boxes, batch_idx, class_idx, scores = model.inference(item)
viz_detections(points, boxes)
if __name__ == '__main__':
main()
``` |
{
"source": "jhultman/range-measurements",
"score": 2
} |
#### File: jhultman/range-measurements/beacons.py
```python
import sys
import numba
import numpy as np
import numpy.linalg as la
import matplotlib as mpl
import matplotlib.pyplot as plt
spec = [
('criterion', numba.float32),
('max_iters', numba.int32),
('msmts', numba.float32[:]),
('beacons', numba.float32[:, :]),
('eye', numba.float32[:, :]),
('up', numba.float32),
('down', numba.float32),
('lbd', numba.float32),
]
@numba.jitclass(spec)
class LevenbergMarquardt:
def __init__(self, n, msmts, beacons, criterion, max_iters):
self.criterion = criterion
self.max_iters = max_iters
self.msmts = msmts.astype(np.float32)
self.beacons = beacons.astype(np.float32)
self.eye = np.eye(n).astype(np.float32)
self.up, self.down, self.lbd = 2.0, 0.8, 1.0
def jacobian(self, x):
return 2 * (x - self.beacons)
def func(self, x):
norm = np.sqrt(((x - self.beacons) ** 2).sum(-1))
return norm - self.msmts
def _get_iterate(self, x, fval):
"""Assume linear dynamics locally."""
J = self.jacobian(x)
mat = J.T @ J + self.lbd * self.eye
x_iter = x - np.ascontiguousarray(la.inv(mat)) @ J.T @ fval
return x_iter
def _compare(self, f0, f1, x, x_iter):
"""Accept iterate if better,
else shrink trust region."""
if f0 < f1:
self.lbd *= self.up
return x
else:
self.lbd *= self.down
return x_iter
def solve(self, x):
"""Solve non-linear least squares via LM."""
history = [list(x)]
for _ in range(self.max_iters):
fval = self.func(x)
x_iter = self._get_iterate(x, fval)
fval_iter = self.func(x_iter)
f0, f1 = la.norm(fval), la.norm(fval_iter)
x = self._compare(f0, f1, x, x_iter)
history += [list(x)]
if f1 < self.criterion:
exitcode = 0
break
else:
exitcode = 1
history = np.float32(history)
return exitcode, x, history
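# Damping schedule (added for clarity): if a trial step increases the residual
# norm, `lbd` is scaled up (shrinking the trust region) and the step is rejected;
# otherwise `lbd` is scaled down and the iterate is accepted.
# Example usage (illustrative, mirrors main()):
#   lm = make_lm(beacons, distance)
#   exitcode, x, history = lm.solve(np.array([130, 160], np.float32))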
def make_lm(beacons, distance, sigma=1e-1):
kwargs = dict(criterion=1.0, max_iters=1000)
m, n = beacons.shape
noise = np.random.normal(0, sigma, size=(m,))
msmts = distance + noise
lm = LevenbergMarquardt(n, msmts, beacons, **kwargs)
return lm
def make_problem():
position = np.array([110, 70], np.float32)
beacons = np.array([[130, 130], [90, 40], [70, 120]], np.float32)
distance = la.norm(position[None, :] - beacons, axis=-1)
return position, beacons, distance
def make_circles(beacons, radii):
kwargs = dict(fill=False, color='seagreen', alpha=0.4, linestyle='dashed')
mapfunc = lambda tup: mpl.patches.Circle(*tup, **kwargs)
circles = map(mapfunc, zip(beacons, radii))
return circles
def add_circles(beacons, radii, ax):
circles = make_circles(beacons, radii)
list(map(ax.add_patch, circles))
def set_plot_properties(fig, ax):
fig.tight_layout()
ax.set(xlim=[0, 200], ylim=[0, 200],
aspect='equal', title='Range Localization')
ax.legend()
def plot_experiment(history, position, beacons, radii):
fig, ax = plt.subplots(figsize=(5, 5))
add_circles(beacons, radii, ax)
ax.scatter(*position, c='darkblue', s=50, marker='x', label='x_true')
ax.scatter(*beacons.T, c='darkblue', s=50, marker='s', label='beacons')
ax.scatter(*history[0], c='orange', s=20, marker='o')
ax.plot(*history.T, c='orange', label='iterates')
set_plot_properties(fig, ax)
fig.savefig('./images/beacon.png', dpi=100, bbox_inches='tight')
plt.show()
def main():
position, beacons, distance = make_problem()
lm = make_lm(beacons, distance)
x0 = np.array([130, 160], np.float32)
exitcode, x, history = lm.solve(x0)
if exitcode != 0:
print('Stopping criterion not reached.')
plot_experiment(history, position, beacons, distance)
if __name__ == '__main__':
main()
``` |
{
"source": "jhult/oci-designer-toolkit",
"score": 2
} |
#### File: visualiser/model/okitValidation.py
```python
__author__ = ["<NAME> (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "ociJsonValidator"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import ipaddress
from common.okitLogging import getLogger
# Configure logging
logger = getLogger()
class OCIJsonValidator(object):
def __init__(self, okit_json={}):
self.okit_json = okit_json
self.results = {'errors': [], 'warnings': [], 'info': []}
self.valid = True
def validate(self):
logger.info('Validating OKIT Json')
self.validateCommon()
self.validateAutonomousDatabases()
self.validateBlockStorageVolumes()
self.validateCompartments()
self.validateCustomerPremiseEquipments()
self.validateDhcpOptions()
self.validateDatabaseSystems()
self.validateDynamicRoutingGateways()
self.validateFastConnects()
self.validateFileStorageSystems()
self.validateInstances()
self.validateInternetGateways()
self.validateIPSecConnections()
self.validateLoadBalancers()
self.validateLocalPeeringGateways()
self.validateMySqlDatabaseSystems()
self.validateNATGateways()
self.validateNetworkSecurityGroups()
self.validateObjectStorageBuckets()
self.validateRemotePeeringConnections()
self.validateRouteTables()
self.validateSecurityLists()
self.validateServiceGateways()
self.validateSubnets()
self.validateVirtualCloudNetworks()
return self.valid
def getResults(self):
return self.results
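    # Note (added for clarity): every entry appended to results['errors'] or
    # results['warnings'] below is a dict with the keys id, type, artefact,
    # message and element, presumably so the caller can point at the offending
    # field of the offending resource.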
def keyToType(self, key):
return key.replace('_', ' ').title()[:-1]
# Common
def validateCommon(self):
# Build Display Name List
used_display_names = {}
for key in self.okit_json:
if isinstance(self.okit_json[key], list):
for artefact in self.okit_json[key]:
used_display_names[artefact['display_name']] = used_display_names.get(artefact['display_name'], 0) + 1;
for key in self.okit_json:
if isinstance(self.okit_json[key], list):
for artefact in self.okit_json[key]:
if used_display_names[artefact['display_name']] > 1:
self.valid = False
error = {
'id': artefact['id'],
'type': self.keyToType(key),
'artefact': artefact['display_name'],
'message': 'Duplicate Display Name.',
'element': 'display_name'
}
self.results['errors'].append(error)
# Autonomous Database
def validateAutonomousDatabases(self):
for artefact in self.okit_json.get('autonomous_databases', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check DB Name
if artefact['db_name'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Autonomous Database',
'artefact': artefact['display_name'],
'message': 'Database Name must be specified.',
'element': 'db_name'
}
self.results['errors'].append(error)
# Check Free Tier
if artefact['is_free_tier'] and artefact['is_auto_scaling_enabled']:
self.valid = False
error = {
'id': artefact['id'],
'type': 'Autonomous Database',
'artefact': artefact['display_name'],
'message': 'Auto Scaling is not available with Free Tier.',
'element': 'is_auto_scaling_enabled'
}
self.results['errors'].append(error)
if artefact['is_free_tier'] and artefact['license_model'] != 'LICENSE_INCLUDED':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Autonomous Database',
'artefact': artefact['display_name'],
'message': 'Free Tier only supports License Included.',
'element': 'license_model'
}
self.results['errors'].append(error)
if artefact['subnet_id'] != '' and len(artefact['nsg_ids']) == 0:
self.valid = False
error = {
'id': artefact['id'],
'type': 'Autonomous Database',
'artefact': artefact['display_name'],
'message': 'Autonomous Databases with private access (Subnet) require at least 1 Network Security Group.',
'element': 'nsg_ids'
}
self.results['errors'].append(error)
# Block Storage
def validateBlockStorageVolumes(self):
for artefact in self.okit_json.get('block_storage_volumes', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Compartment
def validateCompartments(self):
for artefact in self.okit_json.get('compartments', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
if artefact['description'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Compartment',
'artefact': artefact['display_name'],
'message': 'Compartment Description must be specified.',
'element': 'description'
}
self.results['errors'].append(error)
# Customer Premise Equipment
def validateCustomerPremiseEquipments(self):
for artefact in self.okit_json.get('customer_premise_equipments', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
if artefact['ip_address'] is None or artefact['ip_address'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Customer Premise Equipment',
'artefact': artefact['display_name'],
'message': 'IP Address must be specified.',
'element': 'ip_address'
}
self.results['errors'].append(error)
# Database Systems
def validateDatabaseSystems(self):
for artefact in self.okit_json.get('database_systems', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check ssh Key
if artefact['ssh_public_keys'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Database System',
'artefact': artefact['display_name'],
'message': 'Public Keys must be specified.',
'element': 'ssh_public_keys'
}
self.results['errors'].append(error)
# Check RAC / LVM
if artefact['database_edition'] == 'ENTERPRISE_EDITION_EXTREME_PERFORMANCE' and artefact['db_system_options']['storage_management'] == 'LVM':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Database System',
'artefact': artefact['display_name'],
'message': 'Can not configure RAC database with LVM.',
'element': 'database_edition'
}
self.results['errors'].append(error)
# Check Hostname
if artefact['hostname'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Database System',
'artefact': artefact['display_name'],
'message': 'Hostname must be specified.',
'element': 'hostname'
}
self.results['errors'].append(error)
# Dhcp Options
def validateDhcpOptions(self):
for resource in self.okit_json.get('dhcp_options', []):
logger.info('Validating {!s}'.format(resource['display_name']))
defaults = [resource.get('default', False) and r.get('default', False) and r['id'] != resource['id'] and r['vcn_id'] == resource['vcn_id'] for r in self.okit_json.get('dhcp_options', [])]
if any(defaults):
error = {
'id': resource['id'],
'type': 'Dhcp Option',
'artefact': resource['display_name'],
'message': f'Multiple Dhcp Options specified as default {" ".join([r["display_name"] for r in self.okit_json.get("dhcp_options", []) if r.get("default", False) and r["id"] != resource["id"] and r["vcn_id"] == resource["vcn_id"]])}.',
'element': 'default'
}
self.results['errors'].append(error)
# Dynamic Routing Gateway
def validateDynamicRoutingGateways(self):
for artefact in self.okit_json.get('dynamic_routing_gateways', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Fast Connect
def validateFastConnects(self):
for artefact in self.okit_json.get('fast_connects', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# File Storage
def validateFileStorageSystems(self):
for artefact in self.okit_json.get('file_storage_systems', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Instances
def validateInstances(self):
for artefact in self.okit_json.get('instances', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check ssh Key
if artefact['metadata']['ssh_authorized_keys'] == '':
warning = {
'id': artefact['id'],
'type': 'Instance',
'artefact': artefact['display_name'],
'message': 'No Public Keys specified.',
'element': 'ssh_authorized_keys'
}
self.results['warnings'].append(warning)
# Check Hostname
if artefact.get('primary_vnic', {}).get('hostname_label', '') == '':
self.valid = False
warning = {
'id': artefact['id'],
'type': 'Instance',
'artefact': artefact['display_name'],
'message': 'Hostname should be specified.',
'element': 'hostname_label'
}
self.results['warnings'].append(warning)
for vnic in artefact['vnics']:
if vnic['subnet_id'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Instance',
'artefact': artefact['display_name'],
'message': 'VNIC Must be connected to Subnet.',
'element': 'subnet_id'
}
self.results['errors'].append(error)
# Check Boot volume size
if int(artefact['source_details']['boot_volume_size_in_gbs']) < 50 or int(artefact['source_details']['boot_volume_size_in_gbs']) > 32768:
self.valid = False
error = {
'id': artefact['id'],
'type': 'Instance',
'artefact': artefact['display_name'],
'message': 'Boot Volume Size must be between 50GB and 32,768GB.',
'element': 'boot_volume_size_in_gbs'
}
self.results['errors'].append(error)
# Internet Gateways
def validateInternetGateways(self):
for artefact in self.okit_json.get('internet_gateways', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# IPSec Connection
def validateIPSecConnections(self):
for artefact in self.okit_json.get('ipsec_connections', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
if artefact['static_routes'] is None or artefact['static_routes'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'IPSec Connection',
'artefact': artefact['display_name'],
'message': 'Static Routes must be specified.',
'element': 'static_routes'
}
self.results['errors'].append(error)
if artefact['drg_id'] is None or artefact['drg_id'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'IPSec Connection',
'artefact': artefact['display_name'],
'message': 'DRG must be specified.',
'element': 'drg_id'
}
self.results['errors'].append(error)
if artefact['cpe_id'] is None or artefact['cpe_id'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'IPSec Connection',
'artefact': artefact['display_name'],
'message': 'Customer Premise must be specified.',
'element': 'cpe_id'
}
self.results['errors'].append(error)
# Load Balancers
def validateLoadBalancers(self):
for artefact in self.okit_json.get('load_balancers', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
if len(artefact['instance_ids']) == 0:
warning = {
'id': artefact['id'],
'type': 'Load Balancer',
'artefact': artefact['display_name'],
'message': 'No Backend Instances have been specified.',
'element': 'instance_ids'
}
self.results['warnings'].append(warning)
# Local Peering Gateways
def validateLocalPeeringGateways(self):
for artefact in self.okit_json.get('local_peering_gateways', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check Peer Id
if artefact['peer_id'] == '':
warning = {
'id': artefact['id'],
'type': 'Local Peering Gateway',
'artefact': artefact['display_name'],
'message': 'Peer not specified.',
'element': 'peer_id'
}
self.results['warnings'].append(warning)
# Check Route Table Id
if artefact['route_table_id'] == '':
info = {
'id': artefact['id'],
'type': 'Local Peering Gateway',
'artefact': artefact['display_name'],
'message': 'Route Table not specified.',
'element': 'route_table_id'
}
self.results['info'].append(info)
else:
for route_table in self.okit_json.get('route_tables', []):
if route_table['id'] == artefact['route_table_id']:
for rule in route_table['route_rules']:
if rule['target_type'] not in ['dynamic_routing_gateways', 'private_ips']:
error = {
'id': artefact['id'],
'type': 'Local Peering Gateway',
'artefact': artefact['display_name'],
'message': 'A route table that is associated with an LPG can have only rules that target a DRG or a private IP.',
'element': 'route_table_id'
}
self.results['errors'].append(error)
# MySql Database Systems
def validateMySqlDatabaseSystems(self):
for artefact in self.okit_json.get('mysql_database_systems', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check Admin Username
if artefact['admin_username'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'MySQL Database System',
'artefact': artefact['display_name'],
'message': 'Admin Username is required.',
'element': 'admin_username'
}
self.results['errors'].append(error)
# Check Admin Password
if artefact['admin_password'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'MySQL Database System',
'artefact': artefact['display_name'],
'message': 'Admin Password must be specified.',
'element': 'admin_password'
}
self.results['errors'].append(error)
# NAT Gateways
def validateNATGateways(self):
for artefact in self.okit_json.get('nat_gateways', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Network Security Groups
def validateNetworkSecurityGroups(self):
for artefact in self.okit_json.get('network_security_groups', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Object Storage
def validateObjectStorageBuckets(self):
for artefact in self.okit_json.get('object_storage_buckets', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Remote Peering Connection
def validateRemotePeeringConnections(self):
for artefact in self.okit_json.get('remote_peering_connections', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
if artefact['drg_id'] is None or artefact['drg_id'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Remote Peering Connection',
'artefact': artefact['display_name'],
'message': 'DRG must be specified.',
'element': 'drg_id'
}
self.results['errors'].append(error)
# Route Tables
def validateRouteTables(self):
for resource in self.okit_json.get('route_tables', []):
logger.info('Validating {!s}'.format(resource['display_name']))
if len(resource['route_rules']) == 0:
warning = {
'id': resource['id'],
'type': 'Route Table',
'artefact': resource['display_name'],
'message': 'No Rules have been specified.',
'element': 'route_rules'
}
self.results['warnings'].append(warning)
else:
for rule in resource['route_rules']:
if rule['network_entity_id'] == '':
error = {
'id': resource['id'],
'type': 'Route Table',
'artefact': resource['display_name'],
'message': f'Network Entity has not been specified for {" ".join(rule["target_type"].split("_")).title()} rule.',
'element': 'route_rules'
}
self.results['errors'].append(error)
defaults = [resource.get('default', False) and r.get('default', False) and r['id'] != resource['id'] and r['vcn_id'] == resource['vcn_id'] for r in self.okit_json.get('route_tables', [])]
if any(defaults):
error = {
'id': resource['id'],
'type': 'Route Table',
'artefact': resource['display_name'],
'message': f'Multiple Route Tables specified as default {" ".join([r["display_name"] for r in self.okit_json.get("route_tables", []) if r.get("default", False) and r["id"] != resource["id"] and r["vcn_id"] == resource["vcn_id"]])}.',
'element': 'default'
}
self.results['errors'].append(error)
# Security Lists
def validateSecurityLists(self):
for resource in self.okit_json.get('security_lists', []):
logger.info('Validating {!s}'.format(resource['display_name']))
if len(resource['egress_security_rules']) == 0:
warning = {
'id': resource['id'],
'type': 'Security List',
'artefact': resource['display_name'],
'message': 'No Egress Rules have been specified.',
'element': 'egress_security_rules'
}
self.results['warnings'].append(warning)
if len(resource['ingress_security_rules']) == 0:
warning = {
'id': resource['id'],
'type': 'Security List',
'artefact': resource['display_name'],
'message': 'No Ingress Rules have been specified.',
'element': 'ingress_security_rules'
}
self.results['warnings'].append(warning)
defaults = [resource.get('default', False) and r.get('default', False) and r['id'] != resource['id'] and r['vcn_id'] == resource['vcn_id'] for r in self.okit_json.get('security_lists', [])]
if any(defaults):
error = {
'id': resource['id'],
'type': 'Security List',
'artefact': resource['display_name'],
'message': f'Multiple Security Lists specified as default {" ".join([r["display_name"] for r in self.okit_json.get("security_lists", []) if r.get("default", False) and r["id"] != resource["id"] and r["vcn_id"] == resource["vcn_id"]])}.',
'element': 'default'
}
self.results['errors'].append(error)
# Service Gateways
def validateServiceGateways(self):
for artefact in self.okit_json.get('service_gateways', []):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Subnets
def validateSubnets(self):
vcn_cidr_map = {}
for vcn in self.okit_json.get('virtual_cloud_networks', []):
vcn_cidr_map[vcn['id']] = vcn['cidr_blocks']
for artefact in sorted(self.okit_json.get('subnets', []), key=lambda k: k['vcn_id']):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check Connected to a VCN
if artefact['vcn_id'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Subnet',
'artefact': artefact['display_name'],
'message': 'Subnet is not part of a VCN.',
'element': 'vcn_id'
}
self.results['errors'].append(error)
# Check that CIDR exists
if artefact['cidr_block'] == '':
self.valid = False
error = {
'id': artefact['id'],
'type': 'Subnet',
'artefact': artefact['display_name'],
'message': 'Subnet does not have a CIDR.',
'element': 'cidr_block'
}
self.results['errors'].append(error)
else:
# Check if part of VCN CIDR
if not self.subnet_of(vcn_cidr_map[artefact['vcn_id']], artefact['cidr_block']):
self.valid = False
error = {
'id': artefact['id'],
'type': 'Subnet',
'artefact': artefact['display_name'],
'message': 'Subnet CIDR {!s} is not part of VCN CIDR {!s}.'.format(artefact['cidr_block'],
vcn_cidr_map[artefact['vcn_id']]),
'element': 'cidr_block'
}
self.results['errors'].append(error)
# Check for Subnet Overlap
for other in [s for s in self.okit_json.get('subnets', []) if s['vcn_id'] == artefact['vcn_id'] and s['id'] != artefact['id']]:
if other['cidr_block'] != '' and self.overlaps(artefact['cidr_block'], other['cidr_block']):
self.valid = False
error = {
'id': artefact['id'],
'type': 'Subnet',
'artefact': artefact['display_name'],
'message': 'Subnet CIDR {!s} overlaps Subnet {!s} CIDR {!s}.'.format(artefact['cidr_block'],
other['display_name'],
other['cidr_block']),
'element': 'cidr_block'
}
self.results['errors'].append(error)
# Check Route Table
if (artefact['route_table_id'] == ''):
warning = {
'id': artefact['id'],
'type': 'Subnet',
'artefact': artefact['display_name'],
'message': 'Subnet has no Route Table Assigned.',
'element': 'route_table_id'
}
self.results['warnings'].append(warning)
# Check Security Lists
if (len(artefact['security_list_ids']) == 0):
warning = {
'id': artefact['id'],
'type': 'Subnet',
'artefact': artefact['display_name'],
'message': 'Subnet has no Security Lists Assigned.',
'element': 'security_list_ids'
}
self.results['warnings'].append(warning)
# Virtual Cloud Networks
def validateVirtualCloudNetworks(self):
for artefact in sorted(self.okit_json.get('virtual_cloud_networks', []), key=lambda k: k['compartment_id']):
logger.info('Validating {!s}'.format(artefact['display_name']))
# Check that CIDR exists
if len(artefact['cidr_blocks']) == 0:
self.valid = False
error = {
'id': artefact['id'],
'type': 'Virtual Cloud Network',
'artefact': artefact['display_name'],
'message': 'Virtual Cloud Network does not have a CIDR.',
'element': 'cidr_blocks'
}
self.results['errors'].append(error)
else:
# Check for CIDR Overlap
for other in [s for s in self.okit_json.get('virtual_cloud_networks', []) if s['compartment_id'] == artefact['compartment_id'] and s['id'] != artefact['id']]:
if len(other['cidr_blocks']) > 0:
for cidr_block in artefact['cidr_blocks']:
for other_cidr_block in other['cidr_blocks']:
if self.overlaps(cidr_block, other_cidr_block):
self.valid = False
error = {
'id': artefact['id'],
'type': 'Virtual Cloud Network',
'artefact': artefact['display_name'],
'message': 'VCN CIDR {!s} overlaps VCN {!s} CIDR {!s}.'.format(cidr_block, other['display_name'], other_cidr_block),
'element': 'cidr_blocks'
}
self.results['errors'].append(error)
# Network Methods
def subnet_of(self, supernets, subnet):
try:
return any([ipaddress.ip_network(subnet) in ipaddress.ip_network(supernet).subnets(new_prefix=int(subnet.split('/')[-1])) for supernet in supernets])
# return ipaddress.ip_network(subnet) in ipaddress.ip_network(supernet).subnets(new_prefix=int(subnet.split('/')[-1]))
except ValueError:
return False
def overlaps(self, subnet1, subnet2):
try:
return ipaddress.ip_network(subnet1).overlaps(ipaddress.ip_network(subnet2))
except ValueError:
return False
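# Illustrative behaviour of the two helpers above (example values only):
# subnet_of(['10.0.0.0/16'], '10.0.1.0/24') -> True, because 10.0.1.0/24 is one of the /24 subnets of 10.0.0.0/16.
# overlaps('10.0.0.0/24', '10.0.0.128/25') -> True, while overlaps('10.0.0.0/24', '10.0.1.0/24') -> False.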
``` |
{
"source": "JhumanJ/aws_list_all",
"score": 2
} |
#### File: aws_list_all/aws_list_all/fixing_filter.py
```python
import json
import pprint
import boto3
from .client import get_client
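# Each filter below post-processes a single service listing response in place: it strips
# pagination/bookkeeping keys (Count, Quantity, MaxResults, NextToken, markers, ...) and,
# when a truncation indicator is present, flags the listing as potentially incomplete by
# setting self.complete to False for the caller to inspect.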
class CountFilter:
def __init__(self, complete):
self.complete = complete
def execute(self, listing, response):
if 'Count' in response:
if 'MaxResults' in response:
if response['MaxResults'] <= response['Count']:
self.complete = False
del response['MaxResults']
del response['Count']
class QuantityFilter:
def __init__(self, complete):
self.complete = complete
def execute(self, listing, response):
if 'Quantity' in response:
if 'MaxItems' in response:
if response['MaxItems'] <= response['Quantity']:
self.complete = False
del response['MaxItems']
del response['Quantity']
class NeutralThingFilter:
def execute(self, listing, response):
for neutral_thing in ('MaxItems', 'MaxResults', 'Quantity'):
if neutral_thing in response:
del response[neutral_thing]
class BadThingFilter:
def __init__(self, complete):
self.complete = complete
def execute(self, listing, response):
for bad_thing in (
'hasMoreResults', 'IsTruncated', 'Truncated', 'HasMoreApplications', 'HasMoreDeliveryStreams',
'HasMoreStreams', 'NextToken', 'NextMarker', 'nextMarker', 'Marker'
):
if bad_thing in response:
if response[bad_thing]:
self.complete = False
del response[bad_thing]
class NextTokenFilter:
def __init__(self, complete):
self.complete = complete
def execute(self, listing, response):
# interpret nextToken in several services
if (listing.service, listing.operation) in (('inspector', 'ListFindings'), ('logs', 'DescribeLogGroups')):
if response.get('nextToken'):
self.complete = False
del response['nextToken']
``` |
{
"source": "jhumphry/pyxact",
"score": 3
} |
#### File: doc/examples/example_hooks.py
```python
import decimal
import random
import sys
from pyxact import fields, queries, records, transactions
from pyxact import loggingdb
import example_schema, utils
class ReverseTransaction(example_schema.AccountingTransaction):
def _post_select_hook(self, context, cursor):
super()._post_select_hook(context, cursor)
for i in self.journal_list:
i.amount = -i.amount
self.transaction.t_rev = True
return True
# It is possible to subclass AccountingTransaction from the example_schema module and change the
# post_select_hook() method which normalizes the data after it has been read in. In this case, it
# flips the sign of all of the journal amounts. It will inherit all of the SQLField, SQLTable and
# SQLRecordList attributes from the base class.
SUM_ACCOUNT_QUERY = '''
SELECT SUM(amount)
FROM {accounting.journals}
WHERE account = {account};'''
class SumAccountQuery(queries.SQLQuery, query=SUM_ACCOUNT_QUERY):
account = fields.IntField()
class SetAccountTotal(example_schema.AccountingTransaction):
account = fields.IntField()
desired_sum = fields.NumericField(precision=8, scale=2, allow_floats=True)
current_sum = fields.NumericField(precision=8, scale=2, allow_floats=True,
inexact_quantize=True, query=SumAccountQuery)
def _pre_insert_hook(self, context, cursor):
super()._pre_insert_hook(context, cursor)
if self.transaction is None:
self.transaction = example_schema.TransactionTable()
if self.journal_list is None:
self.journal_list = example_schema.JournalList()
self.transaction.narrative = 'Adjusting account {} to {}'.format(self.account,
self.desired_sum)
self.transaction.creator = 'SYS'
self.transaction.t_rev = False
posting_needed = self.desired_sum - self.current_sum
self.journal_list._clear()
self.journal_list._append(example_schema.JournalTable(account=self.account, amount=posting_needed))
self.journal_list._append(example_schema.JournalTable(account=9999, amount=-posting_needed))
return True
# This more complicated subclass of AccountingTransaction shows how a hook can be used to
# automatically regenerate some parts of the transaction based on other parts. Here the journals are
# generated based on context fields provided by the user and a query refreshed from the database,
# along with the timestamp, tid and row_id fields that are set up to be automatically completed by
# the standard operation of AccountingTransaction
# Note that the current_sum field has 'inexact_quantize' activated. SQLite does not really support
# NUMERIC. It stores values as an integer, binary float or text as necessary to avoid losing
# precision, but it cannot do decimal fixed-point arithmetic, so it uses binary floating-point, which
# will not always exactly quantize down to two decimal places. The 'inexact_quantize' parameter
# tells the NumericField to silently discard the excess decimal places without complaining.
def generate_transactions(cursor, n=100):
'''Add random transactions to the example schema'''
trans_details = example_schema.TransactionTable(creator='AAA',
t_rev=False)
trans_journals = example_schema.JournalList(example_schema.JournalTable(),
example_schema.JournalTable(),
)
tmp_transaction = example_schema.AccountingTransaction(transaction=trans_details,
journal_list=trans_journals)
for i in range(0,n):
tmp_transaction.transaction.narrative='Random transaction '+str(i)
x = random.randint(-500, 500)
z = decimal.Decimal(x).scaleb(-2)
tmp_transaction.journal_list[0].amount = z
tmp_transaction.journal_list[0].account = random.randint(1000,1020)
tmp_transaction.journal_list[1].amount = -z
tmp_transaction.journal_list[1].account = random.randint(1000,1020)
tmp_transaction._insert_new(cursor)
if __name__ == '__main__':
conn = utils.process_command_line('Demonstrate pyxact transactions with hooks')
cursor = conn.cursor()
example_schema.create_example_schema(cursor)
example_schema.populate_example_schema(cursor)
# Now we are going to select the first transaction created by populate_example_schema
rev_trans = ReverseTransaction(tid=1)
rev_trans._context_select(cursor)
# rev_trans will have been normalized at the end of the context_select - i.e. the sign of the
# amounts in the journals will have been flipped
rev_trans._insert_new(cursor)
# As insert_new rather than insert_existing has been used, updated values for tid and
# creation_ts will have been used and written back into rev_trans
original_trans = example_schema.AccountingTransaction(tid=1)
original_trans._context_select(cursor)
# rev_trans could be used directly, but this shows that the new data has hit the database
new_trans = example_schema.AccountingTransaction(tid=rev_trans.tid)
new_trans._context_select(cursor)
print('***Original transaction***\n')
print(original_trans)
print('***Reversed transaction***\n')
print(new_trans)
print('\nInserting 100 random transactions\n')
generate_transactions(cursor=cursor, n=100)
account_1010 = SumAccountQuery(account=1010)
account_1010._execute(cursor)
print('Account 1010 has total value: {}'.format(account_1010._result_singlevalue(cursor)))
print('(any spurious decimal places are due to the lack of support for true decimal arithmetic '
'on NUMERIC data types in SQLite)\n')
print('Adjusting to value 3.14\n')
set_account_1010 = SetAccountTotal(account=1010,
desired_sum=decimal.Decimal('3.14'))
set_account_1010._insert_new(cursor)
account_1010._execute(cursor)
print('Account 1010 has total value: {}\n'.format(account_1010._result_singlevalue(cursor)))
```
#### File: doc/examples/utils.py
```python
import argparse
import os
import sys
import sqlite3
try:
import psycopg2
POSTGRESQL_AVAILABLE = True
except:
POSTGRESQL_AVAILABLE = False
try:
import testing.postgresql
POSTGRESQL_TESTING = True
except:
POSTGRESQL_TESTING = False
from pyxact import dialects, loggingdb
import pyxact.psycopg2
DATABASE_USED = None
temp_pg=None
def process_command_line(description='Demonstrate pyxact'):
'''Process the command line arguments and return a functioning DB-API connection'''
global DATABASE_USED
global temp_pg
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--log', help='Dump SQL commands to a file before executing them',
action='store', default=None)
parser.add_argument('--postgresql', help='Whether to use PostgreSQL instead of SQLite',
action='store_true')
parser.add_argument('--database',
help='PostgreSQL database to use (default to a temporary db)',
action='store', default='#temp#')
parser.add_argument('--base_dir', help='Base directory for PostgreSQL temp db',
action='store', default=None)
parser.add_argument('--user', help='PostgreSQL user for upload',
action='store',
default=os.environ.get('USER', 'postgres'))
parser.add_argument('--password', help='PostgreSQL user password',
action='store', default='')
parser.add_argument('--host', help='PostgreSQL host (if using TCP/IP)',
action='store', default=None)
parser.add_argument('--port', help='PostgreSQL port (if required)',
action='store', type=int, default=5432)
args = parser.parse_args()
if args.postgresql:
if not POSTGRESQL_AVAILABLE:
raise RuntimeError('PostgreSQL support not available')
if args.database == '#temp#':
if not POSTGRESQL_TESTING:
raise RuntimeError('testing.postgresql module not available')
temp_pg = testing.postgresql.Postgresql(base_dir=args.base_dir)
temp_pg.wait_booting()
connection = psycopg2.connect(**temp_pg.dsn())
elif args.host:
connection = psycopg2.connect(database=args.database,
user=args.user,
password=args.password,
host=args.host,
port=args.port)
else:
connection = psycopg2.connect(database=args.database,
user=args.user,
password=args.password)
dialects.DefaultDialect = pyxact.psycopg2.Psycopg2Dialect
# By changing DefaultDialect we change the default SQL dialect used whenever no specific
# dialect parameter is passed to a relevant pyxact method.
DATABASE_USED = 'PostgreSQL'
else:
connection = sqlite3.connect(':memory:')
connection.execute('PRAGMA foreign_keys = ON;') # We need SQLite foreign key support
dialects.DefaultDialect = dialects.sqliteDialect
DATABASE_USED = 'SQLite'
if args.log:
if args.log == 'STDOUT':
conn = loggingdb.Connection(inner_connection=connection)
else:
conn = loggingdb.Connection(inner_connection=connection, log_file=open(args.log, 'a'))
else:
conn = connection
# pyxact.loggingdb is a facade that can save the SQL commands being executing and the parameters
# used to a file for use in debugging.
return conn
```
#### File: pyxact/pyxact/constraints.py
```python
from . import SQLSchemaBase, FKMatch, FKAction, ConstraintDeferrable, dialects
class SQLConstraint:
'''This abstract base class is the root of the class hierarchy for table
constraints.'''
def __init__(self, sql_name=None):
self.sql_name = sql_name
self.name = None
def __set_name__(self, owner, name):
self.name = name
if self.sql_name is None:
self.sql_name = name
def sql_ddl(self):
'''This method returns a string containing suitable SQL DDL
instructions (in the specified database dialect) that can be inserted
into a CREATE or ALTER TABLE command to apply the required table
constraint.'''
raise NotImplementedError
class CustomConstraint(SQLConstraint):
'''Due to the very varied capabilities of table constraints in different
databases, and the varied syntax for how they are to be specified, it may
not always be possible to use a specific SQLConstraint subclass to achieve
the desired effect. The CustomConstraint class takes a string of pre-formed
SQL to form the text of the constraint. The class user is responsible for
ensuring portability between databases, if desired.'''
def __init__(self, constraint_sql, **kwargs):
super().__init__(**kwargs)
self.constraint_sql = constraint_sql
def sql_ddl(self):
return 'CONSTRAINT '+self.sql_name+' '+self.constraint_sql
class CheckConstraint(SQLConstraint):
'''Check constraints take an SQL expression and ensure that holds for every
row in the table. The class user is responsible for ensuring portability
between databases, if desired.'''
def __init__(self, check_sql, **kwargs):
super().__init__(**kwargs)
self.check_sql = check_sql
def sql_ddl(self):
return 'CONSTRAINT '+self.sql_name+' CHECK ('+self.check_sql+')'
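# For illustration (the constraint and column names here are hypothetical):
# CheckConstraint('amount >= 0', sql_name='chk_amount').sql_ddl() would return
# 'CONSTRAINT chk_amount CHECK (amount >= 0)'.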
class ColumnsConstraint(SQLConstraint):
'''ColumnsConstraint is an abstract subclass of SQLConstraint which factors
out the commonalities between UNIQUE constraints and PRIMARY KEY
constraints.'''
def __init__(self, column_names, sql_options='', **kwargs):
super().__init__(**kwargs)
if isinstance(column_names, str):
self.column_names = (column_names,)
else:
self.column_names = column_names
self.sql_column_names = None # Will be filled out by SQLRecordMetaClass
self.sql_options = sql_options
def sql_ddl(self):
raise NotImplementedError
class UniqueConstraint(ColumnsConstraint):
'''This class is used to create UNIQUE constraints. Depending on the
database, this may automatically create an index covering the columns.'''
def sql_ddl(self):
result = 'CONSTRAINT ' + self.sql_name + ' UNIQUE ('
result += ', '.join(self.sql_column_names)
result += ') ' + self.sql_options
return result
class PrimaryKeyConstraint(ColumnsConstraint):
'''This class is used to create PRIMARY KEY constraints. Depending on the
database, this may automatically create an index covering the columns.'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
def sql_ddl(self):
result = 'CONSTRAINT ' + self.sql_name + ' PRIMARY KEY ('
result += ', '.join(self.sql_column_names)
result += ') ' + self.sql_options
return result
class ForeignKeyConstraint(ColumnsConstraint):
'''This class is used to create FOREIGN KEY constraints. Depending on the
database, this may require the referenced columns to be indexed. If
sql_reference_names is None then it is assumed the referenced columns have
the same names as the columns in the current table.'''
def __init__(self, foreign_table, foreign_schema=None, sql_reference_names=None,
match=FKMatch.SIMPLE, on_update=FKAction.NO_ACTION, on_delete=FKAction.NO_ACTION,
deferrable=ConstraintDeferrable.DEFERRABLE_INITIALLY_DEFERRED, **kwargs):
super().__init__(**kwargs)
self.foreign_table = foreign_table
self.match = match
# Note that by default operations that affect FK constraints in other tables will not be
# permitted (as is usual in SQL) but that FK constraints will be deferred by default to the
# end of the transaction. This allows SQLTransaction to delete all rows in all tables
# associated with the transaction without having to first do a topological sort to work out
# what ordering of deletions will avoid spurious FK constraint errors.
self.on_update = on_update
self.on_delete = on_delete
self.deferrable = deferrable
# Note that we cannot type-check the foreign_schema to avoid circular dependencies
if foreign_schema is None:
self.foreign_schema = None
elif isinstance(foreign_schema, SQLSchemaBase):
self.foreign_schema = foreign_schema
else:
raise TypeError('foreign_schema must be an instance of pyxact.schemas.SQLSchema')
if isinstance(sql_reference_names, str):
self.sql_reference_names = (sql_reference_names,)
else:
self.sql_reference_names = sql_reference_names
# If sql_reference_names is None it will be over-ridden by the sql_column_names
# in SQLRecordMetaClass
def sql_ddl(self):
dialect = dialects.DefaultDialect
if self.foreign_schema is None:
foreign_table = self.foreign_table
else:
foreign_table = self.foreign_schema.qualified_name(self.foreign_table)
result = 'CONSTRAINT ' + self.sql_name + ' FOREIGN KEY ('
result += ', '.join(self.sql_column_names)
result += ') REFERENCES ' + foreign_table + ' ('
result += ', '.join(self.sql_reference_names)
result += ') '
result += dialect.foreign_key_match_sql[self.match] + ' '
result += 'ON DELETE ' + dialect.foreign_key_action_sql[self.on_delete] + ' '
result += 'ON UPDATE ' + dialect.foreign_key_action_sql[self.on_update] + ' '
result += dialect.constraint_deferrable_sql[self.deferrable] + ' '
result += self.sql_options
return result
```
#### File: pyxact/pyxact/dialects.py
```python
import datetime
import decimal
import enum
import re
from . import IsolationLevel, FKAction, FKMatch, ConstraintDeferrable
# These are the default SQL strings that correspond to enumerations in the package
ISOLATION_LEVEL_SQL = {IsolationLevel.MANUAL_TRANSACTIONS : 'ERROR',
IsolationLevel.READ_UNCOMMITTED : 'READ UNCOMMITTED',
IsolationLevel.READ_COMMITTED : 'READ COMMITTED',
IsolationLevel.REPEATABLE_READ : 'REPEATABLE READ',
IsolationLevel.SERIALIZABLE : 'SERIALIZABLE'}
FOREIGN_KEY_MATCH_SQL = {FKMatch.SIMPLE : 'MATCH SIMPLE',
FKMatch.PARTIAL : 'MATCH PARTIAL',
FKMatch.FULL : 'MATCH FULL'}
FOREIGN_KEY_ACTION_SQL = {FKAction.NO_ACTION : 'NO ACTION',
FKAction.RESTRICT : 'RESTRICT',
FKAction.CASCADE : 'CASCADE',
FKAction.SET_NULL : 'SET NULL',
FKAction.SET_DEFAULT : 'SET DEFAULT'}
CONSTRAINT_DEFERRABLE_SQL = {ConstraintDeferrable.NOT_DEFERRABLE : 'NOT DEFERRABLE',
ConstraintDeferrable.DEFERRABLE_INITIALLY_DEFERRED :
'DEFERRABLE INITIALLY DEFERRED',
ConstraintDeferrable.DEFERRABLE_INITIALLY_IMMEDIATE :
'DEFERRABLE INITIALLY IMMEDIATE'}
SCHEMA_SEPARATOR_REGEXP = re.compile(r'\{([^\}\.]+)\.([^\}\.]+)\}', re.UNICODE)
def convert_schema_sep(sql_text, separator='.'):
'''Find any instances of '{schema.obj}' in the sql_text parameter and
return a string using the given separator character 'schema.obj'. This is
used to emulate SQL schema on databases that don't really support them.'''
match = SCHEMA_SEPARATOR_REGEXP.search(sql_text)
result = ''
current_pos = 0
if match:
while match:
result += sql_text[current_pos:match.start()] + match[1] + separator + match[2]
current_pos = match.end()
match = SCHEMA_SEPARATOR_REGEXP.search(sql_text, match.end())
result += sql_text[current_pos:]
return result
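# For example, convert_schema_sep('SELECT * FROM {accounting.journals}') returns
# 'SELECT * FROM accounting.journals', and with separator='_' it returns
# 'SELECT * FROM accounting_journals' for databases without real schema support.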
class TransactionContext:
'''This is a small helper context manager class that allows the dialect.transaction method to
be used in a 'with' statement. A transaction will have been begun, and at the end of the 'with'
block or when an exception occurs the transaction will be committed or rolled-back as
appropriate.
This can also be used as an async context manager. This will assume that the cursor provided
has coroutines for its cursor.execute method rather than regular methods.'''
def __init__(self, cursor,
on_entry='BEGIN TRANSACTION;',
on_success='COMMIT;',
on_exception='ROLLBACK;'):
self.cursor = cursor
self.on_entry = on_entry
self.on_success = on_success
self.on_exception = on_exception
def __enter__(self):
if self.on_entry:
self.cursor.execute(self.on_entry)
return self
async def __aenter__(self):
if self.on_entry:
await self.cursor.execute(self.on_entry)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
if self.on_success:
self.cursor.execute(self.on_success)
else:
if self.on_exception:
self.cursor.execute(self.on_exception)
return False
async def __aexit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
if self.on_success:
await self.cursor.execute(self.on_success)
else:
if self.on_exception:
await self.cursor.execute(self.on_exception)
return False
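# A minimal usage sketch (assuming 'cursor' is an open DB-API cursor): the context manager
# runs its on_entry SQL when entered and the COMMIT or ROLLBACK string on exit, so callers
# can write
#     with TransactionContext(cursor):
#         cursor.execute('INSERT ...')
# and rely on the transaction being closed appropriately. The begin_transaction() methods
# below return such an object preconfigured for the dialect in use.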
class SQLDialect:
'''This is an abstract base class from which concrete dialect classes should be derived.'''
@classmethod
def parameter(cls, number=1, start=1):
'''Return a string that represents a parameter placeholder in the query strings in the
format required by the database adaptor.'''
return '?, '*(number-1) + '?'
@classmethod
def parameter_values(cls, names, start=1, concat=','):
'''Return a string of the pattern 'name1=$1, name2=$2' etc. for the names contained in the
list 'names', starting with parameter number 'start' (where appropriate). The 'concat'
parameter is used to separate the pairs.'''
result = ''
for name in names[:-1]:
result += name + '=? ' + concat + ' '
return result + names[-1]+'=?'
schema_support = True
store_decimal_as_text = False
store_date_time_datetime_as_text = False
enum_support = False
foreign_key_match_sql = FOREIGN_KEY_MATCH_SQL
foreign_key_action_sql = FOREIGN_KEY_ACTION_SQL
constraint_deferrable_sql = CONSTRAINT_DEFERRABLE_SQL
truncate_table_sql = '''TRUNCATE TABLE {table_name};'''
truncate_table_cascade_sql = '''TRUNCATE TABLE {table_name} CASCADE;'''
create_sequence_sql = ('',)
nextval_sequence_sql = ('',)
reset_sequence_sql = ('',)
create_view_sql = '''CREATE VIEW IF NOT EXISTS'''
index_specifies_schema = True
@classmethod
def sql_repr(cls, value):
'''This method returns the value in the form expected by the particular
database and database adaptor specified by the dialect parameter. It
exists to handle cases where the database adaptor cannot accept the
Python type being used - for example while SQL NUMERIC types map quite
well to Python decimal.Decimal types, the sqlite3 database adaptor does
not recognise them, so string values must be stored.'''
return value
@classmethod
def begin_transaction(cls, cursor, isolation_level=None):
'''This method starts a new transaction using the database cursor and the (optional)
isolation level specified, which should be one of the IsolationLevel enum values. It
returns a context manager so must be used in a 'with' statement.'''
raise NotImplementedError
@classmethod
def commit_transaction(cls, cursor, isolation_level=None):
'''This method commits a transaction using the database cursor. The isolation level can be
specified in order to cover cases where MANUAL_TRANSACTIONS (i.e. no automatic management)
is desired.'''
raise NotImplementedError
@classmethod
def rollback_transaction(cls, cursor, isolation_level=None):
'''This method rolls back a transaction using the database cursor. The isolation level can
be specified in order to cover cases where MANUAL_TRANSACTIONS (i.e. no automatic
management) is desired.'''
raise NotImplementedError
@staticmethod
def create_enum_type(cursor, py_type, sql_name, sql_schema=None):
'''Create an enum type in the database for the given py_type under the name sql_name
in the sql_schema (if given).'''
raise NotImplementedError
class sqliteDialect(SQLDialect):
'''This class contains information used internally to generate suitable SQL
for use with the standard library interface to SQLite3, the embedded
database engine that usually comes supplied with Python.'''
schema_support = False
store_decimal_as_text = False
store_date_time_datetime_as_text = True
enum_support = False
foreign_key_match_sql = FOREIGN_KEY_MATCH_SQL
foreign_key_action_sql = FOREIGN_KEY_ACTION_SQL
constraint_deferrable_sql = CONSTRAINT_DEFERRABLE_SQL
truncate_table_sql = '''DELETE FROM {table_name};'''
truncate_table_cascade_sql = truncate_table_sql
create_sequence_sql = ('''
CREATE TABLE IF NOT EXISTS {qualified_name} (start {index_type},
interval {index_type},
lastval {index_type},
nextval {index_type});''',
'''INSERT INTO {qualified_name} VALUES '''
'''({start},{interval},{start},{start});''')
nextval_sequence_sql = ('''UPDATE {qualified_name} SET lastval=nextval, '''
'''nextval=nextval+interval;''',
'''SELECT lastval FROM {qualified_name};''')
reset_sequence_sql = ('''UPDATE {qualified_name} SET lastval=start, nextval=start;''',)
create_view_sql = '''CREATE VIEW IF NOT EXISTS'''
index_specifies_schema = True
@classmethod
def sql_repr(cls, value):
if isinstance(value, bool):
return 1 if value else 0
if isinstance(value, (int, float, str, bytes)) or value is None:
return value
if isinstance(value, decimal.Decimal):
return str(value)
if isinstance(value, datetime.datetime):
if value.tzinfo:
return value.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
return value.strftime('%Y-%m-%dT%H:%M:%S.%f')
if isinstance(value, datetime.date):
return value.strftime('%Y-%m-%d')
if isinstance(value, datetime.time):
return value.strftime('%H:%M:%S.%f')
if isinstance(value, enum.Enum):
return value.value
raise TypeError('sqlite3 Python module cannot handle type {}'.format(str(type(value))))
@classmethod
def begin_transaction(cls, cursor, isolation_level=None):
if isolation_level == IsolationLevel.MANUAL_TRANSACTIONS:
return TransactionContext(cursor, None, None, None)
# Note that while SQLite does support READ_UNCOMMITTED, it is a per-session pragma and not
# per-transaction, which makes it harder to use reliably. We always leave the setting on
# the default, which is the maximalist SERIALIZABLE isolation level.
return TransactionContext(cursor, 'BEGIN TRANSACTION;', 'COMMIT;', 'ROLLBACK;')
@classmethod
def commit_transaction(cls, cursor, isolation_level=None):
if isolation_level != IsolationLevel.MANUAL_TRANSACTIONS:
cursor.execute('COMMIT;')
@classmethod
def rollback_transaction(cls, cursor, isolation_level=None):
if isolation_level != IsolationLevel.MANUAL_TRANSACTIONS:
cursor.execute('ROLLBACK;')
@staticmethod
def create_enum_type(cursor, py_type, sql_name, sql_schema=None):
'''Enum are not supported natively in SQLite, so nothing is necessary to create them.'''
# This will be used by routines when no dialect is specified. It is not a
# constant as it is intended that it may be over-ridden by package users
DefaultDialect = sqliteDialect
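# For example, the examples' utils.py switches the package-wide default to PostgreSQL with:
#     import pyxact.psycopg2
#     dialects.DefaultDialect = pyxact.psycopg2.Psycopg2Dialect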
```
#### File: pyxact/pyxact/hints.py
```python
from . import dialects
class HV():
'''This is a simple wrapper for strings that are to be passed to SQLField types to indicate that
the string should be looked up via the SQLField's associated hint, rather than stored directly
in the field.'''
__slots__ = ['contents',]
def __init__(self, contents):
self.contents = contents
def __repr__(self):
return self.contents
class Hint():
'''This class contains an SQL SELECT query string (without trailing ';') that returns a
two-column 'key' and 'value' result. This can be used to provide mappings between id columns
used in the database and human-readable values. It provides methods to find the key (id column
value) associated with a given value (human-readable value), get the value associated with a
key, and to list the values.'''
__slots__ = ['query', 'query_noschema']
def __init__(self, query):
self.query = dialects.convert_schema_sep(query, '.')
self.query_noschema = dialects.convert_schema_sep(query, '_')
def get(self, key, cursor):
'''Return the human-readable value associated with the key value, or return None.'''
dialect = dialects.DefaultDialect
base_query = (self.query if dialect.schema_support
else self.query_noschema)
query = 'SELECT value FROM (' + base_query + ') WHERE '
query += dialect.parameter_values(('key',))
query += ';'
cursor.execute(query, (dialect.sql_repr(key),))
result = cursor.fetchone()
if result:
return result[0]
return None
def find(self, value, cursor):
'''Find the key value associated with the human-readable value, or return None if this
fails.'''
dialect = dialects.DefaultDialect
base_query = (self.query if dialect.schema_support
else self.query_noschema)
query = 'SELECT key FROM (' + base_query + ') WHERE '
query += dialect.parameter_values(('value',))
query += ';'
cursor.execute(query, (dialect.sql_repr(value),))
result = cursor.fetchone()
if result:
return result[0]
return None
def list_values(self, cursor):
'''A generator expression that yields all of the human-readable values in turn.'''
dialect = dialects.DefaultDialect
base_query = (self.query if dialect.schema_support
else self.query_noschema)
query = 'SELECT value FROM (' + base_query + ');'
cursor.execute(query)
next_row = cursor.fetchone()
while next_row:
yield next_row[0]
next_row = cursor.fetchone()
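# A usage sketch (the table and column names here are hypothetical):
#     account_hint = Hint('SELECT id AS key, name AS value FROM {accounting.accounts}')
#     account_hint.find('Sales', cursor)  # key (id) of the account named 'Sales', or None
#     account_hint.get(1010, cursor)      # human-readable value for key 1010, or None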
```
#### File: pyxact/pyxact/loggingdb.py
```python
import sys
class Cursor:
'''A database cursor facade that implements a subset of DB-API methods and outputs information
on the requests to a file or stdout.'''
def __init__(self, inner_cursor, log_file=sys.stdout):
self.inner_cursor = inner_cursor
self.log_file = log_file
self.context = None
def __enter__(self):
self.context = self.inner_cursor.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return self.context.__exit__(exc_type, exc_val, exc_tb)
def execute(self, sql, params=None):
'''Log a request to execute some SQL with given parameters'''
self.log_file.write("Executed SQL: '{}' with params '{}'\n"
.format(sql, repr(params)))
self.log_file.flush()
if params:
return self.inner_cursor.execute(sql, params)
return self.inner_cursor.execute(sql)
def executemany(self, sql, params=None):
'''Log a request to execute some SQL with multiple sets of parameters'''
self.log_file.write("Executed SQL: '{}' with params:\n"
.format(sql))
self.log_file.flush()
for i in params:
self.log_file.write(repr(i))
self.log_file.write('\n')
self.log_file.write('\n')
return self.inner_cursor.executemany(sql, params)
def fetchone(self):
'''Log a request to return a single result row'''
result = self.inner_cursor.fetchone()
self.log_file.write("Fetched a row:\n")
self.log_file.write(str(result))
self.log_file.write("\n")
return result
def fetchmany(self, size):
'''Log a request to return many result rows'''
result = self.inner_cursor.fetchmany(size)
self.log_file.write("Fetched {} rows\n".format(len(result)))
return result
def fetchall(self):
'''Log a request to return a result'''
result = self.inner_cursor.fetchall()
self.log_file.write("Fetched {} rows\n".format(len(result)))
return result
def copy_from(self, file, table, sep='\t', null='\\N', size=8192, columns=None):
'''Log a request to execute a COPY command to upload bulk data. This is a
Postgresql/psycopg-specific command'''
self.log_file.write("Executed a COPY from file '{}' to table: '{}'"
" with params {}\n"
.format(file.name,
table,
repr((sep, null, size, columns)))
)
self.inner_cursor.copy_from(file, table, sep, null, size, columns)
def copy_expert(self, sql, file, size=8192):
'''Log a request to execute a COPY command to upload bulk data. This is a
Postgresql/psycopg-specific command'''
self.log_file.write("Executed a COPY from file '{}' using SQL: '{}'"
" with size '{}'\n"
.format(file.name,
sql,
repr(size))
)
self.inner_cursor.copy_expert(sql, file, size)
def close(self):
'''Close the dummy database cursor object. Does not close the associated output file.'''
self.inner_cursor.close()
class Connection:
'''A database connection facade that implements a subset of DB-API methods and outputs
information on the requests to a file or stdout.'''
def __init__(self, inner_connection, log_file=sys.stdout):
self.inner_connection = inner_connection
self.log_file = log_file
self.log_file.write("***New Log Started***\n\n")
self._autocommit = False
def set_autocommit(self, value):
'''Log an attempt to change the autocommit mode of the mock database connection object'''
self._autocommit = value
self.log_file.write("Set autocommit status to: {}\n".format(value))
self.log_file.flush()
self.inner_connection.set_autocommit(value)
autocommit = property(fset=set_autocommit)
def cursor(self):
'''Create a dummy cursor which uses the same output file.'''
return Cursor(self.inner_connection.cursor(), self.log_file)
def commit(self):
'''Log a request to commit a transaction.'''
self.log_file.write("Committed transaction\n")
self.log_file.flush()
self.inner_connection.commit()
def close(self):
'''Close the database facade. Also closes the file associated with the object unless that
is sys.stdout.'''
self.log_file.write("Closing connection\n\n")
self.log_file.flush()
if self.log_file != sys.stdout:
self.log_file.close()
self.inner_connection.close()
def execute(self, sql, params=None):
'''Log a request to execute some SQL with given parameters'''
self.log_file.write("Executed SQL: '{}' with params '{}'\n"
.format(sql, repr(params)))
self.log_file.flush()
if params:
self.inner_connection.execute(sql, params)
else:
self.inner_connection.execute(sql)
```
#### File: pyxact/pyxact/psycopg2.py
```python
import enum
from . import dialects
from . import IsolationLevel
class Psycopg2Dialect(dialects.SQLDialect):
'''This is a singleton class that defines the variant of SQL supported by PostgreSQL and the
pyscopg2 database adaptor.'''
@classmethod
def parameter(cls, number=1, start=1):
return '%s, '*(number-1) + '%s'
@classmethod
def parameter_values(cls, names: list, start=1, concat=','):
result = ''
for name in names[:-1]:
result += name + '=%s ' + concat + ' '
return result + names[-1]+'=%s'
schema_support = True
store_decimal_as_text = False
store_date_time_datetime_as_text = False
enum_support = True
foreign_key_match_sql = dialects.FOREIGN_KEY_MATCH_SQL
foreign_key_action_sql = dialects.FOREIGN_KEY_ACTION_SQL
constraint_deferrable_sql = dialects.CONSTRAINT_DEFERRABLE_SQL
truncate_table_sql = '''TRUNCATE TABLE {table_name} RESTRICT;'''
truncate_table_cascade_sql = '''TRUNCATE TABLE {table_name} CASCADE;'''
create_sequence_sql = ('''CREATE SEQUENCE IF NOT EXISTS {qualified_name} AS {index_type} '''
'''START {start}''',)
nextval_sequence_sql = ('''SELECT nextval('{qualified_name}');''',)
reset_sequence_sql = ('''ALTER SEQUENCE {qualified_name} RESTART''',)
create_view_sql = '''CREATE OR REPLACE VIEW'''
index_specifies_schema = False
@classmethod
def sql_repr(cls, value):
'''This method returns the value in the form expected by the particular database and
database adaptor specified by the dialect parameter. The psycopg2 adaptor handles most
Python types transparently, so this function does not have to do anything.'''
if isinstance(value, enum.Enum):
return value.name
return value
@classmethod
def begin_transaction(cls, cursor, isolation_level=None):
if isolation_level == IsolationLevel.MANUAL_TRANSACTIONS:
return dialects.TransactionContext(cursor, None, None, None)
if isolation_level:
cmd = 'BEGIN TRANSACTION ISOLATION LEVEL '
cmd += dialects.ISOLATION_LEVEL_SQL[isolation_level]
cmd += ';'
else:
cmd = 'BEGIN TRANSACTION;'
return dialects.TransactionContext(cursor, cmd, 'COMMIT;', 'ROLLBACK;')
@classmethod
def commit_transaction(cls, cursor, isolation_level=None):
if isolation_level != IsolationLevel.MANUAL_TRANSACTIONS:
cursor.execute('COMMIT;')
@classmethod
def rollback_transaction(cls, cursor, isolation_level=None):
if isolation_level != IsolationLevel.MANUAL_TRANSACTIONS:
cursor.execute('ROLLBACK;')
@staticmethod
def create_enum_type(cursor, py_type, sql_name, sql_schema=None):
'''Create an enum type in the PostgreSQL database for the given py_type under the name
sql_name in the sql_schema (if given).'''
if sql_schema:
qual_sql_name = sql_schema + '.' + sql_name
else:
qual_sql_name = sql_name
enum_values = ', '.join(["'" + x.name + "'" for x in py_type])
# Using 'DROP TYPE IF EXISTS ... CASCADE' does not work here, because it will silently alter
# existing tables to remove columns using the enum.
cursor.execute('''DO $$ BEGIN
CREATE TYPE {} AS ENUM ({});
EXCEPTION
WHEN duplicate_object THEN null;
END $$;'''.format(qual_sql_name, enum_values))
``` |
{
"source": "jhumphry/regressions",
"score": 4
} |
#### File: regressions/examples/kpls_example.py
```python
import numpy as np
import matplotlib.pyplot as plt
from regressions import kernel_pls, kernels
def z(x):
'''Example function'''
return 4.26 * (np.exp(-x) - 4 * np.exp(-2.0*x) + 3 * np.exp(-3.0*x))
# Define the kernel
kern = kernels.make_gaussian_kernel(width=1.8)
# Create sample data
x_values = np.linspace(0.0, 3.5, 100)
z_pure = z(x_values)
z_pure -= z_pure.mean(0) # Ensure z_pure is centered
noise = np.random.normal(loc=0.0, scale=0.2, size=100)
z_noisy = z_pure + noise
z_noisy -= z_noisy.mean(0) # Ensure z_noisy is centered
# Perform Kernel PLS
kpls_1 = kernel_pls.Kernel_PLS(X=x_values,
Y=z_noisy,
g=1,
X_kernel=kern)
kpls_1_results = kpls_1.prediction(x_values)
kpls_4 = kernel_pls.Kernel_PLS(X=x_values,
Y=z_noisy,
g=4,
X_kernel=kern)
kpls_4_results = kpls_4.prediction(x_values)
kpls_8 = kernel_pls.Kernel_PLS(X=x_values,
Y=z_noisy,
g=8,
X_kernel=kern)
kpls_8_results = kpls_8.prediction(x_values)
# Plot the results of the above calculations
fig = plt.figure('An example of Kernel PLS regression')
plt.title('An example of Kernel PLS regression')
plt.plot(x_values, z_pure, 'k-', label='$z(.)$')
plt.plot(x_values, z_noisy, 'k+', label='$z(.)$ with noise')
plt.plot(x_values, kpls_1_results, 'k--', label='KPLS 1C')
plt.plot(x_values, kpls_4_results, 'k:', label='KPLS 4C')
plt.plot(x_values, kpls_8_results, 'k-.', label='KPLS 8C')
plt.legend(loc=4)
plt.show()
fig.clear()
# Plot some of the extracted components
# These figures plot the underlying function based on 100 (xi, z(xi)) pairs
# as a dotted line in the original problem space. The component extracted
# is a single vector in the 100-dimensional transformed feature space. Each
# dimension in feature space corresponds to a K(., xi) kernel function. As
# the kernel in this case is the Gaussian kernel, which is spatially
# localised, it is workable to map each K(., xi) function to the
# x-coordinate xi for display in this manner. In the general case,
# meaningfully plotting the components in kernel space is likely to be
# difficult.
fig = plt.figure('Components found in Kernel PLS regression')
fig.set_tight_layout(True)
for i in range(0, 8):
plt.subplot(4, 2, (i+1))
plt.title('Kernel PLS component {}'.format((i+1)))
plt.plot(x_values, z_pure, 'k--')
plt.plot(x_values, kpls_8.P[:, i], 'k-')
plt.gca().set_ybound(lower=-1.5, upper=1.0)
plt.show()
fig.clear()
```
#### File: regressions/regressions/cls.py
```python
from . import *
class CLS(RegressionBase):
"""Classical Least Squares Regression
The classical least squares regression approach is to initially swap the
roles of the X and Y variables, perform linear regression and then to
invert the result. It is useful when the number of X variables is larger
than the number of calibration samples available, when conventional
multiple linear regression would be unable to proceed.
Note :
The regression matrix A_pinv is found using the pseudo-inverse. In
order for this to be calculable, the number of calibration samples
``N`` has to be larger than the number of Y variables ``m``, the
number of X variables ``n`` must at least equal the number of Y
variables, there must not be any collinearities in the calibration Y
data and Yt X must be non-singular.
Args:
X (ndarray N x n): X calibration data, one row per data sample
Y (ndarray N x m): Y calibration data, one row per data sample
Attributes:
A (ndarray m x n): Resulting regression matrix of X on Y
A_pinv (ndarray m x n): Pseudo-inverse of A
"""
def __init__(self, X, Y):
Xc, Yc = super()._prepare_data(X, Y)
if Yc.shape[0] <= Yc.shape[1]:
raise ParameterError('CLS requires more rows (data samples) than '
'output variables (columns of Y data)')
if Xc.shape[1] < Yc.shape[1]:
raise ParameterError('CLS requires at least as many input variables '
'(columns of X data) as output variables '
'(columns of Y data)')
self.A = linalg.inv(Yc.T @ Yc) @ Yc.T @ Xc
self.A_pinv = self.A.T @ linalg.inv(self.A @ self.A.T)
def prediction(self, Z):
"""Predict the output resulting from a given input
Args:
Z (ndarray of floats): The input on which to make the
prediction. Must either be a one dimensional array of the
same length as the number of calibration X variables, or a
two dimensional array with the same number of columns as
the calibration X data and one row for each input row.
Returns:
Y (ndarray of floats) : The predicted output - either a one
dimensional array of the same length as the number of
calibration Y variables or a two dimensional array with the
same number of columns as the calibration Y data and one row
for each input row.
"""
if len(Z.shape) == 1:
if Z.shape[0] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
return self.Y_offset + (Z - self.X_offset) @ self.A_pinv
else:
if Z.shape[1] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
result = np.empty((Z.shape[0], self.Y_variables))
for i in range(0, Z.shape[0]):
result[i, :] = self.Y_offset + (Z[i, :] - self.X_offset) \
@ self.A_pinv
return result
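# A minimal usage sketch with synthetic data (shapes follow the docstring: N samples,
# n X variables, m Y variables, with N > m and n >= m):
#     import numpy as np
#     X = np.random.randn(50, 10)        # N=50, n=10
#     Y = X @ np.random.randn(10, 3)     # m=3
#     model = CLS(X, Y)
#     y_hat = model.prediction(X[0, :])  # predicted outputs for a single input row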
```
#### File: regressions/regressions/kernel_pls.py
```python
import random
from . import *
class Kernel_PLS(RegressionBase):
"""Non-linear Kernel PLS regression using the PLS2 algorithm
This class implements kernel PLS regression by transforming the input
X data into feature space by applying a kernel function between each
pair of inputs. The kernel function provided will be called with two
vectors and should return a float. Kernels should be symmetrical with
regard to the order in which the vectors are supplied. The PLS2
algorithm is then applied to the transformed data. The application of
the kernel function means that non-linear transformations are
possible.
Note:
If ``ignore_failures`` is ``True`` then the resulting object
may have fewer components than requested if convergence does
not succeed.
Args:
X (ndarray N x n): X calibration data, one row per data sample
Y (ndarray N x m): Y calibration data, one row per data sample
g (int): Number of components to extract
X_kernel (function): Kernel function
max_iterations (int, optional) : Maximum number of iterations of
NIPALS to attempt
iteration_convergence (float, optional): Difference in norm
between two iterations at which point the iteration will be
considered to have converged.
ignore_failures (boolean, optional): Do not raise an error if
iteration has to be abandoned before the requested number
of components have been recovered
Attributes:
components (int): number of components extracted (=g)
X_training_set (ndarray N x n): X calibration data (centred)
K (ndarray N x N): X calibration data transformed into feature space
P (ndarray N x g): Loadings on K (Components extracted from data)
Q (ndarray m x g): Loadings on Y (Components extracted from data)
T (ndarray N x g): Scores on K
U (ndarray N x g): Scores on Y
B_RHS (ndarray N x m): Partial regression matrix
"""
def __init__(self, X, Y, g, X_kernel,
max_iterations=DEFAULT_MAX_ITERATIONS,
iteration_convergence=DEFAULT_EPSILON,
ignore_failures=True):
Xc, Yc = super()._prepare_data(X, Y)
self.X_training_set = Xc
self.X_kernel = X_kernel
K = np.empty((self.data_samples, self.data_samples))
for i in range(0, self.data_samples):
for j in range(0, i):
K[i, j] = X_kernel(Xc[i, :], Xc[j, :])
K[j, i] = K[i, j]
K[i, i] = X_kernel(Xc[i, :], Xc[i, :])
centralizer = (np.identity(self.data_samples)) - \
(1.0 / self.data_samples) * \
np.ones((self.data_samples, self.data_samples))
K = centralizer @ K @ centralizer
self.K = K
T = np.empty((self.data_samples, g))
Q = np.empty((self.Y_variables, g))
U = np.empty((self.data_samples, g))
P = np.empty((self.data_samples, g))
self.components = 0
K_j = K
Y_j = Yc
for j in range(0, g):
u_j = Y_j[:, random.randint(0, self.Y_variables-1)]
iteration_count = 0
iteration_change = iteration_convergence * 10.0
while iteration_count < max_iterations and \
iteration_change > iteration_convergence:
w_j = K_j @ u_j
t_j = w_j / np.linalg.norm(w_j, 2)
q_j = Y_j.T @ t_j
old_u_j = u_j
u_j = Y_j @ q_j
u_j /= np.linalg.norm(u_j, 2)
iteration_change = linalg.norm(u_j - old_u_j)
iteration_count += 1
if iteration_count >= max_iterations:
if ignore_failures:
break
else:
raise ConvergenceError('PLS2 failed to converge for '
'component: '
'{}'.format(self.components+1))
T[:, j] = t_j
Q[:, j] = q_j
U[:, j] = u_j
P[:, j] = (K_j.T @ w_j) / (w_j @ w_j)
deflator = (np.identity(self.data_samples) - np.outer(t_j.T, t_j))
K_j = deflator @ K_j @ deflator
Y_j = Y_j - np.outer(t_j, q_j.T)
self.components += 1
# If iteration stopped early because of failed convergence, only
# the actual components will be copied
self.T = T[:, 0:self.components]
self.Q = Q[:, 0:self.components]
self.U = U[:, 0:self.components]
self.P = P[:, 0:self.components]
self.B_RHS = self.U @ linalg.inv(self.T.T @ self.K @ self.U) @ self.Q.T
def prediction(self, Z):
"""Predict the output resulting from a given input
Args:
Z (ndarray of floats): The input on which to make the
prediction. A one-dimensional array will be interpreted as
a single multi-dimensional input unless the number of X
variables in the calibration data was 1, in which case it
will be interpreted as a set of inputs. A two-dimensional
array will be interpreted as one multi-dimensional input
per row.
Returns:
Y (ndarray of floats) : The predicted output - either a one
dimensional array of the same length as the number of
calibration Y variables or a two dimensional array with the
same number of columns as the calibration Y data and one row
for each input row.
"""
if len(Z.shape) == 1:
if self.X_variables == 1:
Z = Z.reshape((Z.shape[0], 1))
Kt = np.empty((Z.shape[0], self.data_samples))
else:
if Z.shape[0] != self.X_variables:
raise ParameterError('Data provided does not have the '
'same number of variables as the '
'original X data')
Z = Z.reshape((1, Z.shape[0]))
Kt = np.empty((1, self.data_samples))
else:
if Z.shape[1] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
Kt = np.empty((Z.shape[0], self.data_samples))
for i in range(0, Z.shape[0]):
for j in range(0, self.data_samples):
Kt[i, j] = self.X_kernel(Z[i, :] - self.X_offset,
self.X_training_set[j, :])
centralizer = (1.0 / self.data_samples) * \
np.ones((Z.shape[0], self.data_samples))
Kt = (Kt - centralizer @ self.K) @ \
(np.identity(self.data_samples) -
(1.0 / self.data_samples) * np.ones(self.data_samples))
# Fix centralisation - appears to be necessary but not usually
# mentioned in papers
Kt -= Kt.mean(0)
return self.Y_offset + Kt @ self.B_RHS
```
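A minimal usage sketch for `Kernel_PLS` (an editor's illustration, not part of the file above): it assumes the package is importable as `regressions` and uses a simple Gaussian (RBF) kernel on synthetic data.
```python
import numpy as np
from regressions.kernel_pls import Kernel_PLS  # assumed package/import layout

rng = np.random.RandomState(0)
X = rng.uniform(-1.0, 1.0, size=(60, 3))      # 60 samples, 3 X variables
Y = np.sin(X.sum(axis=1)).reshape(-1, 1)      # 1 Y variable, non-linear in X

def rbf_kernel(a, b, gamma=2.0):
    """Symmetric Gaussian kernel between two 1-D sample vectors."""
    diff = a - b
    return float(np.exp(-gamma * (diff @ diff)))

model = Kernel_PLS(X, Y, g=4, X_kernel=rbf_kernel)   # extract 4 components
Y_fit = model.prediction(X)                          # (60, 1) fitted values
print(model.components, Y_fit.shape)
```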
#### File: regressions/regressions/mlr.py
```python
from . import *
class MLR(RegressionBase):
"""Multiple Linear Regression
Standard multiple linear regression assumes the relationship between the
    variables (once the means have been subtracted to center the data)
    is Y = A X + E, where E is zero-mean noise.
Note :
The regression matrix B is found using the pseudo-inverse. In
order for this to be calculable, the number of calibration samples
        ``N`` has to be larger than the number of X variables ``n``, and
there must not be any collinearities in the calibration X data.
Args:
X (ndarray N x n): X calibration data, one row per data sample
Y (ndarray N x m): Y calibration data, one row per data sample
Attributes:
        B (ndarray n x m): Resulting regression matrix
"""
def __init__(self, X, Y):
Xc, Yc = super()._prepare_data(X, Y)
if Xc.shape[0] <= Xc.shape[1]:
raise ParameterError('MLR requires more rows (data samples) than '
'input variables (columns of X data)')
self.B = linalg.inv(Xc.T @ Xc) @ Xc.T @ Yc
def prediction(self, Z):
"""Predict the output resulting from a given input
Args:
Z (ndarray of floats): The input on which to make the
prediction. Must either be a one dimensional array of the
same length as the number of calibration X variables, or a
two dimensional array with the same number of columns as
the calibration X data and one row for each input row.
Returns:
Y (ndarray of floats) : The predicted output - either a one
dimensional array of the same length as the number of
calibration Y variables or a two dimensional array with the
same number of columns as the calibration Y data and one row
for each input row.
"""
if len(Z.shape) == 1:
if Z.shape[0] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
return self.Y_offset + (Z - self.X_offset) @ self.B
else:
if Z.shape[1] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
result = np.empty((Z.shape[0], self.Y_variables))
for i in range(0, Z.shape[0]):
result[i, :] = self.Y_offset + (Z[i, :] - self.X_offset) \
@ self.B
return result
```
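A short sketch of the class above (editor's illustration; the `regressions.mlr` import path is assumed). On centred data the fit is the usual least-squares solution B = (XᵀX)⁻¹XᵀY, so recovering a known linear relationship is a quick sanity check:
```python
import numpy as np
from regressions.mlr import MLR  # assumed package/import layout

rng = np.random.RandomState(1)
X = rng.normal(size=(100, 4))                      # N=100 samples, n=4 variables
true_B = np.array([[1.0], [-2.0], [0.5], [3.0]])   # known coefficients
Y = X @ true_B + 0.01 * rng.normal(size=(100, 1))

model = MLR(X, Y)
print(model.B.round(2))                # should be close to true_B (n x m = 4 x 1)
print(model.prediction(X[0]))          # single-sample prediction, shape (1,)
print(model.prediction(X[:5]).shape)   # batch prediction, shape (5, 1)
```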
#### File: regressions/regressions/pcr.py
```python
import random
from . import *
class PCR_NIPALS(RegressionBase):
"""Principal Components Regression using the NIPALS algorithm
PCR forms a set of new latent variables from the provided X data
samples which describe as much of the variance in the X data as
possible. The latent variables are then regressed against the provided
Y data. PCR is connected with Principal Components Analysis, where the
latent variables are referred to as Principal Components.
This class uses the Non-linear Iterative PArtial Least Squares
algorithm to extract the components. Either a fixed number of
components should be specified using the ``g`` argument, or a target
proportion of variation explained by the components should be
specified via ``variation_explained``. The variables of the X and Y
data can have their variances standardized. This is useful if they are
of heterogeneous types as otherwise the components extracted can be
dominated by the effects of different measurement scales rather than
by the actual data.
Note:
If ``ignore_failures`` is ``True`` then the resulting object
may have fewer components than requested if convergence does
not succeed.
Args:
X (ndarray N x n): X calibration data, one row per data sample
Y (ndarray N x m): Y calibration data, one row per data sample
g (int): Number of components to extract
variation_explained (float): Proportion of variance in X
calibration data that the components extracted should explain
(from 0.001 - 0.999)
standardize_X (boolean, optional): Standardize the X data
standardize_Y (boolean, optional): Standardize the Y data
max_iterations (int, optional) : Maximum number of iterations of
NIPALS to attempt
iteration_convergence (float, optional): Difference in norm
between two iterations at which point the iteration will be
considered to have converged.
ignore_failures (boolean, optional): Do not raise an error if
            iteration has to be abandoned before the requested number of
            components or the requested explained variation has been achieved.
Attributes:
components (int): number of components extracted (=g)
T (ndarray N x g): Scores
P (ndarray n x g): Loadings (Components extracted from data)
eigenvalues (ndarray g): Eigenvalues extracted
total_variation (float): Total variation in calibration X data
C (ndarray g x m): Regression coefficients
PgC (ndarray n x m): Precalculated matrix product of P (limited to
g components) and C
"""
def __init__(self, X, Y, g=None, variation_explained=None,
standardize_X=False, standardize_Y=False,
max_iterations=DEFAULT_MAX_ITERATIONS,
iteration_convergence=DEFAULT_EPSILON,
ignore_failures=True):
if (g is None) == (variation_explained is None):
raise ParameterError('Must specify either the number of principal '
'components g to use or the proportion of '
'data variance that must be explained.')
if variation_explained is not None:
if variation_explained < 0.001 or\
variation_explained > 0.999:
raise ParameterError('PCR will not reliably be able to use '
'principal components that explain less '
'than 0.1% or more than 99.9% of the '
'variation in the data.')
Xc, Yc = super()._prepare_data(X, Y, standardize_X, standardize_Y)
if g is not None:
if g < 1 or g > self.max_rank:
raise ParameterError('Number of required components specified '
'is impossible.')
if standardize_X:
self.total_variation = self.X_variables * (self.data_samples - 1.0)
else:
self.total_variation = (Xc @ Xc.T).trace()
self._perform_pca(Xc, g, variation_explained,
max_iterations, iteration_convergence,
ignore_failures)
# Find regression parameters
self.Y_offset = Y.mean(0)
Yc = Y - self.Y_offset
if standardize_Y:
self.Y_scaling = Y.std(0, ddof=1)
Yc /= self.Y_scaling
else:
self.Y_scaling = None
self.C = np.diag(1.0 / self.eigenvalues) @ self.T.T @ Yc
self.PgC = self.P @ self.C
def _perform_pca(self, X, g=None, variation_explained=None,
max_iterations=DEFAULT_MAX_ITERATIONS,
iteration_convergence=DEFAULT_EPSILON,
ignore_failures=True):
"""A non-public routine that performs the PCA using an appropriate
        method and sets up self.T, self.P, self.eigenvalues and
self.components."""
T = np.empty((self.data_samples, self.max_rank)) # Scores
P = np.empty((self.X_variables, self.max_rank)) # Loadings
eig = np.empty((self.max_rank,))
self.components = 0
X_j = X
while True:
t_j = X_j[:, random.randint(0, self.X_variables-1)]
iteration_count = 0
iteration_change = iteration_convergence * 10.0
while iteration_count < max_iterations and \
iteration_change > iteration_convergence:
p_j = X_j.T @ t_j
p_j /= np.linalg.norm(p_j, 2) # Normalise p_j vectors
old_t_j = t_j
t_j = X_j @ p_j
iteration_change = linalg.norm(t_j - old_t_j)
iteration_count += 1
if iteration_count >= max_iterations:
if ignore_failures:
break
else:
raise ConvergenceError('NIPALS PCA for PCR failed to '
'converge for component: '
'{}'.format(self.components+1))
X_j = X_j - np.outer(t_j, p_j.T) # Reduce in rank
T[:, self.components] = t_j
P[:, self.components] = p_j
eig[self.components] = t_j @ t_j
self.components += 1
if g is not None:
if self.components == g:
break
if variation_explained is not None:
if eig[0:self.components].sum() >= \
variation_explained * self.total_variation:
break
# Only copy the components actually used
self.T = T[:, 0:self.components]
self.P = P[:, 0:self.components]
self.eigenvalues = eig[0:self.components]
def variation_explained(self):
"""Return the proportion of variation explained
Returns:
variation_explained (float): Proportion of the total variation
in the X data explained by the extracted principal components.
"""
return self.eigenvalues.sum() / self.total_variation
def prediction(self, Z):
"""Predict the output resulting from a given input
Args:
Z (ndarray of floats): The input on which to make the
prediction. Must either be a one dimensional array of the
same length as the number of calibration X variables, or a
two dimensional array with the same number of columns as
the calibration X data and one row for each input row.
Returns:
Y (ndarray of floats) : The predicted output - either a one
dimensional array of the same length as the number of
calibration Y variables or a two dimensional array with the
same number of columns as the calibration Y data and one row
for each input row.
"""
if len(Z.shape) == 1:
if Z.shape[0] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
elif Z.shape[1] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X data')
tmp = (Z - self.X_offset)
if self.standardized_X:
tmp *= self.X_rscaling
tmp = tmp @ self.PgC
if self.standardized_Y:
tmp *= self.Y_scaling
return self.Y_offset + tmp
class PCR_SVD(PCR_NIPALS):
"""Principal Components Regression using SVD
This class implements PCR with the same mathematical goals as
:py:class:`PCR_NIPALS` but using a different method to extract the
principal components. The convergence criteria in the NIPALS algorithm
can be formulated into an eigenvalue problem and solved directly using
an existing SVD-based solver. This has the advantage of being entirely
deterministic, but the disadvantage that all components have to be
extracted each time, even if only a few are required to explain most
of the variance in X.
Note:
The attributes of the resulting class are exactly the same as for
:py:class:`PCR_NIPALS`.
Args:
X (ndarray N x n): X calibration data, one row per data sample
Y (ndarray N x m): Y calibration data, one row per data sample
g (int): Number of components to extract
variation_explained (float): Proportion of variance in X
calibration data that the components extracted should explain
(from 0.001 - 0.999)
standardize_X (boolean, optional): Standardize the X data
standardize_Y (boolean, optional): Standardize the Y data
max_iterations : Not relevant for SVD
iteration_convergence : Not relevant for SVD
ignore_failures: Not relevant for SVD
"""
def _perform_pca(self, X, g=None, variation_explained=None,
max_iterations=DEFAULT_MAX_ITERATIONS,
iteration_convergence=DEFAULT_EPSILON,
ignore_failures=True):
"""A non-public routine that performs the PCA using an appropriate
method and sets up self.total_variation, self.T, self.P,
self.eignvalues and self.components."""
u, s, v = linalg.svd(X, full_matrices=False)
T = u @ np.diag(s)
P = v.T
eig = (T.T @ T).diagonal()
if g is not None:
self.T = T[:, 0:g]
self.P = P[:, 0:g]
self.eigenvalues = eig[0:g]
self.components = g
else:
cuml = (eig.cumsum()/self.total_variation)
self.components = cuml.searchsorted(variation_explained) + 1
self.T = T[:, 0:self.components]
self.P = P[:, 0:self.components]
self.eigenvalues = eig[0:self.components]
```
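A usage sketch contrasting the two classes above (editor's illustration; the import path is assumed). With the same number of components the NIPALS and SVD variants solve the same problem, so their predictions should agree to within the NIPALS iteration tolerance:
```python
import numpy as np
from regressions.pcr import PCR_NIPALS, PCR_SVD  # assumed package/import layout

rng = np.random.RandomState(2)
X = rng.normal(size=(80, 6))
Y = X[:, :2] @ np.array([[1.5], [-0.5]]) + 0.05 * rng.normal(size=(80, 1))

nipals = PCR_NIPALS(X, Y, g=3)     # iterative component extraction
svd = PCR_SVD(X, Y, g=3)           # deterministic, same underlying model
print(nipals.variation_explained(), svd.variation_explained())
print(nipals.prediction(X[:3]))
print(svd.prediction(X[:3]))       # should agree up to the NIPALS tolerance
```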
#### File: regressions/regressions/pls_sb.py
```python
from . import *
class PLS_SB(RegressionBase):
"""Regression using the PLS-SB algorithm.
The PLS-SB sets up the same mathematical problem as the PLS2 module,
but then formulates the convergence criteria as an eigenvalue problem
and solves it directly. It is therefore a deterministic algorithm, but
has the drawback that all components must be extracted at once, even
if only a few are required. Note that the output of PLS-SB is not the
same as PLS2. In the PLS2 each component found is removed from the
working copies of the input matrices by a rank-1 operation so the next
iterations will converge on a new component. In PLS-SB all components
are found at once.
Args:
X (ndarray N x n): X calibration data, one row per data sample
Y (ndarray N x m): Y calibration data, one row per data sample
g (int): Number of components to extract
Note:
The attributes of the resulting class are exactly the same as for
:py:class:`pls2.PLS2`.
"""
def __init__(self, X, Y, g):
Xc, Yc = super()._prepare_data(X, Y)
if g < 1 or g > self.max_rank:
raise ParameterError('Number of required components '
'specified is impossible.')
self.components = g
XtY = Xc.T @ Yc
_, W = linalg.eigh(XtY @ XtY.T)
self.W = W[:, :-g-1:-1].real
self.T = Xc @ self.W
self.Q = Yc.T @ self.T
self.Q /= np.linalg.norm(self.Q, axis=0)
self.U = Yc @ self.Q
t_dot_t = (self.T.T @ self.T).diagonal()
self.C = np.diag((self.T.T @ self.U).diagonal() / t_dot_t)
self.P = (Xc.T @ self.T) / t_dot_t
self.B = self.W @ linalg.inv(self.P.T @ self.W) @ self.C @ self.Q.T
def prediction(self, Z):
"""Predict the output resulting from a given input
Args:
Z (ndarray of floats): The input on which to make the
prediction. Must either be a one dimensional array of the
same length as the number of calibration X variables, or a
two dimensional array with the same number of columns as
the calibration X data and one row for each input row.
Returns:
Y (ndarray of floats) : The predicted output - either a one
dimensional array of the same length as the number of
calibration Y variables or a two dimensional array with the
same number of columns as the calibration Y data and one row
for each input row.
"""
if len(Z.shape) == 1:
if Z.shape[0] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
return self.Y_offset + (Z - self.X_offset).T @ self.B
else:
if Z.shape[1] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
result = np.empty((Z.shape[0], self.Y_variables))
for i in range(0, Z.shape[0]):
result[i, :] = self.Y_offset + \
(Z[i, :] - self.X_offset).T @ self.B
return result
def prediction_iterative(self, Z):
"""Predict the output resulting from a given input, iteratively
This produces the same output as the one-step version ``prediction``
but works by applying each loading in turn to extract the latent
variables corresponding to the input.
Args:
Z (ndarray of floats): The input on which to make the
prediction. Must either be a one dimensional array of the
same length as the number of calibration X variables, or a
two dimensional array with the same number of columns as
the calibration X data and one row for each input row.
Returns:
Y (ndarray of floats) : The predicted output - either a one
dimensional array of the same length as the number of
calibration Y variables or a two dimensional array with the
same number of columns as the calibration Y data and one row
for each input row.
"""
if len(Z.shape) == 1:
if Z.shape[0] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
x_j = Z - self.X_offset
t = np.empty((self.components))
for j in range(0, self.components):
t[j] = x_j @ self.W[:, j]
x_j = x_j - t[j] * self.P[:, j]
result = self.Y_offset + t @ self.C @ self.Q.T
return result
else:
if Z.shape[1] != self.X_variables:
raise ParameterError('Data provided does not have the same '
'number of variables as the original X '
'data')
result = np.empty((Z.shape[0], self.Y_variables))
t = np.empty((self.components))
for k in range(0, Z.shape[0]):
x_j = Z[k, :] - self.X_offset
for j in range(0, self.components):
t[j] = x_j @ self.W[:, j]
x_j = x_j - t[j] * self.P[:, j]
result[k, :] = self.Y_offset + t @ self.C @ self.Q.T
return result
``` |
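An illustrative call for `PLS_SB` (editor's sketch; the import path is assumed). The one-step and iterative prediction paths implement the same model, so they should return essentially identical values:
```python
import numpy as np
from regressions.pls_sb import PLS_SB  # assumed package/import layout

rng = np.random.RandomState(3)
X = rng.normal(size=(40, 5))
Y = X @ rng.normal(size=(5, 2)) + 0.01 * rng.normal(size=(40, 2))

model = PLS_SB(X, Y, g=2)
one_step = model.prediction(X)              # uses the precomputed matrix B
iterative = model.prediction_iterative(X)   # applies each loading in turn
print(np.allclose(one_step, iterative))     # expected: True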
{
"source": "jhunhwang/goldenretriever",
"score": 3
} |
#### File: src/elasticsearch/create_querylog_index.py
```python
from argparse import ArgumentParser
from datetime import datetime
from elasticsearch_dsl import Index, Boolean, Document, Date, Text, Integer, connections, Mapping
if __name__ == '__main__':
parser = ArgumentParser(description='log user queries to Elasticsearch')
parser.add_argument('url', default='localhost', help='elasticsearch_url')
parser.add_argument('index_name', default='querylog', help='name of query log index')
args = parser.parse_args()
index = Index(args.index_name)
index.settings = {"number_of_shards": 1, "number_of_replicas": 0}
@index.document
class QueryLog(Document):
created_at = Date()
query_id = Text()
        query_text = Text()
        responses = Text(multi=True)  # allow multiple responses in a list
        is_correct = Boolean(multi=True)
        feedback_timestamp = Date()
def save(self, **kwargs):
self.created_at = datetime.now()
return super().save(**kwargs)
    # Workaround: creating the index via the Document class alone did not register all fields reliably, so the Mapping is also defined explicitly below
connections.create_connection(hosts=[args.url])
QueryLog.init()
print('querylog_index_created')
m = Mapping()
m.field('created_at', 'date')
m.field('query_id', 'text')
m.field('query_text', 'text')
m.field('responses', 'text', multi=True)
m.field('is_correct', 'boolean', multi=True)
m.field('feedback_timestamp', 'date')
m.save('querylog')
ql = Index('querylog')
print(ql.get_mapping())
```
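A hypothetical follow-up sketch (not part of the original script) showing how individual queries could be logged once the index exists; the host, index name, and field values are illustrative, and the fields are declared with assignments so the `Document` class registers them.
```python
from datetime import datetime
from elasticsearch_dsl import connections, Document, Date, Text, Boolean

connections.create_connection(hosts=['localhost'])  # assumes a local cluster

class QueryLog(Document):
    created_at = Date()
    query_id = Text()
    query_text = Text()
    responses = Text(multi=True)
    is_correct = Boolean(multi=True)
    feedback_timestamp = Date()

    class Index:
        name = 'querylog'

    def save(self, **kwargs):
        self.created_at = datetime.now()
        return super().save(**kwargs)

entry = QueryLog(query_id='q-001',
                 query_text='how do I reset my password?',
                 responses=['Use the reset link on the login page.'],
                 is_correct=[True])
entry.save()
```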
#### File: goldenretriever/src/minio_handler.py
```python
from minio import Minio
from minio.error import ResponseError, BucketAlreadyOwnedByYou, BucketAlreadyExists
class MinioClient():
def __init__(self, url_endpoint, access_key, secret_key):
self.client = Minio(url_endpoint, access_key, secret_key, secure=False)
def make_bucket(self, bucket_name):
try:
self.client.make_bucket(bucket_name)
except BucketAlreadyExists as err:
print(err)
except BucketAlreadyOwnedByYou as err:
print(err)
except ResponseError as err:
print(err)
def rm_bucket(self, bucket_name):
try:
self.client.remove_bucket(bucket_name)
except ResponseError as err:
print(err)
def upload_model_weights(self, bucket_name, model_obj_name, model_file_path):
try:
print(self.client.fput_object(bucket_name, model_obj_name, model_file_path))
except ResponseError as err:
print(err)
def download_model_weights(self, bucket_name, model_obj_name, model_file_path):
try:
print(self.client.fget_object(bucket_name, model_obj_name, model_file_path))
except ResponseError as err:
print(err)
def upload_emb_index(self, bucket_name, emb_obj_name, emb_file_path):
try:
print(self.client.fput_object(bucket_name, emb_obj_name, emb_file_path))
except ResponseError as err:
print(err)
def download_emb_index(self, bucket_name, emb_obj_name, emb_file_path):
try:
print(self.client.fget_object(bucket_name, emb_obj_name, emb_file_path))
except ResponseError as err:
print(err)
```
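A hypothetical usage sketch for the wrapper above; the endpoint, credentials, bucket, and file names are placeholders.
```python
from src.minio_handler import MinioClient  # assumed import path

client = MinioClient('localhost:9000', 'minio-access-key', 'minio-secret-key')
client.make_bucket('model-weights')
client.upload_model_weights('model-weights', 'encoder-v1.tar.gz',
                            '/tmp/encoder-v1.tar.gz')
client.download_model_weights('model-weights', 'encoder-v1.tar.gz',
                              '/tmp/encoder-v1-restored.tar.gz')
```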
#### File: goldenretriever/src/models.py
```python
import numpy as np
import datetime
from abc import ABC, abstractmethod
from sklearn.metrics.pairwise import cosine_similarity
from .data_handler.data_preprocessing import clean_txt
from .data_handler.kb_handler import kb
class Model(ABC):
"""
a shared model interface where
each model should provide
finetune, predict, make_query, export_encoder,
restore_encoder methods
"""
@abstractmethod
def __init__(self):
pass
@abstractmethod
def finetune(self):
"""
finetunes encoder
"""
pass
@abstractmethod
def predict(self):
"""
encode method of encoder
will be used to vectorize texts
"""
pass
@abstractmethod
def load_kb(self):
"""
load and encode knowledge bases to return predictions
"""
@abstractmethod
def make_query(self):
"""
uses predict method to vectorize texts
and provides relevant responses based on
given specifications (eg. num responses) to user
"""
pass
@abstractmethod
def export_encoder(self):
"""
export finetuned weights
"""
pass
@abstractmethod
def restore_encoder(self):
"""
restores encoder with finetuned weights
"""
pass
class GoldenRetriever(Model):
def __init__(self, encoder):
self.encoder = encoder
def finetune(self, question, answer, margin=0.3,
loss='triplet', context=[], neg_answer=[],
neg_answer_context=[], label=[]):
cost_value = self.encoder.finetune_weights(question, answer,
margin=margin, loss=loss,
context=context,
neg_answer=neg_answer,
neg_answer_context=neg_answer_context,
label=label)
return cost_value
def predict(self, text, context=None, string_type='response'):
encoded_responses = self.encoder.encode(text, context=context,
string_type=string_type)
return encoded_responses
def load_kb(self, kb_):
"""
Load the knowledge base or bases
:param kb: kb object as defined in kb_handler
"""
self.kb = {}
if type(kb_) == kb:
context_and_raw_string = kb_.responses.context_string.fillna('') + \
' ' + kb_.responses.raw_string.fillna('')
kb_.vectorised_responses = self.predict(clean_txt(context_and_raw_string),
string_type='response')
self.kb[kb_.name] = kb_
print(f'{datetime.datetime.now()} : kb loaded - {kb_.name} ')
elif hasattr(kb_, '__iter__'):
for one_kb in kb_:
self.load_kb(one_kb)
def make_query(self, querystring, top_k=5, index=False,
predict_type='query', kb_name='default_kb'):
"""
Make a query against the stored vectorized knowledge.
        :type predict_type: str
        :type kb_name: str
        :type index: bool
        :param predict_type: can be 'query' or 'response'. Use to compare statements
:param kb_name: the name of knowledge base in the knowledge dictionary
:param index: Choose index=True to return sorted index of matches.
:return: Top K vectorized answers and their scores
"""
similarity_score = cosine_similarity(self.kb[kb_name].vectorised_responses,
self.predict([querystring],
string_type=predict_type))
sortargs = np.flip(similarity_score.argsort(axis=0))
sortargs = [x[0] for x in sortargs]
# sorted answer conditional if there is a context string,
# then include as a line-separated pre-text
sorted_ans = []
for i in sortargs:
ans = self.kb[kb_name].responses.context_string.iloc[i] + \
'\n' + self.kb[kb_name].responses.raw_string.iloc[i] \
if self.kb[kb_name].responses.context_string.iloc[i] != '' \
else self.kb[kb_name].responses.raw_string.iloc[i]
sorted_ans.append(ans)
if index:
return sorted_ans[:top_k], sortargs[:top_k]
return sorted_ans[:top_k], similarity_score[sortargs[:top_k]]
def export_encoder(self, save_dir):
'''
Path should include partial filename.
https://www.tensorflow.org/api_docs/python/tf/saved_model/save
'''
self.encoder.save_weights(save_dir=save_dir)
def restore_encoder(self, save_dir):
"""
Signatures need to be re-init after weights are loaded.
"""
self.encoder.restore_weights(save_dir=save_dir)
``` |
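A hypothetical end-to-end sketch for the `GoldenRetriever` wrapper above. `some_encoder` and `faq_kb` are placeholders for objects built elsewhere in this project (an encoder exposing `encode`/`finetune_weights`/`save_weights`/`restore_weights`, and a `kb_handler.kb` instance), so this outlines the call sequence rather than being runnable on its own.
```python
from src.models import GoldenRetriever  # assumed import path

# Placeholders: build these with the project's encoder and kb_handler utilities.
gr = GoldenRetriever(some_encoder)
gr.load_kb(faq_kb)                              # vectorise the knowledge base once

answers, scores = gr.make_query('how do I reset my password?',
                                top_k=3, kb_name=faq_kb.name)
for ans in answers:
    print(ans)

gr.export_encoder('weights/gr_encoder')         # persist finetuned weights
```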
{
"source": "jhunkeler/cbc",
"score": 2
} |
#### File: cbc/cbc/build.py
```python
import os
from configparser import ConfigParser, ExtendedInterpolation  # SafeConfigParser was removed in Python 3.12
class Controller(object):
def __init__(self):
pass
class Task(object):
def __init__(self, filename):
        self.config = ConfigParser(interpolation=ExtendedInterpolation(), allow_no_value=True)
def check_config(self):
pass
def run(self):
pass
```
#### File: cbc/cbc/meta.py
```python
import os
import conda_build.metadata
import conda_build.environ
import yaml
import shutil
from glob import glob
from collections import OrderedDict
from .parsers import CBCConfigParser, ExtendedInterpolation
from .environment import Environment
from .exceptions import MetaDataError
class MetaData(object):
def __init__(self, filename, env):
filename = os.path.abspath(filename)
if not os.path.exists(filename):
            raise OSError('"{0}" does not exist.'.format(filename))
self.filename = filename
self.confdir = os.path.dirname(self.filename)
if not isinstance(env, Environment):
raise MetaDataError('Expecting instance of cbc.environment.Environment, got: "{0}"'.format(type(env)))
self.env = env
self.builtins = ['cbc_build', 'cbc_cgi', 'settings', 'environ']
self.fields = self.convert_conda_fields(conda_build.metadata.FIELDS)
self.config = CBCConfigParser(interpolation=ExtendedInterpolation(), allow_no_value=True, comment_prefixes='#')
# Include built-in Conda metadata fields
self.config.read_dict(self.fields)
if self.env.configrc is not None:
self.config.read_dict(self.as_dict(self.env.configrc))
# Include user-defined build fields
self.config.read(self.filename)
# Assimilate conda environment variables
self.config['environ'] = conda_build.environ.get_dict()
# Convert ConfigParser -> generic dictionary
self.local = self.as_dict(self.config)
        # Field list conversion table taken from conda_build.metadata:
for field in ('source/patches', 'source/url',
'build/entry_points', 'build/script_env',
'build/features', 'build/track_features',
'requirements/build', 'requirements/run',
'requirements/conflicts', 'test/requires',
'test/files', 'test/commands', 'test/imports'):
section, key = field.split('/')
if self.local[section][key]:
self.local[section][key] = self.config.getlist(section, key)
self.local_metadata = {}
for keyword in self.builtins:
if keyword in self.local:
self.local_metadata[keyword] = self.local[keyword]
# Convert dict to YAML-compatible dict
self.conda_metadata = self.scrub(self.local, self.builtins)
def run(self):
self.render_scripts()
def render_scripts(self):
'''Write all conda scripts
'''
for maskkey, maskval in self.env.config['script'].items():
for metakey, metaval in self.compile().items():
if metakey in maskkey:
with open(maskval, 'w+') as metafile:
metafile.write(metaval)
def copy_patches(self):
extensions = ['*.diff', '*.patch']
for extension in extensions:
path = os.path.join(self.confdir, extension)
for patch in glob(path):
shutil.copy2(patch, self.env.pkgdir)
def compile(self):
compiled = {}
compiled['meta'] = yaml.safe_dump(self.conda_metadata, default_flow_style=False, line_break=True, indent=4)
compiled['build_linux'] = self.local_metadata['cbc_build']['linux']
#if 'windows' in self.local_metadata['']
compiled['build_windows'] = self.local_metadata['cbc_build']['windows']
return compiled
def convert_conda_fields(self, fields):
temp = OrderedDict()
for fkey, fval in fields.items():
temp[fkey] = { x: '' for x in fval}
return temp
def scrub(self, obj, force_remove=[]):
obj_c = obj.copy()
if isinstance(obj_c, dict):
for key,val in obj_c.items():
for reserved in force_remove:
if reserved in key:
del obj[reserved]
continue
if isinstance(val, dict):
val = self.scrub(val)
if val is None or val == {} or not val:
try:
del obj[key]
except KeyError as err:
print(err)
return obj
def as_dict(self, config):
"""
Converts a ConfigParser object into a dictionary.
The resulting dictionary has sections as keys which point to a dict of the
sections options as key => value pairs.
"""
the_dict = {}
for section in config.sections():
the_dict[section] = {}
for key, val in config.items(section):
                # Note: str() never raises ValueError, so the final assignment
                # always succeeds and values end up stored in their string form.
                for cast in (int, float, bool, str):
try:
the_dict[section][key] = cast(val)
except ValueError:
pass
return the_dict
```
#### File: cbc/cbc/parsers.py
```python
from configparser import ConfigParser, ExtendedInterpolation
class CBCConfigParser(ConfigParser):
def getlist(self,section,option):
value = self.get(section,option)
return list(filter(None, (x.strip() for x in value.splitlines())))
def getlistint(self,section,option):
return [int(x) for x in self.getlist(section,option)]
``` |
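A small usage sketch for `CBCConfigParser` (editor's illustration; the config text is made up): `getlist` splits a multi-line option into a clean list, and `getlistint` additionally casts each entry to `int`.
```python
from cbc.parsers import CBCConfigParser, ExtendedInterpolation  # assumed import path

cfg = CBCConfigParser(interpolation=ExtendedInterpolation(), allow_no_value=True)
cfg.read_string("""
[requirements]
build =
    numpy
    setuptools
ids =
    1
    2
    3
""")
print(cfg.getlist('requirements', 'build'))    # ['numpy', 'setuptools']
print(cfg.getlistint('requirements', 'ids'))   # [1, 2, 3]
```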
{
"source": "jhunkeler/ci_watson",
"score": 2
} |
#### File: ci_watson/ci_watson/artifactory_helpers.py
```python
import copy
from datetime import datetime
import json
import os
import re
import shutil
from difflib import unified_diff
from io import StringIO
try:
from astropy.io import fits
from astropy.io.fits import FITSDiff, HDUDiff
from astropy.utils.introspection import minversion
HAS_ASTROPY = True
except ImportError:
HAS_ASTROPY = False
if HAS_ASTROPY and minversion('astropy', '3.1'):
ASTROPY_LT_3_1 = False
else:
ASTROPY_LT_3_1 = True
__all__ = ['BigdataError', 'check_url', 'get_bigdata_root', 'get_bigdata',
'compare_outputs', 'generate_upload_params',
'generate_upload_schema']
RE_URL = re.compile(r"\w+://\S+")
UPLOAD_SCHEMA = {"files": [
{"pattern": "",
"target": "",
"props": None,
"recursive": "false",
"flat": "true",
"regexp": "false",
"explode": "false",
"excludePatterns": []}]}
TODAYS_DATE = datetime.now().strftime("%Y-%m-%d")
class BigdataError(Exception):
"""Exception related to big data access."""
pass
def check_url(url):
"""Determine if URL can be resolved without error."""
if RE_URL.match(url) is None:
return False
# Optional import: requests is not needed for local big data setup.
import requests
# requests.head does not work with Artifactory landing page.
r = requests.get(url, allow_redirects=True)
# TODO: Can we simply return r.ok here?
if r.status_code >= 400:
return False
return True
def _download(url, dest, timeout=30):
"""Simple HTTP/HTTPS downloader."""
# Optional import: requests is not needed for local big data setup.
import requests
dest = os.path.abspath(dest)
with requests.get(url, stream=True, timeout=timeout) as r:
with open(dest, 'w+b') as data:
for chunk in r.iter_content(chunk_size=0x4000):
data.write(chunk)
return dest
def get_bigdata_root(envkey='TEST_BIGDATA'):
"""
Find and returns the path to the nearest big datasets.
Parameters
----------
envkey : str
Environment variable name. It must contain a string
defining the root Artifactory URL or path to local
big data storage.
"""
if envkey not in os.environ:
raise BigdataError(
'Environment variable {} is undefined'.format(envkey))
path = os.environ[envkey]
if os.path.exists(path) or check_url(path):
return path
return None
def get_bigdata(*args, docopy=True):
"""
Acquire requested data from a managed resource
to the current directory.
Parameters
----------
args : tuple of str
Location of file relative to ``TEST_BIGDATA``.
docopy : bool
Switch to control whether or not to copy a file
into the test output directory when running the test.
If you wish to open the file directly from remote
location or just to see path to source, set this to `False`.
Default: `True`
Returns
-------
dest : str
Absolute path to local copy of data
(i.e., ``/path/to/example.fits``).
Examples
--------
>>> import os
>>> print(os.getcwd())
/path/to
>>> from ci_watson.artifactory_helpers import get_bigdata
>>> filename = get_bigdata('abc', '123', 'example.fits')
>>> print(filename)
/path/to/example.fits
>>> get_bigdata('abc', '123', 'example.fits', docopy=False)
/remote/root/abc/123/example.fits
"""
src = os.path.join(get_bigdata_root(), *args)
src_exists = os.path.exists(src)
src_is_url = check_url(src)
# No-op
if not docopy:
if src_exists or src_is_url:
return os.path.abspath(src)
else:
raise BigdataError('Failed to find data: {}'.format(src))
filename = os.path.basename(src)
dest = os.path.abspath(os.path.join(os.curdir, filename))
if src_exists:
# Found src file on locally accessible directory
if src == dest: # pragma: no cover
raise BigdataError('Source and destination paths are identical: '
'{}'.format(src))
shutil.copy2(src, dest)
elif src_is_url:
_download(src, dest)
else:
raise BigdataError('Failed to retrieve data: {}'.format(src))
return dest
def compare_outputs(outputs, raise_error=True, ignore_keywords=[],
ignore_hdus=[], ignore_fields=[], rtol=0.0, atol=0.0,
input_path=[], docopy=True, results_root=None,
verbose=True):
"""
Compare output with "truth" using appropriate
diff routine; namely:
* ``fitsdiff`` for FITS file comparisons.
* ``unified_diff`` for ASCII products.
Only after all elements of ``outputs`` have been
processed will the method report any success or failure, with
failure of any one comparison *not* preventing the rest of the
comparisons to be performed.
Parameters
----------
outputs : list of tuple or dict
This list defines what outputs from running the test will be
compared. Three distinct types of values as list elements
are supported:
* 2-tuple : ``(test output filename, truth filename)``
* 3-tuple : ``(test output filename, truth filename, HDU names)``
* dict : ``{'files': (output, truth), 'pars': {key: val}}``
If filename contains extension such as ``[hdrtab]``,
it will be interpreted as specifying comparison of just that HDU.
raise_error : bool
Raise ``AssertionError`` if difference is found.
ignore_keywords : list of str
List of FITS header keywords to be ignored by
``FITSDiff`` and ``HDUDiff``.
ignore_hdus : list of str
List of FITS HDU names to ignore by ``FITSDiff``.
This is only available for ``astropy>=3.1``.
ignore_fields : list of str
List FITS table column names to be ignored by
``FITSDiff`` and ``HDUDiff``.
rtol, atol : float
Relative and absolute tolerance to be used by
``FITSDiff`` and ``HDUDiff``.
input_path : list or tuple
A series of sub-directory names under :func:`get_bigdata_root`
that leads to the path of the 'truth' files to be compared
against. If not provided, it assumes that 'truth' is in the
working directory. For example, with :func:`get_bigdata_root`
pointing to ``/grp/test_data``, a file at::
/grp/test_data/pipeline/dev/ins/test_1/test_a.py
would require ``input_path`` of::
["pipeline", "dev", "ins", "test_1"]
docopy : bool
If `True`, 'truth' will be copied to output directory before
comparison is done.
results_root : str or `None`
If not `None`, for every failed comparison, the test output
is automatically renamed to the given 'truth' in the output
directory and :func:`generate_upload_schema` will be called
to generate a JSON scheme for Artifactory upload.
If you do not need this functionality, use ``results_root=None``.
verbose : bool
Print extra info to screen.
Returns
-------
creature_report : str
Report from FITS or ASCII comparator.
This is part of error message if ``raise_error=True``.
Examples
--------
There are multiple use cases for this method, specifically
related to how ``outputs`` are defined upon calling this method.
The specification of the ``outputs`` can be any combination of the
following patterns:
1. 2-tuple inputs::
outputs = [('file1.fits', 'file1_truth.fits')]
This definition indicates that ``file1.fits`` should be compared
as a whole with ``file1_truth.fits``.
2. 2-tuple inputs with extensions::
outputs = [('file1.fits[hdrtab]', 'file1_truth.fits[hdrtab]')]
This definition indicates that only the HDRTAB extension from
``file1.fits`` will be compared to the HDRTAB extension from
``file1_truth.fits``.
3. 3-tuple inputs::
outputs = [('file1.fits', 'file1_truth.fits', ['primary', 'sci'])]
This definition indicates that only the PRIMARY and SCI extensions
should be compared between the two files. This creates a temporary
``HDUList`` object comprising only the given extensions for comparison.
4. Dictionary of inputs and parameters::
outputs = [{'files': ('file1.fits', 'file1_truth.fits'),
'pars': {'ignore_keywords': ['ROOTNAME']}}]
This definition indicates that ROOTNAME will be ignored during
the comparison between the files specified in ``'files'``.
Any input parameter for ``FITSDiff`` or ``HDUDiff`` can be specified
as part of the ``'pars'`` dictionary.
In addition, the input files listed in ``'files'`` can also include
an extension specification, such as ``[hdrtab]``, to limit the
comparison to just that extension.
This example from an actual test definition demonstrates
how multiple input defintions can be used at the same time::
outputs = [
('jw99999_nircam_f140m-maskbar_psfstack.fits',
'jw99999_nircam_f140m-maskbar_psfstack_ref.fits'
),
('jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',
'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'
),
{'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
'jw99999_nircam_f140m-maskbar_i2d_ref.fits'),
             'pars': {'ignore_hdus': ['HDRTAB']}},
{'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
'jw99999_nircam_f140m-maskbar_i2d_ref.fits',
['primary','sci','dq']),
'pars': {'rtol': 0.000001}
},
{'files': ('jw99999_nircam_f140m-maskbar_i2d.fits[hdrtab]',
'jw99999_nircam_f140m-maskbar_i2d_ref.fits[hdrtab]'),
'pars': {'ignore_keywords': ['NAXIS1', 'TFORM*'],
'ignore_fields': ['COL1', 'COL2']}
}]
.. note:: Each ``outputs`` entry in the list gets interpreted and processed
separately.
"""
if ASTROPY_LT_3_1:
if len(ignore_hdus) > 0: # pragma: no cover
raise ValueError('ignore_hdus cannot be used for astropy<3.1')
default_kwargs = {'rtol': rtol, 'atol': atol,
'ignore_keywords': ignore_keywords,
'ignore_fields': ignore_fields}
else:
default_kwargs = {'rtol': rtol, 'atol': atol,
'ignore_keywords': ignore_keywords,
'ignore_fields': ignore_fields,
'ignore_hdus': ignore_hdus}
all_okay = True
creature_report = ''
updated_outputs = [] # To track outputs for Artifactory JSON schema
for entry in outputs:
diff_kwargs = copy.deepcopy(default_kwargs)
extn_list = None
num_entries = len(entry)
if isinstance(entry, dict):
entry_files = entry['files']
actual = entry_files[0]
desired = entry_files[1]
if len(entry_files) > 2:
extn_list = entry_files[2]
diff_kwargs.update(entry.get('pars', {}))
elif num_entries == 2:
actual, desired = entry
elif num_entries == 3:
actual, desired, extn_list = entry
else:
all_okay = False
creature_report += '\nERROR: Cannot handle entry {}\n'.format(
entry)
continue
# TODO: Use regex?
if actual.endswith(']'):
if extn_list is not None:
all_okay = False
creature_report += (
'\nERROR: Ambiguous extension requirements '
'for {} ({})\n'.format(actual, extn_list))
continue
actual_name, actual_extn = actual.split('[')
actual_extn = actual_extn.replace(']', '')
else:
actual_name = actual
actual_extn = None
if desired.endswith(']'):
if extn_list is not None:
all_okay = False
creature_report += (
'\nERROR: Ambiguous extension requirements '
'for {} ({})\n'.format(desired, extn_list))
continue
desired_name, desired_extn = desired.split('[')
desired_extn = desired_extn.replace(']', '')
else:
desired_name = desired
desired_extn = None
# Get "truth" image
try:
desired = get_bigdata(*input_path, desired_name, docopy=docopy)
except BigdataError:
all_okay = False
creature_report += '\nERROR: Cannot find {} in {}\n'.format(
desired_name, input_path)
continue
if desired_extn is not None:
desired_name = desired
desired = "{}[{}]".format(desired, desired_extn)
if verbose:
print("\nComparing:\n {} \nto\n {}".format(actual, desired))
if actual.endswith('.fits') and desired.endswith('.fits'):
# Build HDULists for comparison based on user-specified extensions
if extn_list is not None:
with fits.open(actual) as f_act:
with fits.open(desired) as f_des:
actual_hdu = fits.HDUList(
[f_act[extn] for extn in extn_list])
desired_hdu = fits.HDUList(
[f_des[extn] for extn in extn_list])
fdiff = FITSDiff(actual_hdu, desired_hdu,
**diff_kwargs)
creature_report += '\na: {}\nb: {}\n'.format(
actual, desired) # diff report only gives hash
# Working with FITS files...
else:
fdiff = FITSDiff(actual, desired, **diff_kwargs)
creature_report += fdiff.report()
if not fdiff.identical:
all_okay = False
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
elif actual_extn is not None or desired_extn is not None:
if 'ignore_hdus' in diff_kwargs: # pragma: no cover
diff_kwargs.pop('ignore_hdus') # Not applicable
# Specific element of FITS file specified
with fits.open(actual_name) as f_act:
with fits.open(desired_name) as f_des:
actual_hdu = f_act[actual_extn]
desired_hdu = f_des[desired_extn]
fdiff = HDUDiff(actual_hdu, desired_hdu, **diff_kwargs)
creature_report += '\na: {}\nb: {}\n'.format(actual, desired)
creature_report += fdiff.report()
if not fdiff.identical:
all_okay = False
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual_name, desired_name))
else:
# ASCII-based diff
with open(actual) as afile:
actual_lines = afile.readlines()
with open(desired) as dfile:
desired_lines = dfile.readlines()
udiff = unified_diff(actual_lines, desired_lines,
fromfile=actual, tofile=desired)
udiffIO = StringIO()
udiffIO.writelines(udiff)
udiff_report = udiffIO.getvalue()
udiffIO.close()
if len(udiff_report) == 0:
creature_report += ('\na: {}\nb: {}\nNo differences '
'found.\n'.format(actual, desired))
else:
all_okay = False
creature_report += udiff_report
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not all_okay and results_root is not None: # pragma: no cover
schema_pattern, tree, testname = generate_upload_params(
results_root, updated_outputs, verbose=verbose)
generate_upload_schema(schema_pattern, tree, testname)
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
def generate_upload_params(results_root, updated_outputs, verbose=True):
"""
Generate pattern, target, and test name for :func:`generate_upload_schema`.
    This uses ``BUILD_TAG`` and ``BUILD_MATRIX_SUFFIX`` on Jenkins CI to create
    a meaningful Artifactory target path. They are optional for local runs.
    Other attributes like user, time stamp, and test name are also
    automatically determined.
    In addition to renamed outputs, ``*.log`` is also inserted into the
    ``schema_pattern``.
Parameters
----------
results_root : str
See :func:`compare_outputs` for more info.
updated_outputs : list
List containing tuples of ``(actual, desired)`` of failed
test output comparison to be processed.
verbose : bool
Print extra info to screen.
Returns
-------
schema_pattern, tree, testname
Analogous to ``pattern``, ``target``, and ``testname`` that are
passed into :func:`generate_upload_schema`, respectively.
"""
import getpass
# Create instructions for uploading results to artifactory for use
# as new comparison/truth files
testname = os.path.split(os.path.abspath(os.curdir))[1]
# Meaningful test dir from build info.
# TODO: Organize results by day test was run. Could replace with git-hash
whoami = getpass.getuser() or 'nobody'
user_tag = 'NOT_CI_{}'.format(whoami)
build_tag = os.environ.get('BUILD_TAG', user_tag)
build_matrix_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', '0')
subdir = '{}_{}_{}'.format(TODAYS_DATE, build_tag, build_matrix_suffix)
tree = os.path.join(results_root, subdir, testname) + os.sep
schema_pattern = []
# Upload all log files
schema_pattern.append('*.log')
# Write out JSON file to enable retention of different results.
# Also rename outputs as new truths.
for test_result, truth in updated_outputs:
new_truth = os.path.basename(truth)
shutil.move(test_result, new_truth)
schema_pattern.append(os.path.abspath(new_truth))
if verbose:
print("Renamed {} as new 'truth' file: {}".format(
os.path.abspath(test_result), os.path.abspath(new_truth)))
return schema_pattern, tree, testname
def generate_upload_schema(pattern, target, testname, recursive=False):
"""
Write out JSON file to upload Jenkins results from test to
Artifactory storage area.
This function relies on the JFROG JSON schema for uploading data into
artifactory using the Jenkins plugin. Docs can be found at
https://www.jfrog.com/confluence/display/RTF/Using+File+Specs
Parameters
----------
pattern : str or list of strings
Specifies the local file system path to test results which should be
uploaded to Artifactory. You can specify multiple artifacts by using
wildcards or a regular expression as designated by the regexp property.
target : str
Specifies the target path in Artifactory in the following format::
[repository_name]/[repository_path]
testname : str
        Name of the test that generated the results. This will be used to create the
name of the JSON file to enable these results to be uploaded to
Artifactory.
recursive : bool, optional
Specify whether or not to identify files listed in sub-directories
for uploading. Default: `False`
"""
jsonfile = "{}_results.json".format(testname)
recursive = repr(recursive).lower()
if not isinstance(pattern, str):
# Populate schema for this test's data
upload_schema = {"files": []}
for p in pattern:
temp_schema = copy.deepcopy(UPLOAD_SCHEMA["files"][0])
temp_schema.update({"pattern": p, "target": target,
"recursive": recursive})
upload_schema["files"].append(temp_schema)
else:
# Populate schema for this test's data
upload_schema = copy.deepcopy(UPLOAD_SCHEMA)
upload_schema["files"][0].update({"pattern": pattern, "target": target,
"recursive": recursive})
# Write out JSON file with description of test results
with open(jsonfile, 'w') as outfile:
json.dump(upload_schema, outfile, indent=2)
```
#### File: ci_watson/ci_watson/plugin.py
```python
import os
import pytest
__all__ = []
def pytest_addoption(parser):
"""
These pytest hooks allow us to mark tests and run the marked tests with
specific command line options.
"""
# Add option to run slow tests
parser.addoption(
"--slow",
action="store_true",
help="run slow tests"
)
# Add option to use big data sets
parser.addoption(
"--bigdata",
action="store_true",
help="use big data sets (intranet)"
)
# Choose to test under dev or stable
parser.addoption(
"--env",
default="dev",
help="specify what environment to test"
)
# Data file input/output source/destination customization.
parser.addini(
"inputs_root",
"Root dir (or data repository name) for test input files.",
type="args",
default=None,
)
parser.addini(
"results_root",
"Root dir (or data repository name) for test result/output files.",
type="args",
default=None,
)
def pytest_configure(config):
config.getini('markers').append(
'slow: Run tests that are resource intensive')
config.getini('markers').append(
'bigdata: Run tests that require intranet access')
def pytest_runtest_setup(item):
if 'slow' in item.keywords and not item.config.getvalue("slow"):
pytest.skip("need --slow option to run")
if 'bigdata' in item.keywords and not item.config.getvalue("bigdata"):
pytest.skip("need --bigdata option to run")
@pytest.fixture(scope='function')
def _jail(tmpdir):
"""Perform test in a pristine temporary working directory."""
old_dir = os.getcwd()
os.chdir(tmpdir.strpath)
try:
yield tmpdir.strpath
finally:
os.chdir(old_dir)
@pytest.fixture(scope='session')
def envopt(request):
"""Get the ``--env`` command-line option specifying test environment"""
return request.config.getoption("env")
```
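A hypothetical downstream test module that exercises the options and fixtures defined above; the inputs root and file name are placeholders. Such a test is skipped unless pytest is invoked with `--bigdata`, and `envopt` reflects the `--env` choice (default `dev`).
```python
import pytest
from ci_watson.artifactory_helpers import get_bigdata

@pytest.mark.bigdata
def test_roundtrip(_jail, envopt):
    # _jail runs the test in a fresh temporary working directory;
    # 'my-inputs-root' and the file name below are illustrative.
    local_copy = get_bigdata('my-inputs-root', envopt, 'input', 'example.fits')
    assert local_copy.endswith('example.fits')
```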
#### File: ci_watson/tests/test_artifactory_helpers.py
```python
import json
import os
import pytest
from ci_watson.artifactory_helpers import (
HAS_ASTROPY, BigdataError, get_bigdata_root, get_bigdata,
check_url, compare_outputs, generate_upload_params, generate_upload_schema)
@pytest.mark.bigdata
@pytest.mark.parametrize(
('val', 'ans'),
[('/local/path', False),
('https://google.com', True),
('https://github.com/spacetelescopehstcalblahblah', False)])
def test_check_url(val, ans):
assert check_url(val) is ans
class TestBigdataRoot:
def setup_class(self):
self.key = 'FOOFOO'
def teardown_class(self):
if self.key in os.environ:
del os.environ[self.key]
def test_no_env(self):
if self.key in os.environ:
del os.environ[self.key]
with pytest.raises(BigdataError):
get_bigdata_root(envkey=self.key)
@pytest.mark.bigdata
def test_has_env_url(self):
path = 'https://google.com'
os.environ[self.key] = path
assert get_bigdata_root(envkey=self.key) == path
def test_has_env_local(self):
path = os.path.abspath(os.curdir)
os.environ[self.key] = path
assert get_bigdata_root(envkey=self.key) == path
def test_no_path(self):
os.environ[self.key] = '/some/fake/path'
assert get_bigdata_root(envkey=self.key) is None
@pytest.mark.bigdata
class TestGetBigdata:
def setup_class(self):
self.root = get_bigdata_root()
def test_nocopy(self, _jail, pytestconfig):
args = (pytestconfig.getini('inputs_root')[0],
'dev',
'input',
'j6lq01010_asn.fits')
dest = get_bigdata(*args, docopy=False)
assert dest == os.path.abspath(os.path.join(self.root, *args))
assert len(os.listdir()) == 0
@pytest.mark.parametrize('docopy', [True, False])
def test_no_data(self, docopy):
with pytest.raises(BigdataError):
get_bigdata('fake', 'path', 'somefile.txt', docopy=docopy)
def test_get_data(self, _jail, pytestconfig):
"""
This tests download when TEST_BIGDATA is pointing to Artifactory.
And tests copy when it is pointing to local path.
"""
args = (pytestconfig.getini('inputs_root')[0],
'dev',
'input',
'j6lq01010_asn.fits')
dest = get_bigdata(*args)
assert dest == os.path.abspath(os.path.join(os.curdir, args[-1]))
@pytest.mark.bigdata
@pytest.mark.usefixtures('_jail')
@pytest.mark.skipif(not HAS_ASTROPY, reason='requires astropy to run')
class TestCompareOutputs:
"""
Test a few common comparison scenarios.
FITSDiff and HDUDiff are tested in Astropy, so here we simply
test if they report differences or not, but we do not check
the content too closely.
.. note:: Upload schema functions are tested separately elsewhere.
"""
def setup_class(self):
self.inpath = ('ci-watson', 'dev', 'input')
if os.environ.get('TEST_BIGDATA').startswith('http'):
self.copy = True
else:
self.copy = False
def test_raise_error_fits(self):
"""Test mismatched extensions from the same file."""
get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)
outputs = [('j6lq01010_asn.fits[PRIMARY]', 'j6lq01010_asn.fits[asn]')]
with pytest.raises(AssertionError) as exc:
compare_outputs(outputs, input_path=self.inpath,
docopy=self.copy, verbose=False)
assert 'Headers contain differences' in str(exc)
def test_difference_ascii(self):
"""
Test ASCII with differences but suppress error to inspect
returned report.
"""
get_bigdata(*self.inpath, 'j6lq01010_asn_mod.txt', docopy=True)
report = compare_outputs(
[('j6lq01010_asn_mod.txt', 'j6lq01010_asn.txt')],
input_path=self.inpath, docopy=self.copy, verbose=False,
raise_error=False)
s = report.split(os.linesep)
assert s[2:] == ['@@ -1,4 +1,4 @@',
' # MEMNAME MEMTYPE MEMPRSNT',
'-J6LQ01NAQ EXP-CRJ 2',
'+J6LQ01NAQ EXP-CRJ 1',
' J6LQ01NDQ EXP-CRJ 1',
'-J6LQ01013 PROD-RPT 1',
'+J6LQ01011 PROD-CRJ 1',
'']
@pytest.mark.parametrize(
'filename', ['j6lq01010_asn.fits', 'j6lq01010_asn.txt'])
def test_all_okay(self, filename):
"""Same file has no difference."""
get_bigdata(*self.inpath, filename, docopy=True)
report = compare_outputs(
[(filename, filename)], input_path=self.inpath,
docopy=self.copy, verbose=False)
assert 'No differences found' in report
@pytest.mark.parametrize('docopy', [False, True])
def test_truth_missing(self, docopy):
get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)
with pytest.raises(AssertionError) as exc:
compare_outputs(
[('j6lq01010_asn.fits', 'doesnotexist.fits')],
input_path=self.inpath, docopy=docopy, verbose=False)
assert 'Cannot find doesnotexist.fits' in str(exc)
@pytest.mark.parametrize(
'outputs',
[[('j6lq01010_asn.fits[ASN]', 'j6lq01010_asn_mod.fits', ['image'])],
[('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits[ASN]', ['image'])]])
def test_ambiguous_extlist(self, outputs):
"""Too many ways to do the same thing."""
get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)
with pytest.raises(AssertionError) as exc:
compare_outputs(outputs, input_path=self.inpath, docopy=self.copy,
verbose=False)
assert 'Ambiguous extension requirements' in str(exc)
def test_mixed_bunch(self):
"""
Test different forms of acceptable ``outputs``.
.. note:: Some other crazy combos are theoretically possible given
the logic but they are not officially supported, hence
not tested here. Add new combo as its support is added.
"""
for filename in ('j6lq01010_asn.fits', 'j6lq01010_asn.txt'):
get_bigdata(*self.inpath, filename, docopy=True)
outputs = [('j6lq01010_asn.fits', 'j6lq01010_asn.fits'),
('j6lq01010_asn.fits[asn]', 'j6lq01010_asn.fits[ASN]'),
{'files': ('j6lq01010_asn.fits[image]',
'j6lq01010_asn_mod.fits[IMAGE]'),
'pars': {'rtol': 1e-7, 'atol': 0.05}},
{'files': ('j6lq01010_asn.fits',
'j6lq01010_asn_mod.fits',
['image']),
'pars': {'rtol': 1e-7, 'atol': 0.05}},
{'files': ('j6lq01010_asn.txt', 'j6lq01010_asn.txt')},
('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits',
['primary', 'IMAGE']),
('j6lq01010_asn.txt', 'j6lq01010_asn.txt')]
report = compare_outputs(
outputs, input_path=self.inpath, docopy=self.copy,
verbose=False, raise_error=False)
# There are 7 comparisons, and only 1 should show a difference
assert report.count("No differences found") == 6
assert report.count("different pixels found") == 1
class TestGenerateUploadParams:
def setup_class(self):
self.old_envs = {}
for key in ('BUILD_TAG', 'BUILD_MATRIX_SUFFIX'):
self.old_envs[key] = os.environ.get(key)
# Set up something reproducible
os.environ['BUILD_TAG'] = 'tag0'
os.environ['BUILD_MATRIX_SUFFIX'] = 'foo'
def teardown_class(self):
for key, val in self.old_envs.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
def test_gen(self, _jail):
# Dummy file to move.
datafile = 'actual.txt'
with open(datafile, 'w') as f:
f.write('\n')
updated_outputs = [(datafile, '/path/to/desired.txt')]
schema_pattern, tree, testname = generate_upload_params(
'groot', updated_outputs, verbose=False)
assert schema_pattern == ['*.log', os.path.abspath('desired.txt')]
assert isinstance(testname, str) # Actual value non-deterministic
# TODO: Use regex?
split_tree = tree.split(os.sep)
assert split_tree[0] == 'groot'
assert split_tree[1].endswith('_tag0_foo')
assert split_tree[3] == ''
# Make sure file is moved properly.
dirlist = os.listdir()
assert dirlist == ['desired.txt']
def test_generate_upload_schema_multi(_jail):
generate_upload_schema(
['*.log', 'desired.txt'], 'reponame/repopath', 'foo')
# TODO: Better way to compare JSON?
with open('foo_results.json') as f:
j = json.load(f)
assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [
'{',
' "files": [',
' {',
' "excludePatterns": [],',
' "explode": "false",',
' "flat": "true",',
' "pattern": "*.log",',
' "props": null,',
' "recursive": "false",',
' "regexp": "false",',
' "target": "reponame/repopath"',
' },',
' {',
' "excludePatterns": [],',
' "explode": "false",',
' "flat": "true",',
' "pattern": "desired.txt",',
' "props": null,',
' "recursive": "false",',
' "regexp": "false",',
' "target": "reponame/repopath"',
' }',
' ]',
'}']
def test_generate_upload_schema_one(_jail):
generate_upload_schema(
'desired.txt', 'reponame/repopath', 'foo', recursive=True)
# TODO: Better way to compare JSON?
with open('foo_results.json') as f:
j = json.load(f)
assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [
'{',
' "files": [',
' {',
' "excludePatterns": [],',
' "explode": "false",',
' "flat": "true",',
' "pattern": "desired.txt",',
' "props": null,',
' "recursive": "true",',
' "regexp": "false",',
' "target": "reponame/repopath"',
' }',
' ]',
'}']
```
#### File: ci_watson/tests/test_markers.py
```python
import pytest
from ci_watson.artifactory_helpers import get_bigdata, BigdataError
@pytest.mark.slow
def test_skip_slow(pytestconfig):
if not pytestconfig.getoption('slow'):
pytest.fail('@pytest.mark.slow was not skipped')
@pytest.mark.bigdata
def test_skip_bigdata(pytestconfig):
if not pytestconfig.getoption('bigdata'):
pytest.fail('@pytest.mark.bigdata was not skipped')
    # User uses the bigdata option and decorator but has no big data access.
else:
with pytest.raises(BigdataError):
get_bigdata('foo', 'bar')
``` |
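The two marker tests above rely on `--slow` and `--bigdata` command-line options being registered with pytest. A minimal sketch of how such options could be wired up in a `conftest.py` is shown below; the hook names are standard pytest, but the option wiring is an assumption for illustration, not ci_watson's actual plugin code.
```python
# Hypothetical conftest.py sketch (illustrative only, not ci_watson's code):
# register the --slow and --bigdata options queried above via
# pytestconfig.getoption(), and skip marked tests when they are absent.
import pytest


def pytest_addoption(parser):
    parser.addoption("--slow", action="store_true", default=False,
                     help="run tests marked as slow")
    parser.addoption("--bigdata", action="store_true", default=False,
                     help="run tests that need access to big data")


def pytest_collection_modifyitems(config, items):
    # Skip marked tests unless the matching option was given on the CLI.
    for opt in ("slow", "bigdata"):
        if config.getoption(opt):
            continue
        skip = pytest.mark.skip(reason="need --{} option to run".format(opt))
        for item in items:
            if opt in item.keywords:
                item.add_marker(skip)
```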
{
"source": "jhunkeler/drizzlepac",
"score": 2
} |
#### File: drizzlepac/drizzlepac/drizCR.py
```python
import os
import re
import numpy as np
from scipy import signal
from astropy.io import fits
from stsci.tools import fileutil, logutil, mputil, teal
from . import quickDeriv
from . import util
from . import processInput
from . version import __version__, __version_date__
if util.can_parallel:
import multiprocessing
__taskname__ = "drizzlepac.drizCR" # looks in drizzlepac for sky.cfg
_STEP_NUM = 6 # this relates directly to the syntax in the cfg file
log = logutil.create_logger(__name__, level=logutil.logging.NOTSET)
def drizCR(input=None, configObj=None, editpars=False, **inputDict):
""" Look for cosmic rays. """
log.debug(inputDict)
inputDict["input"] = input
configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,
loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj)
# this is the function that will be called from TEAL
def run(configObj):
    # outwcs is not needed here
imgObjList, outwcs = processInput.setCommonInput(configObj,
createOutwcs=False)
rundrizCR(imgObjList, configObj)
def rundrizCR(imgObjList, configObj, procSteps=None):
if procSteps is not None:
procSteps.addStep('Driz_CR')
step_name = util.getSectionName(configObj, _STEP_NUM)
if not configObj[step_name]['driz_cr']:
log.info('Cosmic-ray identification (driz_cr) step not performed.')
return
paramDict = configObj[step_name]
paramDict['crbit'] = configObj['crbit']
paramDict['inmemory'] = imgObjList[0].inmemory
log.info("USER INPUT PARAMETERS for Driz_CR Step:")
util.printParams(paramDict, log=log)
# if we have the cpus and s/w, ok, but still allow user to set pool size
pool_size = util.get_pool_size(configObj.get('num_cores'), len(imgObjList))
if imgObjList[0].inmemory:
pool_size = 1 # reason why is output in drizzle step
subprocs = []
if pool_size > 1:
log.info('Executing {:d} parallel workers'.format(pool_size))
for image in imgObjList:
manager = multiprocessing.Manager()
mgr = manager.dict({})
p = multiprocessing.Process(
target=_driz_cr,
name='drizCR._driz_cr()', # for err msgs
args=(image, mgr, paramDict.dict())
)
subprocs.append(p)
image.virtualOutputs.update(mgr)
mputil.launch_and_wait(subprocs, pool_size) # blocks till all done
else:
log.info('Executing serially')
for image in imgObjList:
_driz_cr(image, image.virtualOutputs, paramDict)
if procSteps is not None:
procSteps.endStep('Driz_CR')
def _driz_cr(sciImage, virtual_outputs, paramDict):
"""mask blemishes in dithered data by comparison of an image
with a model image and the derivative of the model image.
- ``sciImage`` is an imageObject which contains the science data
- ``blotImage`` is inferred from the ``sciImage`` object here which knows
the name of its blotted image
- ``chip`` should be the science chip that corresponds to the blotted
image that was sent
- ``paramDict`` contains the user parameters derived from the full
``configObj`` instance
- ``dqMask`` is inferred from the ``sciImage`` object, the name of the mask
file to combine with the generated Cosmic ray mask
Here are the options you can override in ``configObj``
``gain`` = 7 # Detector gain, e-/ADU
``grow`` = 1 # Radius around CR pixel to mask
# [default=1 for 3x3 for non-NICMOS]
``ctegrow`` = 0 # Length of CTE correction to be applied
``rn`` = 5 # Read noise in electrons
``snr`` = "4.0 3.0" # Signal-to-noise ratio
``scale`` = "0.5 0.4" # scaling factor applied to the derivative
``backg`` = 0 # Background value
``expkey`` = "exptime" # exposure time keyword
Blot images are saved out to simple fits files with 1 chip in them
so for example in ACS, there will be 1 image file with 2 chips that is
the original image and 2 blotted image files, each with 1 chip
So I'm imagining calling this function twice, once for each chip,
but both times with the same original science image file, output files
and some input (output from previous steps) are referenced in the
imageobject itself
"""
grow = paramDict["driz_cr_grow"]
ctegrow = paramDict["driz_cr_ctegrow"]
crcorr_list = []
cr_mask_dict = {}
for chip in range(1, sciImage._numchips + 1, 1):
exten = sciImage.scienceExt + ',' + str(chip)
sci_chip = sciImage[exten]
if not sci_chip.group_member:
continue
blot_image_name = sci_chip.outputNames['blotImage']
if sciImage.inmemory:
blot_data = sciImage.virtualOutputs[blot_image_name][0].data
else:
if not os.path.isfile(blot_image_name):
raise IOError("Blotted image not found: {:s}"
.format(blot_image_name))
try:
blot_data = fits.getdata(blot_image_name, ext=0)
except IOError:
print("Problem opening blot images")
raise
# Scale blot image, as needed, to match original input data units.
blot_data *= sci_chip._conversionFactor
input_image = sciImage.getData(exten)
# Apply any unit conversions to input image here for comparison
# with blotted image in units of electrons
input_image *= sci_chip._conversionFactor
# make the derivative blot image
blot_deriv = quickDeriv.qderiv(blot_data)
# Boolean mask needs to take into account any crbits values
# specified by the user to be ignored when converting DQ array.
dq_mask = sciImage.buildMask(chip, paramDict['crbit'])
# parse out the SNR information
snr1, snr2 = map(
            float, filter(None, re.split(r"[,;\s]+", paramDict["driz_cr_snr"]))
)
# parse out the scaling information
mult1, mult2 = map(
float, filter(
                None, re.split(r"[,;\s]+", paramDict["driz_cr_scale"])
)
)
gain = sci_chip._effGain
rn = sci_chip._rdnoise
backg = sci_chip.subtractedSky * sci_chip._conversionFactor
# Set scaling factor (used by MultiDrizzle) to 1 since scaling has
# already been accounted for in blotted image
# expmult = 1.
# ################# COMPUTATION PART I ###################
# Create a temporary array mask
t1 = np.absolute(input_image - blot_data)
# ta = np.sqrt(gain * np.abs((blot_data + backg) * expmult) + rn**2)
ta = np.sqrt(gain * np.abs(blot_data + backg) + rn**2)
t2 = (mult1 * blot_deriv + snr1 * ta / gain) # / expmult
tmp1 = t1 <= t2
# Create a convolution kernel that is 3 x 3 of 1's
kernel = np.ones((3, 3), dtype=np.uint16)
# Convolve the mask with the kernel
tmp2 = signal.convolve2d(tmp1, kernel, boundary='symm', mode='same')
# ################# COMPUTATION PART II ###################
# Create the CR Mask
t2 = (mult2 * blot_deriv + snr2 * ta / gain) # / expmult
cr_mask = (t1 <= t2) | (tmp2 >= 9)
# ################# COMPUTATION PART III ##################
# flag additional cte 'radial' and 'tail' pixels surrounding CR pixels
# as CRs
# In both the 'radial' and 'length' kernels below, 0->good and 1->bad,
# so that upon convolving the kernels with cr_mask, the convolution
# output will have low->bad and high->good from which 2 new arrays are
# created having 0->bad and 1->good. These 2 new arrays are then
# 'anded' to create a new cr_mask.
# make radial convolution kernel and convolve it with original cr_mask
cr_grow_kernel = np.ones((grow, grow), dtype=np.uint16)
cr_grow_kernel_conv = signal.convolve2d(
cr_mask, cr_grow_kernel, boundary='symm', mode='same'
)
# make tail convolution kernel and convolve it with original cr_mask
cr_ctegrow_kernel = np.zeros((2 * ctegrow + 1, 2 * ctegrow + 1))
# which pixels are masked by tail kernel depends on sign of
# sci_chip.cte_dir (i.e.,readout direction):
if sci_chip.cte_dir == 1:
# 'positive' direction: HRC: amp C or D; WFC: chip = sci,1; WFPC2
cr_ctegrow_kernel[0:ctegrow, ctegrow] = 1
elif sci_chip.cte_dir == -1:
# 'negative' direction: HRC: amp A or B; WFC: chip = sci,2
cr_ctegrow_kernel[ctegrow+1:2*ctegrow+1, ctegrow] = 1
# do the convolution
cr_ctegrow_kernel_conv = signal.convolve2d(
cr_mask, cr_ctegrow_kernel, boundary='symm', mode='same'
)
# select high pixels from both convolution outputs;
# then 'and' them to create new cr_mask
cr_grow_mask = cr_grow_kernel_conv >= grow**2 # radial
cr_ctegrow_mask = cr_ctegrow_kernel_conv >= ctegrow # length
cr_mask = cr_grow_mask & cr_ctegrow_mask
# Apply CR mask to the DQ array in place
dq_mask &= cr_mask
# Create the corr file
corrFile = np.where(dq_mask, input_image, blot_data)
corrFile /= sci_chip._conversionFactor
corrDQMask = np.where(dq_mask, 0, paramDict['crbit']).astype(np.uint16)
if paramDict['driz_cr_corr']:
crcorr_list.append({
'sciext': fileutil.parseExtn(exten),
'corrFile': corrFile.copy(),
'dqext': fileutil.parseExtn(sci_chip.dq_extn),
'dqMask': corrDQMask
})
# Save the cosmic ray mask file to disk
cr_mask_image = sci_chip.outputNames["crmaskImage"]
if paramDict['inmemory']:
print('Creating in-memory(virtual) FITS file...')
_pf = util.createFile(cr_mask.astype(np.uint8),
outfile=None, header=None)
cr_mask_dict[cr_mask_image] = _pf
sciImage.saveVirtualOutputs(cr_mask_dict)
else:
# Always write out crmaskimage, as it is required input for
# the final drizzle step. The final drizzle step combines this
# image with the DQ information on-the-fly.
#
# Remove the existing mask file if it exists
if os.path.isfile(cr_mask_image):
os.remove(cr_mask_image)
print("Removed old cosmic ray mask file: '{:s}'"
.format(cr_mask_image))
print("Creating output: {:s}".format(cr_mask_image))
util.createFile(cr_mask.astype(np.uint8),
outfile=cr_mask_image, header=None)
if paramDict['driz_cr_corr']:
createCorrFile(sciImage.outputNames["crcorImage"], crcorr_list,
sciImage._filename)
def createCorrFile(outfile, arrlist, template):
"""
Create a _cor file with the same format as the original input image.
The DQ array will be replaced with the mask array used to create the _cor
file.
"""
# Remove the existing cor file if it exists
if os.path.isfile(outfile):
os.remove(outfile)
print("Removing old corr file: '{:s}'".format(outfile))
with fits.open(template, memmap=False) as ftemplate:
for arr in arrlist:
ftemplate[arr['sciext']].data = arr['corrFile']
if arr['dqext'][0] != arr['sciext'][0]:
ftemplate[arr['dqext']].data = arr['dqMask']
ftemplate.writeto(outfile)
print("Created CR corrected file: '{:s}'".format(outfile))
def setDefaults(configObj={}):
""" Return a dictionary of the default parameters
        which have also been updated with the user overrides.
"""
paramDict = {
'gain': 7, # Detector gain, e-/ADU
'grow': 1, # Radius around CR pixel to mask [default=1 for
# 3x3 for non-NICMOS]
'ctegrow': 0, # Length of CTE correction to be applied
'rn': 5, # Read noise in electrons
'snr': '4.0 3.0', # Signal-to-noise ratio
'scale': '0.5 0.4', # scaling factor applied to the derivative
'backg': 0, # Background value
'expkey': 'exptime' # exposure time keyword
}
if len(configObj) > 0:
for key in configObj:
paramDict[key] = configObj[key]
return paramDict
def help(file=None):
"""
Print out syntax help for running ``astrodrizzle``
Parameters
----------
file : str (Default = None)
If given, write out help to the filename specified by this parameter
Any previously existing file with this name will be deleted before
writing out the help.
"""
helpstr = getHelpAsString(docstring=True, show_ver=True)
if file is None:
print(helpstr)
else:
with open(file, mode='w') as f:
f.write(helpstr)
def getHelpAsString(docstring=False, show_ver=True):
"""
Return useful help from a file in the script directory called
``__taskname__.help``
"""
install_dir = os.path.dirname(__file__)
taskname = util.base_taskname(__taskname__, __package__)
htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
helpfile = os.path.join(install_dir, taskname + '.help')
if docstring or (not docstring and not os.path.exists(htmlfile)):
if show_ver:
helpString = "\n{:s} Version {:s} updated on {:s}\n\n".format(
__taskname__, __version__, __version_date__
)
else:
helpString = ''
if os.path.exists(helpfile):
helpString += teal.getHelpFileAsString(taskname, __file__)
elif __doc__ is not None:
helpString += __doc__ + os.linesep
else:
helpString = 'file://' + htmlfile
return helpString
drizCR.__doc__ = getHelpAsString(docstring=True, show_ver=False)
```
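The heart of `_driz_cr` above is a two-pass comparison of `|input - blot|` against a Poisson-plus-read-noise estimate and a scaled derivative image. The sketch below reproduces just that thresholding on synthetic arrays; the gain, read-noise, SNR and scale values are illustrative stand-ins, and the derivative image is simply zeroed in place of `quickDeriv.qderiv`.
```python
# Minimal sketch of the driz_cr thresholding logic on synthetic data
# (illustrative parameters; not a substitute for the full drizCR step).
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
blot = rng.normal(100.0, 1.0, size=(32, 32))        # stand-in blotted model
image = blot + rng.normal(0.0, 3.0, size=blot.shape)
image[16, 16] += 500.0                               # inject a fake cosmic ray

gain, rn, backg = 7.0, 5.0, 0.0
snr1, snr2 = 4.0, 3.0        # analogous to driz_cr_snr = "4.0 3.0"
mult1, mult2 = 0.5, 0.4      # analogous to driz_cr_scale = "0.5 0.4"
deriv = np.zeros_like(blot)  # stand-in for quickDeriv.qderiv(blot)

t1 = np.abs(image - blot)
ta = np.sqrt(gain * np.abs(blot + backg) + rn ** 2)

# Pass 1: flag pixels, then count passing pixels in each 3x3 neighbourhood.
tmp1 = t1 <= (mult1 * deriv + snr1 * ta / gain)
tmp2 = signal.convolve2d(tmp1, np.ones((3, 3), dtype=np.uint16),
                         boundary='symm', mode='same')

# Pass 2: keep a pixel if it passes the looser test or its whole 3x3
# neighbourhood passed pass 1; everything else is flagged as a cosmic ray.
cr_mask = (t1 <= (mult2 * deriv + snr2 * ta / gain)) | (tmp2 >= 9)
print("flagged CR pixels:", int((~cr_mask).sum()))   # should catch pixel (16, 16)
```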
#### File: drizzlepac/drizzlepac/findobj.py
```python
import sys
import math
import numpy as np
from scipy import signal, ndimage
import stsci.imagestats as imagestats
from . import cdriz
__all__ = ['gaussian1', 'gausspars', 'gaussian', 'moments', 'errfunc',
'findstars', 'apply_nsigma_separation', 'xy_round',
'precompute_sharp_round', 'sharp_round', 'roundness', 'immoments',
'nmoment', 'centroid', 'cmoment', 'central_moments', 'covmat',
'help', 'getHelpAsString']
#def gaussian(amplitude, xcen, ycen, xsigma, ysigma):
#from numpy import *
FWHM2SIG = 2*np.sqrt(2*np.log(2))
#def gaussian1(height, x0, y0, fwhm, nsigma=1.5, ratio=1., theta=0.0):
def gaussian1(height, x0, y0, a, b, c):
"""
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
a, b, c - ellipse parameters (coefficients in the quadratic form)
"""
return lambda x, y: height * np.exp(-0.5* (a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2))
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
"""
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
fwhm - full width at half maximum of the observation
nsigma - cut the gaussian at nsigma
    ratio - ratio of ysigma to xsigma (ysigma = ratio * xsigma)
    theta - position angle of the major axis measured
            counter-clockwise from the x axis
Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2+b(x-x0)*(y-y0)+c*(y-y0)^2 <= 2*f
"""
xsigma = fwhm / FWHM2SIG
ysigma = ratio * xsigma
f = nsigma**2/2.
theta = np.deg2rad(theta)
cost = np.cos(theta)
sint = np.sin(theta)
if ratio == 0: # 1D Gaussian
if theta == 0 or theta == 180:
a = 1/xsigma**2
b = 0.0
c = 0.0
elif theta == 90:
a = 0.0
b = 0.0
c = 1/xsigma**2
else:
print('Unable to construct 1D Gaussian with these parameters\n')
raise ValueError
nx = 2 * int(max(2, (xsigma*nsigma*np.abs(cost))))+1
ny = 2 * int(max(2, (xsigma*nsigma*np.abs(sint))))+1
else: #2D gaussian
xsigma2 = xsigma * xsigma
ysigma2 = ysigma * ysigma
a = cost**2/xsigma2 + sint**2/ysigma2
b = 2 * cost * sint *(1.0/xsigma2-1.0/ysigma2)
c = sint**2/xsigma2 + cost**2/ysigma2
d = b**2 - 4*a*c # discriminant
# nx = int(2*max(2, math.sqrt(-8*c*f/d)))+1
# ny = int(2*max(2, math.sqrt(-8*a*f/d)))+1
nx = 2 * int(2*max(1, nsigma*math.sqrt(-c/d)))+1
ny = 2 * int(2*max(1, nsigma*math.sqrt(-a/d)))+1
return nx, ny, a, b, c, f
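# --- Illustrative sketch (not part of drizzlepac) ---------------------------
# gausspars() returns the kernel size (nx, ny), the quadratic-form
# coefficients a, b, c and the cutoff f such that
#     a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2 <= 2*f
# bounds the elliptical Gaussian; gaussian1() turns those coefficients into an
# evaluable kernel, exactly as findstars() does further below.
def _example_elliptical_kernel(fwhm=2.5):
    nx, ny, a, b, c, f = gausspars(fwhm, nsigma=1.5, ratio=1.0, theta=0.0)
    yin, xin = np.mgrid[0:ny, 0:nx]
    kernel = gaussian1(1.0, nx // 2, ny // 2, a, b, c)(xin, yin)
    return kernel   # unit-height kernel, peaking at the central pixel
# -----------------------------------------------------------------------------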
def gaussian(height, center_x, center_y, width_x, width_y):
#Returns a gaussian function with the given parameters
width_x = float(width_x)
width_y = float(width_y)
    return lambda x, y: height * np.exp(
        -(((center_x - x) / width_x)**2 + ((center_y - y) / width_y)**2) / 2)
def moments(data,cntr):
"""
Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments.
"""
total = data.sum()
#X, Y = np.indices(data.shape)
#x = (X*data).sum()/total
#y = (Y*data).sum()/total
x,y = cntr
xi = int(x)
yi = int(y)
if xi < 0 or xi >= data.shape[1] or yi < 0 or yi >= data.shape[0]:
raise ValueError
col = data[:, xi]
width_x = np.sqrt(abs(((np.arange(col.size)-y)**2*col).sum()/col.sum()))
row = data[yi, :]
width_y = np.sqrt(abs(((np.arange(row.size)-x)**2*row).sum()/row.sum()))
height = data.max()
return height, x, y, width_x, width_y
def errfunc(p, *args):
func = gaussian1(*p)
ret =np.ravel(func(*args[1:]) - args[0])
return ret
def findstars(jdata, fwhm, threshold, skymode,
peakmin=None, peakmax=None, fluxmin=None, fluxmax=None,
nsigma=1.5, ratio=1.0, theta=0.0,
use_sharp_round=False,mask=None,
sharplo=0.2,sharphi=1.0,roundlo=-1.0,roundhi=1.0):
# store input image size:
(img_ny, img_nx) = jdata.shape
# Define convolution inputs
nx, ny, a, b, c, f = gausspars(fwhm, nsigma=nsigma, ratio= ratio, theta=theta)
xc = nx//2
yc = ny//2
yin, xin = np.mgrid[0:ny, 0:nx]
kernel = gaussian1(1.0, xc, yc, a, b, c)(xin,yin)
# define size of extraction box for each source based on kernel size
grx = xc
gry = yc
# DAOFIND STYLE KERNEL "SHAPE"
rmat = np.sqrt((xin-xc)**2 + (yin-yc)**2)
rmatell = a*(xin-xc)**2 + b*(xin-xc)*(yin-yc) + c*(yin-yc)**2
xyrmask = np.where((rmatell <= 2*f) | (rmat <= 2.001),1,0).astype(np.int16)
# Previous *style* computation for kernel "shape":
#xyrmask = np.where(rmat <= max(grx,gry),1,0).astype(np.int16)
npts = xyrmask.sum()
rmask = kernel*xyrmask
denom = (rmask*rmask).sum() - rmask.sum()**2/npts
nkern = (rmask - (rmask.sum()/npts))/denom # normalize kernel to preserve
# fluxes for thresholds
nkern *= xyrmask
# initialize values used for getting source centers
relerr = 1./((rmask**2).sum() - (rmask.sum()**2/xyrmask.sum()))
xsigsq = (fwhm / FWHM2SIG)**2
ysigsq = (ratio**2) * xsigsq
# convolve image with gaussian kernel
convdata = signal.convolve2d(jdata, nkern, boundary='symm', mode='same').astype(np.float32)
# clip image to create regions around each source for segmentation
if mask is None:
tdata=np.where(convdata > threshold, convdata, 0)
else:
tdata=np.where((convdata > threshold) & mask, convdata, 0)
# segment image and find sources
s = ndimage.morphology.generate_binary_structure(2, 2)
ldata, nobj = ndimage.label(tdata, structure=s)
fobjects = ndimage.find_objects(ldata)
fluxes = []
fitind = []
if nobj < 2:
print('No objects found for this image. Please check value of "threshold".')
return fitind,fluxes
# determine center of each source, while removing spurious sources or
# applying limits defined by the user
ninit = 0
ninit2 = 0
s2m, s4m = precompute_sharp_round(nx, ny, xc, yc)
satur = False # Default assumption if use_sharp_round=False
sharp = None
round1 = None
round2 = None
for ss,n in zip(fobjects,range(len(fobjects))):
ssx = ss[1].stop - ss[1].start
ssy = ss[0].stop - ss[0].start
if ssx >= tdata.shape[1]-1 or ssy >= tdata.shape[0]-1:
continue
yr0 = ss[0].start - gry
yr1 = ss[0].stop + gry + 1
if yr0 <= 0 or yr1 >= img_ny: continue # ignore sources within ny//2 of edge
xr0 = ss[1].start - grx
xr1 = ss[1].stop + grx + 1
if xr0 <= 0 or xr1 >= img_nx: continue # ignore sources within nx//2 of edge
ssnew = (slice(yr0,yr1),slice(xr0,xr1))
region = tdata[ssnew]
cntr = centroid(region)
# Define region centered on max value in object (slice)
# This region will be bounds-checked to insure that it only accesses
# a valid section of the image (not off the edge)
maxpos = (int(cntr[1]+0.5)+ssnew[0].start,int(cntr[0]+0.5)+ssnew[1].start)
yr0 = maxpos[0] - gry
yr1 = maxpos[0] + gry + 1
if yr0 < 0 or yr1 > img_ny:
continue
xr0 = maxpos[1] - grx
xr1 = maxpos[1] + grx + 1
if xr0 < 0 or xr1 > img_nx:
continue
# Simple Centroid on the region from the input image
jregion = jdata[yr0:yr1,xr0:xr1]
src_flux = jregion.sum()
src_peak = jregion.max()
if (peakmax is not None and src_peak >= peakmax):
continue
if (peakmin is not None and src_peak <= peakmin):
continue
if fluxmin and src_flux <= fluxmin:
continue
if fluxmax and src_flux >= fluxmax:
continue
datamin = jregion.min()
datamax = jregion.max()
if use_sharp_round:
# Compute sharpness and first estimate of roundness:
dregion = convdata[yr0:yr1,xr0:xr1]
satur, round1, sharp = \
sharp_round(jregion, dregion, xyrmask, xc, yc,
s2m, s4m, nx, ny, datamin, datamax)
# Filter sources:
if sharp is None or (sharp < sharplo or sharp > sharphi):
continue
if round1 is None or (round1 < roundlo or round1 > roundhi):
continue
px, py, round2 = xy_round(jregion, grx, gry, skymode,
kernel, xsigsq, ysigsq, datamin, datamax)
# Filter sources:
if px is None:
continue
if use_sharp_round and not satur and \
(round2 is None or round2 < roundlo or round2 > roundhi):
continue
fitind.append((px + xr0, py + yr0, sharp, round1, round2))
# compute a source flux value
fluxes.append(src_flux)
fitindc, fluxesc = apply_nsigma_separation(fitind, fluxes, fwhm*nsigma / 2)
return fitindc, fluxesc
def apply_nsigma_separation(fitind,fluxes,separation,niter=10):
"""
Remove sources which are within nsigma*fwhm/2 pixels of each other, leaving
only a single valid source in that region.
This algorithm only works for sources which end up sequentially next to each other
based on Y position and removes enough duplicates to make the final source list more
managable. It sorts the positions by Y value in order to group those at the
same positions as much as possible.
"""
for n in range(niter):
if len(fitind) < 1:
break
fitarr = np.array(fitind,np.float32)
fluxarr = np.array(fluxes,np.float32)
inpind = np.argsort(fitarr[:,1])
npind = fitarr[inpind]
fluxind = fluxarr[inpind]
fitind = npind.tolist()
fluxes = fluxind.tolist()
dx = npind[1:,0] - npind[:-1,0]
dy = npind[1:,1] - npind[:-1,1]
dr = np.sqrt(np.power(dx,2)+np.power(dy,2))
nsame = np.where(dr <= separation)[0]
if nsame.shape[0] > 0:
for ind in nsame[-1::-1]:
#continue # <- turn off filtering by source separation
del fitind[ind]
del fluxes[ind]
else:
break
return fitind,fluxes
def xy_round(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin=None,datamax=None):
""" Compute center of source
Original code from IRAF.noao.digiphot.daofind.apfind ap_xy_round()
"""
nyk,nxk = ker2d.shape
if datamin is None:
datamin = data.min()
if datamax is None:
datamax = data.max()
# call C function for speed now...
xy_val = cdriz.arrxyround(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin,datamax)
if xy_val is None:
x = None
y = None
round = None
else:
x = xy_val[0]
y = xy_val[1]
round = xy_val[2]
return x,y,round
def precompute_sharp_round(nxk, nyk, xc, yc):
"""
Pre-computes mask arrays to be used by the 'sharp_round' function
for roundness computations based on two- and four-fold symmetries.
"""
# Create arrays for the two- and four-fold symmetry computations:
s4m = np.ones((nyk,nxk),dtype=np.int16)
s4m[yc, xc] = 0
s2m = np.ones((nyk,nxk),dtype=np.int16)
s2m[yc, xc] = 0
    s2m[yc:nyk, 0:xc] = -1
    s2m[0:yc+1, xc+1:nxk] = -1
return s2m, s4m
def sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk,
datamin, datamax):
"""
sharp_round -- Compute first estimate of the roundness and sharpness of the
detected objects.
A Python translation of the AP_SHARP_ROUND IRAF/DAOFIND function.
"""
# Compute the first estimate of roundness:
sum2 = np.sum(s2m*density)
sum4 = np.sum(s4m*abs(density))
if sum2 == 0.0:
round = 0.0
elif sum4 <= 0.0: # eps?
round = None
else:
round = 2.0 * sum2 / sum4
# Eliminate the sharpness test if the central pixel is bad:
mid_data_pix = data[yc, xc]
mid_dens_pix = density[yc, xc]
if mid_data_pix > datamax:
return True, round, None
if mid_data_pix < datamin:
return False, round, None
########################
# Sharpness statistics:
satur = np.max(kskip*data) > datamax
# Exclude pixels (create a mask) outside the [datamin, datamax] range:
uskip = np.where((data >= datamin) & (data <= datamax), 1, 0)
# Update the mask with the "skipped" values from the convolution kernel:
uskip *= kskip
# Also, exclude central pixel:
uskip[yc, xc] = 0
npixels = np.sum(uskip)
if (npixels < 1 or mid_dens_pix <= 0.0):
return satur, round, None
sharp = (mid_data_pix - np.sum(uskip*data)/npixels) / mid_dens_pix
#sharp = (mid_data_pix - np.mean(uskip*data)) / mid_dens_pix
return satur, round, sharp
def roundness(im):
"""
from astropy.io import fits as pyfits
data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)
star0=data[403:412,423:432]
star=data[396:432,3522:3558]
In [53]: findobj.roundness(star0)
Out[53]: 0.99401955054989544
In [54]: findobj.roundness(star)
Out[54]: 0.83091919980660645
"""
perimeter = im.shape[0]*2 +im.shape[1]*2 -4
area = im.size
return 4*np.pi*area/perimeter**2
def immoments(im, p,q):
x = list(range(im.shape[1]))
y = list(range(im.shape[0]))
#coord=np.array([x.flatten(),y.flatten()]).T
"""
moment = 0
momentx = 0
for i in x.flatten():
moment+=momentx
sumx=0
for j in y.flatten():
sumx+=i**0*j**0*star0[i,j]
"""
moment = np.sum([i**p*j**q*im[i,j] for j in x for i in y], dtype=np.float64)
return moment
#ss=[i**0*j**0*list(star0[i,j].flatten()) for i in list(x.flatten()) for j in list(y.flatten())]
def nmoment(im,p,q):
m = immoments(im,p,q)
nmoment = m/np.sum(im, dtype=np.float64)
return nmoment
def centroid(im):
"""
Computes the centroid of an image using the image moments:
centroid = {m10/m00, m01/m00}
These calls point to Python version of moments function
m00 = immoments(im,0,0)
m10 = immoments(im, 1,0)
m01 = immoments(im,0,1)
"""
# These calls point to Python version of moments function
m00 = cdriz.arrmoments(im,0,0)
m10 = cdriz.arrmoments(im, 1,0)
m01 = cdriz.arrmoments(im,0,1)
ycen = m10 / m00
xcen = m01 / m00
return xcen, ycen
def cmoment(im,p,q):
xcen,ycen = centroid(im)
#x,y=np.meshgrid(range(403,412),range(423,432))
x = list(range(im.shape[1]))
y = list(range(im.shape[0]))
mu = np.sum([(i-xcen)**p * (j-ycen)**q * im[i,j] for i in y for j in x],
dtype=np.float64)
return mu
def central_moments(im):
xcen,ycen = centroid(im)
mu00 = cmoment(im,p=0,q=0)
mu01 = 0.
mu10 = 0.
mu11 = immoments(im,1,1) - xcen * immoments(im,0,1)
mu20 = immoments(im,2,0) - xcen * immoments(im,1,0)
mu02 = immoments(im,0,2) - ycen*immoments(im,0,1)
mu21 = immoments(im,2,1) - 2*xcen*immoments(im,1,1) - ycen*immoments(im,2,0) + \
2*xcen**2*immoments(im,0,1)
mu12 = immoments(im,1,2) - 2*ycen*immoments(im,1,1) - xcen*immoments(im,0,2) + \
2*ycen**2*immoments(im,1,0)
mu30 = immoments(im,3,0) - 3*xcen*immoments(im,2,0) + 2*xcen**2*immoments(im,1,0)
mu03 = immoments(im,0,3) - 3*ycen*immoments(im,0,2) + 2*ycen**2*immoments(im,0,1)
cmoments = {'mu00': mu00,
'mu01': mu01,
'mu10': mu10,
'mu11': mu11,
'mu20': mu20,
'mu02': mu02,
'mu21': mu21,
'mu12': mu12,
'mu30': mu30,
'mu03': mu03
}
return cmoments
def covmat(im):
cmoments = central_moments(im)
nmu20 = cmoments['mu20'] / cmoments['mu00']
nmu02 = cmoments['mu02'] / cmoments['mu00']
nmu11 = cmoments['mu11'] / cmoments['mu00']
covmat = np.array([[nmu20, nmu11],[nmu11,nmu02]])
return covmat
```
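`centroid`, `immoments` and `cmoment` above are plain image moments (centroid = (m01/m00, m10/m00) in the row/column convention used by `immoments`), with the heavy lifting delegated to the `cdriz` C extension. The sketch below reproduces the same calculation in pure NumPy for illustration.
```python
# Pure-NumPy sketch of the image-moment centroid computed by findobj.centroid()
# (same index convention as immoments: i runs over rows, j over columns).
import numpy as np

def immoments_np(im, p, q):
    i, j = np.indices(im.shape)            # i -> rows, j -> columns
    return np.sum((i ** p) * (j ** q) * im, dtype=np.float64)

def centroid_np(im):
    m00 = immoments_np(im, 0, 0)
    m10 = immoments_np(im, 1, 0)
    m01 = immoments_np(im, 0, 1)
    return m01 / m00, m10 / m00            # (xcen, ycen)

# A symmetric 5x5 "source" centered on pixel (2, 2):
star = np.zeros((5, 5))
star[2, 2] = 10.0
star[1:4, 1:4] += 1.0
print(centroid_np(star))                   # -> (2.0, 2.0)
```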
#### File: drizzlepac/drizzlepac/nicmosData.py
```python
from stsci.tools import fileutil
from nictools import readTDD
import numpy as np
from .imageObject import imageObject
class NICMOSInputImage(imageObject):
SEPARATOR = '_'
def __init__(self, filename=None):
super().__init__(filename)
self.timeExt = 'TIME'
# define the cosmic ray bits value to use in the dq array
self.cr_bits_value = 4096
# Detector parameters, nic only has 1 detector in each file
self.full_shape = (256,256)
self._instrument=self._image['PRIMARY'].header["INSTRUME"]
self.native_units = 'COUNTS/S'
self.flatkey = 'FLATFILE'
for chip in range(1,self._numchips+1,1):
self._image[self.scienceExt,chip].cte_dir = 0 #no correction for nicmos
self._effGain = 1. #get the specific gain from the detector subclass
def _assignSignature(self, chip):
"""assign a unique signature for the image based
on the instrument, detector, chip, and size
this will be used to uniquely identify the appropriate
static mask for the image
this also records the filename for the static mask to the outputNames dictionary
"""
sci_chip = self._image[self.scienceExt,chip]
ny=sci_chip._naxis1
nx=sci_chip._naxis2
detnum = sci_chip.detnum
instr=self._instrument
sig=(instr+str(self._detector),(nx,ny),int(detnum)) #signature is a tuple
sci_chip.signature=sig #signature is a tuple
def doUnitConversions(self):
"""Convert the data to electrons
This converts all science data extensions and saves
the results back to disk. We need to make sure
the data inside the chips already in memory is altered as well.
"""
# Image information
_handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)
for det in range(1,self._numchips+1,1):
chip=self._image[self.scienceExt,det]
if chip._gain is not None:
#conversionFactor = (self.getExpTime() * self.getGain())
conversionFactor = chip._gain
if self.isCountRate():
conversionFactor *= chip._exptime
counts_str = 'COUNTS/S'
else:
counts_str = 'COUNTS'
# Multiply the values of the sci extension pixels by the gain.
print("Converting %s[%s,%d] from %s to ELECTRONS"%(self._filename,self.scienceExt,det,counts_str))
"""
# If the exptime is 0 the science image will be zeroed out.
np.multiply(_handle[self.scienceExt,det].data,conversionFactor,_handle[self.scienceExt,det].data)
#chip.data=_handle[self.scienceExt,det].data.copy()
# Set the BUNIT keyword to 'electrons'
chip.header.update('BUNIT','ELECTRONS')
_handle[0].header.update('BUNIT','ELECTRONS')
# Update the PHOTFLAM value
photflam = _handle[0].header['PHOTFLAM']
_handle[0].header.update('PHOTFLAM',(photflam/chip._gain))
chip._effGain = 1.0
"""
chip._effGain = chip._gain
chip._conversionFactor = conversionFactor
else:
msg = "Invalid gain value for data, no conversion done"
print(msg)
raise ValueError(msg)
# Close the files and clean-up
_handle.close()
self._effGain = conversionFactor #1.0
def _setchippars(self):
self._setDefaultReadnoise()
def getexptimeimg(self,chip):
"""
Return an array representing the exposure time per pixel for the detector.
Returns
-------
dark: array
Exposure time array in the same shape as the input image
"""
return self._image[self.timeExt,chip].data
def getflat(self, chip):
"""
Method for retrieving a detector's flat field.
Returns
-------
flat : array
The flat field array in the same shape as the input image with **units of cps**.
"""
# The reference flat field is inverted:
flat = 1.0 / super().getflat(chip)
return flat
def getdarkcurrent(self):
"""
Return the dark current for the NICMOS detectors.
Returns
-------
darkcurrent : float
Dark current value with **units of cps**.
"""
try:
darkcurrent = self._image[0].header['exptime'] * \
self._image[self.scienceExt,1]._darkrate
except:
str = "#############################################\n"
str += "# #\n"
str += "# Error: #\n"
str += "# Cannot find the value for 'EXPTIME' #\n"
str += "# in the image header. NICMOS input #\n"
str += "# images are expected to have this header #\n"
str += "# keyword. #\n"
str += "# #\n"
            str += "#Error occurred in the NICMOSInputImage class#\n"
str += "# #\n"
str += "#############################################\n"
raise ValueError(str)
return darkcurrent
def getdarkimg(self,chip):
"""
Return an array representing the dark image for the detector.
Returns
-------
dark : array
The dark array in the same shape as the image with **units of cps**.
"""
        # Read the temperature-dependent dark file. The name for the file is taken from
# the TEMPFILE keyword in the primary header.
tddobj = readTDD.fromcalfile(self.name)
if tddobj is None:
return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent()
else:
            # Create Dark Object from AMPGLOW and Linear Dark components
darkobj = tddobj.getampglow() + tddobj.getlindark()
            # Return the darkimage taking into account any subarray information available
return darkobj[self.ltv2:self.size2,self.ltv1:self.size1]
def isCountRate(self):
"""
        isCountRate: Method of the IRInputObject used to indicate if the
science data is in units of counts or count rate. This method
assumes that the keyword 'BUNIT' is in the header of the input
FITS file.
"""
has_bunit = False
if 'BUNIT' in self._image['sci',1].header :
has_bunit = True
countrate = False
if (self._image[0].header['UNITCORR'].strip() == 'PERFORM') or \
(has_bunit and self._image['sci',1].header['bunit'].find('/') != -1) :
countrate = True
return countrate
class NIC1InputImage(NICMOSInputImage):
def __init__(self, filename=None):
super().__init__(filename)
self._effGain = 1. #get the gain from the detector subclass
self._detector = self._image["PRIMARY"].header["CAMERA"]
self.proc_unit = "native"
def _getDarkRate(self):
_darkrate = 0.08 #electrons/s
if self.proc_unit == 'native':
_darkrate = _darkrate / self._effGain # DN/s
return _darkrate
def _getDefaultReadnoise(self):
""" This could be updated to calculate the readnoise from the NOISFILE.
"""
_rdnoise = 26.0 # electrons
if self.proc_unit == 'native':
_rdnoise = _rdnoise / self._effGain # ADU
return _rdnoise
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header,
instrpars['rnkeyword'])
chip._exptime = self.getInstrParameter(instrpars['exptime'], pri_header,
instrpars['expkeyword'])
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
self._assignSignature(chip._chip) #this is used in the static mask, static mask name also defined here, must be done after outputNames
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
class NIC2InputImage(NICMOSInputImage):
def __init__(self,filename=None):
super().__init__(filename)
self._effGain=1. #measured
self._detector=self._image["PRIMARY"].header["CAMERA"]
self.proc_unit = "native"
def _getDarkRate(self):
_darkrate = 0.08 #electrons/s
if self.proc_unit == 'native':
_darkrate = _darkrate / self._effGain # DN/s
return _darkrate
def _getDefaultReadnoise(self):
_rdnoise = 26.0 #electrons
if self.proc_unit == 'native':
_rdnoise = _rdnoise/self._effGain #ADU
return _rdnoise
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
# this is used in the static mask, static mask name also defined
# here, must be done after outputNames
self._assignSignature(chip._chip)
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
def createHoleMask(self):
"""Add in a mask for the coronographic hole to the general static
pixel mask. """
pass
class NIC3InputImage(NICMOSInputImage):
def __init__(self, filename=None):
super().__init__(filename)
self._detector=self._image["PRIMARY"].header["CAMERA"] #returns 1,2,3
self._effGain = 1.
self.proc_unit = "native"
def _getDarkRate(self):
_darkrate = 0.15 #electrons/s
if self.proc_unit == 'native':
_darkrate = _darkrate/self._effGain #DN/s
return _darkrate
def _getDefaultReadnoise(self):
_rdnoise = 29.0 # electrons
if self.proc_unit == 'native':
_rdnoise = _rdnoise/self._effGain #ADU
return _rdnoise
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN'
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 6.5 #measured gain
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
self._assignSignature(chip._chip) #this is used in the static mask, static mask name also defined here, must be done after outputNames
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
```
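For NICMOS data, `doUnitConversions` above does not rewrite the science array; it records a conversion factor equal to the chip gain, multiplied by the exposure time when the image is a count rate. A tiny sketch of that bookkeeping with made-up numbers:
```python
# Sketch of the conversion-factor logic in doUnitConversions() (illustrative
# values; the real gain and exposure time come from the FITS headers).
def nicmos_conversion_factor(gain, exptime, is_count_rate):
    factor = gain                # counts -> electrons
    if is_count_rate:
        factor *= exptime        # counts/s -> electrons
    return factor

# e.g. a NIC1/NIC2-like chip (gain 5.4 e-/ADU) and a 256 s count-rate exposure:
print(nicmos_conversion_factor(5.4, 256.0, True))    # 1382.4
print(nicmos_conversion_factor(5.4, 256.0, False))   # 5.4
```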
#### File: drizzlepac/drizzlepac/regfilter.py
```python
__version__ = '0.1'
__version_date__ = '17-Nov-2013'
__author__ = '<NAME>'
def fast_filter_outer_regions(reglist, width, height, origin=1):
# fast_filter_outer_regions filters regions that are outside a rectangle
# ('image's rectangle') of width 'width' and height 'height' that is has the
# bottom-left corner at (origin,origin). This function is based on checking
# for the intersection of the image's rectangle with the bounding box of the
# regions and therefore it is approximate (some regions that in reality are
# not within the image's rectangle will not be filtered out if their
# bounding box still intersects the image rectangle even though the shape
# itself does not intersect the image's rectangle.
for k in range(len(reglist)-1,-1,-1):
reg = reglist[k]
regname = reg.name.lower()
if regname[:3] == 'cir' or regname == 'annulus':
blc, trc = _get_bb_circle(reg)
elif regname[-3:] == 'box':
blc, trc = _get_bb_box(reg)
elif regname == 'ellipse':
blc, trc = _get_bb_ellipse(reg)
elif regname == 'polygon':
blc, trc = _get_bb_polygon(reg)
elif regname == 'point':
x = reg.coord_list[0]
y = reg.coord_list[1]
if not _is_point_inside(width, height, x, y, origin=origin):
del reglist[k]
continue
elif regname[:4] == 'rect':
blc, trc = _get_bb_rect(reg)
elif regname == 'panda':
blc, trc = _get_bb_circle(reg, True)
elif regname == 'epanda':
blc, trc = _get_bb_ellipse(reg, True)
elif regname == 'bpanda':
blc, trc = _get_bb_box(reg, True)
else:
continue
if not _is_rect_inside(width, height, blc, trc, origin=origin):
del reglist[k]
continue
def _is_rect_inside(w1, h1, blc2, trc2, origin=1):
pad = 0.5
o = origin-pad
return ((o < trc2[0]) and (o + w1 > blc2[0]) \
and (o < trc2[1]) and (o + h1 > blc2[1]))
def _is_point_inside(w, h, x, y, origin=1):
pad = 0.5
o = origin-pad
return (o < x and (o + w > x) and (o < y) and (o + h > y))
def _get_bb_rect(shape):
# CIAO rectangles
return (shape.coord_list[0],shape.coord_list[2]), \
(shape.coord_list[1],shape.coord_list[3])
def _get_bb_box(shape, bpanda=False):
from math import sin, cos, radians
# check if angle is provided:
rem = len(shape.coord_list) % 2
# check if bpanda:
pnd = 1 if bpanda else 0
xc = shape.coord_list[0]
yc = shape.coord_list[1]
w = shape.coord_list[-2-rem-pnd] / 2.0
h = shape.coord_list[-1-rem-pnd] / 2.0
th = radians(shape.coord_list[-1]) if rem > 0 else 0.0
cs = cos(th)
sn = sin(th)
xm = max(abs(w*cs-h*sn),abs(w*cs+h*sn))
ym = max(abs(w*sn+h*cs),abs(w*sn-h*cs))
return (xc-xm,yc-ym),(xc+xm,yc+ym)
def _get_bb_circle(shape, panda=False):
# check if panda:
pnd = 1 if panda else 0
xc = shape.coord_list[0]
yc = shape.coord_list[1]
r = shape.coord_list[-1-pnd]
return (xc-r,yc-r),(xc+r,yc+r)
def _get_bb_ellipse(shape, epanda=False):
from math import sin, cos, radians, sqrt
# check if angle is provided:
rem = len(shape.coord_list) % 2
# check if epanda:
pnd = 1 if epanda else 0
xc = shape.coord_list[0]
yc = shape.coord_list[1]
a = shape.coord_list[-2-rem-pnd]
b = shape.coord_list[-1-rem-pnd]
th = radians(shape.coord_list[-1]) if rem > 0 else 0.0
cs = cos(th)
sn = sin(th)
xm = sqrt( (a*cs)**2 + (b*sn)**2 )
ym = sqrt( (a*sn)**2 + (b*cs)**2 )
return (xc-xm,yc-ym),(xc+xm,yc+ym)
def _get_bb_point(shape):
xc = shape.coord_list[0]
yc = shape.coord_list[1]
return (xc-0.5,yc-0.5),(xc+0.5,yc+0.5)
def _get_bb_polygon(shape):
xs = shape.coord_list[0::2]
ys = shape.coord_list[1::2]
minx = min(xs)
maxx = max(xs)
miny = min(ys)
maxy = max(ys)
return (minx,miny),(maxx,maxy)
```
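A region survives `fast_filter_outer_regions` only if the bounding box returned by the `_get_bb_*` helpers overlaps the image rectangle, which `_is_rect_inside` checks with a half-pixel pad. A standalone sketch of that overlap test on toy boxes:
```python
# Standalone sketch of the bounding-box overlap test used by
# fast_filter_outer_regions: a region is kept only if its bounding box
# intersects the width x height image rectangle anchored at (origin, origin),
# allowing a half-pixel pad (mirrors _is_rect_inside above).
def rect_overlaps_image(width, height, blc, trc, origin=1):
    pad = 0.5
    o = origin - pad
    return ((o < trc[0]) and (o + width > blc[0]) and
            (o < trc[1]) and (o + height > blc[1]))

# Toy 100 x 100 image:
print(rect_overlaps_image(100, 100, (40.0, 40.0), (60.0, 60.0)))      # True
print(rect_overlaps_image(100, 100, (150.0, 150.0), (170.0, 170.0)))  # False
print(rect_overlaps_image(100, 100, (-10.0, -10.0), (0.7, 0.7)))      # True
```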
#### File: drizzlepac/drizzlepac/tweakutils.py
```python
import string
import os
import sys
import numpy as np
from scipy import signal, ndimage
from stsci.tools import asnutil, irafglob, parseinput, fileutil, logutil
from astropy.io import fits
import astropy.coordinates as coords
import astropy.units as u
from astropy.utils import deprecated
import stsci.imagestats as imagestats
from . import findobj
from . import cdriz
__all__ = [
'parse_input', 'atfile_sci', 'parse_atfile_cat', 'ndfind',
'get_configobj_root', 'isfloat', 'parse_skypos', 'make_val_float',
'radec_hmstodd', 'parse_exclusions', 'parse_colname', 'readcols',
'read_FITS_cols', 'read_ASCII_cols', 'write_shiftfile', 'createWcsHDU',
'idlgauss_convolve', 'gauss_array', 'gauss', 'make_vector_plot',
'apply_db_fit', 'write_xy_file', 'find_xy_peak', 'plot_zeropoint',
'build_xy_zeropoint', 'build_pos_grid'
]
_ASCII_LETTERS = string.ascii_letters
_NASCII = len(string.ascii_letters)
log = logutil.create_logger(__name__, level=logutil.logging.NOTSET)
def _is_str_none(s):
if s is None or s.strip().upper() in ['', 'NONE', 'INDEF']:
return None
return s
def parse_input(input, prodonly=False, sort_wildcards=True):
catlist = None
if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
# Input is an association table. Get the input files
oldasndict = asnutil.readASNTable(input, prodonly=prodonly)
filelist = [fileutil.buildRootname(fname) for fname in
oldasndict['order']]
elif not isinstance(input, list) and input[0] == '@':
# input is an @ file
# Read the first line in order to determine whether
# catalog files have been specified in a second column...
with open(input[1:]) as f:
line = f.readline()
# Parse the @-file with irafglob to extract the input filename
filelist = irafglob.irafglob(input, atfile=atfile_sci)
print(line)
# If there are additional columns for catalog files...
if len(line.split()) > 1:
# ...parse out the names of the catalog files as well
catlist, catdict = parse_atfile_cat(input)
elif isinstance(input, list):
# input a python list
filelist = []
for fn in input:
flist, output = parse_input(fn, prodonly=prodonly)
# if wild-cards are given, sort for uniform usage:
if fn.find('*') > -1 and sort_wildcards:
flist.sort()
filelist += flist
else:
# input is either a string or something unrecognizable,
# so give it a try:
filelist, output = parseinput.parseinput(input)
# if wild-cards are given, sort for uniform usage:
if input.find('*') > -1 and sort_wildcards:
filelist.sort()
return filelist, catlist
def atfile_sci(line):
return '' if line is None or not line.strip() else line.split()[0]
def parse_atfile_cat(input):
"""
Return the list of catalog filenames specified as part of the input @-file
"""
with open(input[1:]) as f:
catlist = []
catdict = {}
for line in f.readlines():
if line[0] == '#' or not line.strip():
continue
lspl = line.split()
if len(lspl) > 1:
catdict[lspl[0]] = lspl[1:]
catlist.append(lspl[1:])
else:
catdict[lspl[0]] = None
catlist.append(None)
return catlist, catdict
# functions to help work with configobj input
def get_configobj_root(configobj):
kwargs = {}
for key in configobj:
# Only copy in those entries which start with lower case letters
# since sections are all upper-case for this task
if key[0].islower():
kwargs[key] = configobj[key]
return kwargs
def ndfind(array, hmin, fwhm, skymode,
sharplim=[0.2, 1.0], roundlim=[-1, 1], minpix=5,
peakmin=None, peakmax=None, fluxmin=None, fluxmax=None,
nsigma=1.5, ratio=1.0, theta=0.0,
mask=None, use_sharp_round=False, nbright=None):
star_list, fluxes = findobj.findstars(
array, fwhm, hmin, skymode, peakmin=peakmin, peakmax=peakmax,
fluxmin=fluxmin, fluxmax=fluxmax, ratio=ratio, nsigma=nsigma,
theta=theta, use_sharp_round=use_sharp_round, mask=mask,
sharplo=sharplim[0], sharphi=sharplim[1],
roundlo=roundlim[0], roundhi=roundlim[1]
)
if len(star_list) == 0:
print('No valid sources found...')
return tuple([[] for i in range(7 if use_sharp_round else 4)])
star_list = list(np.array(star_list).T)
fluxes = np.array(fluxes, np.float)
if nbright is not None:
idx = np.argsort(fluxes)[::-1]
fluxes = fluxes[idx]
star_list = [s[idx] for s in star_list]
if use_sharp_round:
return (star_list[0], star_list[1], fluxes,
np.arange(star_list[0].size),
star_list[2], star_list[3], star_list[4])
else:
return (star_list[0], star_list[1], fluxes,
np.arange(star_list[0].size), None, None, None)
def isfloat(value):
""" Return True if all characters are part of a floating point value """
try:
float(value)
return True
except ValueError:
return False
def parse_skypos(ra, dec):
"""
Function to parse RA and Dec input values and turn them into decimal
degrees
Input formats could be:
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
nn.nnnnnnnn
"nn.nnnnnnn"
"""
rval = make_val_float(ra)
dval = make_val_float(dec)
if rval is None:
rval, dval = radec_hmstodd(ra, dec)
return rval, dval
def make_val_float(val):
try:
return float(val)
except ValueError:
return None
def radec_hmstodd(ra, dec):
""" Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates
"""
if sys.hexversion >= 196864:
hmstrans = str.maketrans(_ASCII_LETTERS, _NASCII * ' ')
else:
hmstrans = string.maketrans(_ASCII_LETTERS, _NASCII * ' ')
if isinstance(ra, list):
rastr = ':'.join(ra)
elif isinstance(ra, float):
rastr = None
pos_ra = ra
elif ra.find(':') < 0:
# convert any non-numeric characters to spaces
# (we already know the units)
rastr = ra.translate(hmstrans).strip()
        rastr = rastr.replace('  ', ' ')
# convert 'nn nn nn.nn' to final 'nn:nn:nn.nn' string
rastr = rastr.replace(' ', ':')
else:
rastr = ra
if isinstance(dec, list):
decstr = ':'.join(dec)
elif isinstance(dec, float):
decstr = None
pos_dec = dec
elif dec.find(':') < 0:
decstr = dec.translate(hmstrans).strip()
        decstr = decstr.replace('  ', ' ')
decstr = decstr.replace(' ', ':')
else:
decstr = dec
if rastr is None:
pos = (pos_ra, pos_dec)
else:
pos_coord = coords.SkyCoord(rastr + ' ' + decstr,
unit=(u.hourangle, u.deg))
pos = (pos_coord.ra.deg, pos_coord.dec.deg)
return pos
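# --- Illustrative sketch (not part of drizzlepac) ---------------------------
# radec_hmstodd() above accepts sexagesimal strings in several formats and
# converts them to decimal degrees with astropy.coordinates; plain floats are
# passed through unchanged. The helper below only demonstrates the expected
# call pattern (the coordinate values are made up).
def _example_radec_hmstodd():
    # "nn:nn:nn.nn" style input for RA (hours) and Dec (degrees):
    ra_deg, dec_deg = radec_hmstodd('00:42:44.3', '+41:16:09')
    # Already-decimal floats come straight back:
    passthrough = radec_hmstodd(10.684, 41.269)
    return (ra_deg, dec_deg), passthrough
# -----------------------------------------------------------------------------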
def parse_exclusions(exclusions):
""" Read in exclusion definitions from file named by 'exclusions'
and return a list of positions and distances
"""
fname = fileutil.osfn(exclusions)
if os.path.exists(fname):
with open(fname) as f:
flines = f.readlines()
else:
print('No valid exclusions file "', fname, '" could be found!')
print('Skipping application of exclusions files to source catalogs.')
return None
# Parse out lines which can be interpreted as positions and distances
exclusion_list = []
units = None
for line in flines:
if line[0] == '#' or 'global' in line[:6]:
continue
# Only interpret the part of the line prior to the comment
# if a comment has been attached to the line
if '#' in line:
line = line.split('#')[0].rstrip()
if units is None:
units = 'pixels'
if line[:3] in ['fk4', 'fk5', 'sky']:
units = 'sky'
if line[:5] in ['image', 'physi', 'pixel']:
units = 'pixels'
continue
if 'circle(' in line:
nline = line.replace('circle(', '')
nline = nline.replace(')', '')
nline = nline.replace('"', '')
vals = nline.split(',')
if ':' in vals[0]:
posval = vals[0] + ' ' + vals[1]
else:
posval = (float(vals[0]), float(vals[1]))
else:
# Try to interpret unformatted line
if ',' in line:
split_tok = ','
else:
split_tok = ' '
vals = line.split(split_tok)
if len(vals) == 3:
if ':' in vals[0]:
posval = vals[0] + ' ' + vals[1]
else:
posval = (float(vals[0]), float(vals[1]))
else:
continue
exclusion_list.append(
{'pos': posval, 'distance': float(vals[2]), 'units': units}
)
return exclusion_list
def parse_colname(colname):
""" Common function to interpret input column names provided by the user.
This function translates column specification provided by the user
into a column number.
Notes
-----
This function will understand the following inputs::
'1,2,3' or 'c1,c2,c3' or ['c1','c2','c3']
'1-3' or 'c1-c3'
'1:3' or 'c1:c3'
'1 2 3' or 'c1 c2 c3'
'1' or 'c1'
1
Parameters
----------
colname :
Column name or names to be interpreted
Returns
-------
cols : list
The return value will be a list of strings.
"""
if isinstance(colname, list):
cname = ''
for c in colname:
cname += str(c) + ','
cname = cname.rstrip(',')
elif isinstance(colname, int) or colname.isdigit():
cname = str(colname)
else:
cname = colname
if 'c' in cname[0]:
cname = cname.replace('c', '')
ctok = None
cols = None
if '-' in cname:
ctok = '-'
if ':' in cname:
ctok = ':'
if ctok is not None:
cnums = cname.split(ctok)
c = list(range(int(cnums[0]), int(cnums[1]) + 1))
cols = [str(i) for i in c]
if cols is None:
ctok = ',' if ',' in cname else ' '
cols = cname.split(ctok)
return cols
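# --- Illustrative sketch (not part of drizzlepac) ---------------------------
# parse_colname() above normalizes the user-facing column specifications into
# a list of column-number strings, e.g.
#     parse_colname('1,2,3') -> ['1', '2', '3']
#     parse_colname('c1-c3') -> ['1', '2', '3']
#     parse_colname(1)       -> ['1']
def _example_parse_colname():
    assert parse_colname('1,2,3') == ['1', '2', '3']
    assert parse_colname('c1-c3') == ['1', '2', '3']
    assert parse_colname(1) == ['1']
# -----------------------------------------------------------------------------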
def readcols(infile, cols=None):
""" Function which reads specified columns from either FITS tables or
ASCII files
This function reads in the columns specified by the user into numpy
arrays regardless of the format of the input table (ASCII or FITS
table).
Parameters
----------
infile : string
Filename of the input file
cols : string or list of strings
Columns to be read into arrays
Returns
-------
outarr : array
Numpy array or arrays of columns from the table
"""
if _is_str_none(infile) is None:
return None
if infile.endswith('.fits'):
outarr = read_FITS_cols(infile, cols=cols)
else:
outarr = read_ASCII_cols(infile, cols=cols)
return outarr
def read_FITS_cols(infile, cols=None): # noqa: N802
""" Read columns from FITS table """
with fits.open(infile, memmap=False) as ftab:
extnum = 0
extfound = False
for extn in ftab:
if 'tfields' in extn.header:
extfound = True
break
extnum += 1
if not extfound:
print('ERROR: No catalog table found in ', infile)
raise ValueError
# Now, read columns from the table in this extension if no column names
# were provided by user, simply read in all columns from table
if _is_str_none(cols[0]) is None:
cols = ftab[extnum].data.names
# Define the output
outarr = [ftab[extnum].data.field(c) for c in cols]
return outarr
def read_ASCII_cols(infile, cols=[1, 2, 3]): # noqa: N802
""" Interpret input ASCII file to return arrays for specified columns.
Notes
-----
The specification of the columns should be expected to have lists for
each 'column', with all columns in each list combined into a single
entry.
For example::
cols = ['1,2,3','4,5,6',7]
where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
values and 7 represents the flux value for a total of 3 requested
columns of data to be returned.
Returns
-------
outarr : list of arrays
The return value will be a list of numpy arrays, one for each
'column'.
"""
# build dictionary representing format of each row
# Format of dictionary: {'colname':col_number,...}
# This provides the mapping between column name and column number
coldict = {}
with open(infile, 'r') as f:
flines = f.readlines()
for l in flines: # interpret each line from catalog file
if l[0].lstrip() == '#' or l.lstrip() == '':
continue
else:
# convert first row of data into column definitions using indices
coldict = {str(i + 1): i for i, _ in enumerate(l.split())}
break
numcols = len(cols)
outarr = [[] for _ in range(numcols)]
convert_radec = False
# Now, map specified columns to columns in file and populate output arrays
for l in flines: # interpret each line from catalog file
l = l.strip()
lspl = l.split()
# skip blank lines, comment lines, or lines with
# fewer columns than requested by user
if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l:
continue
# For each 'column' requested by user, pull data from row
for c, i in zip(cols, list(range(numcols))):
cnames = parse_colname(c)
if len(cnames) > 1:
# interpret multi-column specification as one value
outval = ''
for cn in cnames:
cnum = coldict[cn]
cval = lspl[cnum]
outval += cval + ' '
outarr[i].append(outval)
convert_radec = True
else:
# pull single value from row for this column
cnum = coldict[cnames[0]]
if isfloat(lspl[cnum]):
cval = float(lspl[cnum])
else:
cval = lspl[cnum]
# Check for multi-column values given as "nn:nn:nn.s"
if ':' in cval:
cval = cval.replace(':', ' ')
convert_radec = True
outarr[i].append(cval)
# convert multi-column RA/Dec specifications
if convert_radec:
outra = []
outdec = []
for ra, dec in zip(outarr[0], outarr[1]):
radd, decdd = radec_hmstodd(ra, dec)
outra.append(radd)
outdec.append(decdd)
outarr[0] = outra
outarr[1] = outdec
# convert all lists to numpy arrays
for c in range(len(outarr)):
outarr[c] = np.array(outarr[c])
return outarr
def write_shiftfile(image_list, filename, outwcs='tweak_wcs.fits'):
""" Write out a shiftfile for a given list of input Image class objects
"""
rows = ''
nrows = 0
for img in image_list:
row = img.get_shiftfile_row()
if row is not None:
rows += row
nrows += 1
if nrows == 0: # If there are no fits to report, do not write out a file
return
# write out reference WCS now
if os.path.exists(outwcs):
os.remove(outwcs)
p = fits.HDUList()
p.append(fits.PrimaryHDU())
p.append(createWcsHDU(image_list[0].refWCS))
p.writeto(outwcs)
# Write out shiftfile to go with reference WCS
with open(filename, 'w') as f:
f.write('# frame: output\n')
f.write('# refimage: %s[wcs]\n' % outwcs)
f.write('# form: delta\n')
f.write('# units: pixels\n')
f.write(rows)
print('Writing out shiftfile :', filename)
def createWcsHDU(wcs): # noqa: N802
""" Generate a WCS header object that can be used to populate a reference
WCS HDU.
For most applications, stwcs.wcsutil.HSTWCS.wcs2header()
will work just as well.
"""
header = wcs.to_header()
header['EXTNAME'] = 'WCS'
header['EXTVER'] = 1
# Now, update original image size information
header['NPIX1'] = (wcs.pixel_shape[0], "Length of array axis 1")
header['NPIX2'] = (wcs.pixel_shape[1], "Length of array axis 2")
header['PIXVALUE'] = (0.0, "values of pixels in array")
if hasattr(wcs, 'orientat'):
orientat = wcs.orientat
else:
# find orientat from CD or PC matrix
if wcs.wcs.has_cd():
cd12 = wcs.wcs.cd[0][1]
cd22 = wcs.wcs.cd[1][1]
elif wcs.wcs.has_pc():
cd12 = wcs.wcs.cdelt[0] * wcs.wcs.pc[0][1]
cd22 = wcs.wcs.cdelt[1] * wcs.wcs.pc[1][1]
else:
            raise ValueError("Invalid WCS: WCS contains neither "
                             "a CD nor a PC matrix.")
orientat = np.rad2deg(np.arctan2(cd12, cd22))
header['ORIENTAT'] = (orientat, "position angle of "
"image y axis (deg. e of n)")
return fits.ImageHDU(None, header)
#
# Code used for testing source finding algorithms
#
@deprecated(since='3.0.0', name='idlgauss_convolve', warning_type=Warning)
def idlgauss_convolve(image, fwhm):
sigmatofwhm = 2 * np.sqrt(2 * np.log(2))
radius = 1.5 * fwhm / sigmatofwhm # Radius is 1.5 sigma
if radius < 1.0:
radius = 1.0
fwhm = sigmatofwhm / 1.5
print("WARNING!!! Radius of convolution box smaller than one.")
print("Setting the 'fwhm' to minimum value, %f." % fwhm)
sigsq = (fwhm / sigmatofwhm)**2 # sigma squared
nhalf = int(radius) # Center of the kernel
nbox = 2 * nhalf + 1 # Number of pixels inside of convolution box
# x,y coordinates of the kernel:
kern_y, kern_x = np.ix_(np.arange(nbox), np.arange(nbox))
# Compute the square of the distance to the center:
g = (kern_x - nhalf)**2 + (kern_y - nhalf)**2
# We make a mask to select the inner circle of radius "radius":
mask = g <= radius**2
# The number of pixels in the mask within the inner circle:
nmask = mask.sum()
g = np.exp(-0.5 * g / sigsq) # We make the 2D gaussian profile
# Convolving the image with a kernel representing a gaussian
# (which is assumed to be the psf).
# For the kernel, values further than "radius" are equal to zero
c = g * mask
# We normalize the gaussian kernel
c[mask] = (c[mask] - c[mask].mean()) / (c[mask].var() * nmask)
# c1 will be used to the test the roundness
c1 = g[nhalf]
c1 = (c1 - c1.mean()) / ((c1**2).sum() - c1.mean())
# Convolve image with kernel "c":
h = signal.convolve2d(image, c, boundary='fill', mode='same', fillvalue=0)
h[:nhalf, :] = 0 # Set the sides to zero in order to avoid border effects
h[-nhalf:, :] = 0
h[:, :nhalf] = 0
h[:, -nhalf:] = 0
return h, c1
def gauss_array(nx, ny=None, fwhm=1.0, sigma_x=None, sigma_y=None,
zero_norm=False):
""" Computes the 2D Gaussian with size nx*ny.
Parameters
----------
nx : int
ny : int [Default: None]
        Size of the output array for the generated Gaussian. If ny is None,
        the output will be an nx x nx array.
fwhm : float [Default: 1.0]
Full-width, half-maximum of the Gaussian to be generated
sigma_x : float [Default: None]
sigma_y : float [Default: None]
Sigma_x and sigma_y are the stddev of the Gaussian functions.
    zero_norm : bool [Default: False]
        When True, the mean is subtracted from the kernel so that it sums to
        zero; otherwise the kernel is normalized to a sum of 1.
Returns
-------
gauss_arr : array
A numpy array with the generated gaussian function
"""
if ny is None:
ny = nx
if sigma_x is None:
if fwhm is None:
            raise ValueError('A value for either "fwhm" or "sigma_x" needs '
                             'to be specified!')
else:
# Convert input FWHM into sigma
sigma_x = fwhm / (2 * np.sqrt(2 * np.log(2)))
if sigma_y is None:
sigma_y = sigma_x
xradius = nx // 2
yradius = ny // 2
# Create grids of distance from center in X and Y
xarr = np.abs(np.arange(-xradius, xradius + 1))
yarr = np.abs(np.arange(-yradius, yradius + 1))
hnx = gauss(xarr, sigma_x)
hny = gauss(yarr, sigma_y)
hny = hny.reshape((ny, 1))
h = hnx * hny
# Normalize gaussian kernel to a sum of 1
h = h / np.abs(h).sum()
if zero_norm:
h -= h.mean()
return h
def gauss(x, sigma):
""" Compute 1-D value of gaussian at position x relative to center."""
return (np.exp(-np.power(x, 2) / (2 * np.power(sigma, 2))) /
(sigma * np.sqrt(2 * np.pi)))
# Plotting Utilities for drizzlepac
def make_vector_plot(coordfile, columns=[1, 2, 3, 4], data=None,
figure_id=None, title=None, axes=None, every=1,
labelsize=8, ylimit=None, limit=None, xlower=None,
ylower=None, output=None, headl=4, headw=3,
xsh=0.0, ysh=0.0, fit=None, scale=1.0, vector=True,
textscale=5, append=False, linfit=False, rms=True,
plotname=None):
""" Convert a XYXYMATCH file into a vector plot or set of residuals plots.
This function provides a single interface for generating either a
vector plot of residuals or a set of 4 plots showing residuals.
The data being plotted can also be adjusted for a linear fit
on-the-fly.
Parameters
----------
coordfile : string
Name of file with matched sets of coordinates. This input file can
be a file compatible for use with IRAF's geomap.
    columns : list [Default: [1, 2, 3, 4]]
        Column numbers for the X,Y positions from each image
data : list of arrays
If specified, this can be used to input matched data directly
title : string
Title to be used for the generated plot
axes : list
List of X and Y min/max values to customize the plot axes
every : int [Default: 1]
Slice value for the data to be plotted
limit : float
Radial offset limit for selecting which sources are included in
the plot
labelsize : int [Default: 8] or str
Font size to use for tick labels, either in font points or as a
string understood by tick_params().
ylimit : float
Limit to use for Y range of plots.
xlower : float
ylower : float
Limit in X and/or Y offset for selecting which sources are included
in the plot
    output : string
        Filename of output file to which the matched positions and residuals
        used for the plot will be written
headl : int [Default: 4]
Length of arrow head to be used in vector plot
headw : int [Default: 3]
Width of arrow head to be used in vector plot
xsh : float
ysh : float
Shift in X and Y from linear fit to be applied to source positions
from the first image
scale : float
Scale from linear fit to be applied to source positions from the
first image
    fit : array
        Array of linear-fit coefficients (rotation and scale) in X and Y
        to be applied to source positions from the first image
vector : bool [Default: True]
Specifies whether or not to generate a vector plot. If False, task
will generate a set of 4 residuals plots instead
textscale : int [Default: 5]
Scale factor for text used for labelling the generated plot
append : bool [Default: False]
If True, will overplot new plot on any pre-existing plot
linfit : bool [Default: False]
If True, a linear fit to the residuals will be generated and
added to the generated residuals plots
rms : bool [Default: True]
Specifies whether or not to report the RMS of the residuals as a
label on the generated plot(s).
plotname : str [Default: None]
Write out plot to a file with this name if specified.
"""
from matplotlib import pyplot as plt
if data is None:
data = readcols(coordfile, cols=columns)
xy1x = data[0]
xy1y = data[1]
xy2x = data[2]
xy2y = data[3]
numpts = xy1x.shape[0]
if fit is not None:
xy1x, xy1y = apply_db_fit(data, fit, xsh=xsh, ysh=ysh)
dx = xy2x - xy1x
dy = xy2y - xy1y
else:
dx = xy2x - xy1x - xsh
dy = xy2y - xy1y - ysh
# apply scaling factor to deltas
dx *= scale
dy *= scale
print('Total # points: {:d}'.format(len(dx)))
if limit is not None:
indx = np.sqrt(dx**2 + dy**2) <= limit
dx = dx[indx].copy()
dy = dy[indx].copy()
xy1x = xy1x[indx].copy()
xy1y = xy1y[indx].copy()
if xlower is not None:
xindx = np.abs(dx) >= xlower
dx = dx[xindx].copy()
dy = dy[xindx].copy()
xy1x = xy1x[xindx].copy()
xy1y = xy1y[xindx].copy()
print('# of points after clipping: {:d}'.format(len(dx)))
dr = np.sqrt(dx**2 + dy**2)
max_vector = dr.max()
if output is not None:
write_xy_file(output, [xy1x, xy1y, dx, dy])
fig = plt.figure(num=figure_id)
if not append:
plt.clf()
if vector:
dxs = imagestats.ImageStats(dx.astype(np.float32))
dys = imagestats.ImageStats(dy.astype(np.float32))
minx = xy1x.min()
maxx = xy1x.max()
miny = xy1y.min()
maxy = xy1y.max()
plt_xrange = maxx - minx
plt_yrange = maxy - miny
qplot = plt.quiver(xy1x[::every], xy1y[::every], dx[::every],
dy[::every], units='y', headwidth=headw,
headlength=headl)
key_dx = 0.01 * plt_xrange
key_dy = 0.005 * plt_yrange * textscale
maxvec = max_vector / 2.
key_len = round(maxvec + 0.005, 2)
plt.xlabel('DX: %.4f to %.4f +/- %.4f' % (dxs.min, dxs.max,
dxs.stddev))
plt.ylabel('DY: %.4f to %.4f +/- %.4f' % (dys.min, dys.max,
dys.stddev))
plt.title(r"$Vector\ plot\ of\ %d/%d\ residuals:\ %s$" %
(xy1x.shape[0], numpts, title))
plt.quiverkey(qplot, minx + key_dx, miny - key_dy, key_len,
"%0.2f pixels" % (key_len),
coordinates='data', labelpos='E', labelcolor='Maroon',
color='Maroon')
else:
plot_defs = [[xy1x, dx, "X (pixels)", "DX (pixels)"],
[xy1y, dx, "Y (pixels)", "DX (pixels)"],
[xy1x, dy, "X (pixels)", "DY (pixels)"],
[xy1y, dy, "Y (pixels)", "DY (pixels)"]]
if axes is None:
# Compute a global set of axis limits for all plots
minx = min(xy1x.min(), xy1y.min())
maxx = max(xy1x.max(), xy1y.max())
miny = min(dx.min(), dy.min())
maxy = max(dx.max(), dy.max())
else:
minx = axes[0][0]
maxx = axes[0][1]
miny = axes[1][0]
maxy = axes[1][1]
if ylimit is not None:
miny = -ylimit
maxy = ylimit
rms_labelled = False
if title is None:
fig.suptitle("Residuals [%d/%d]" % (xy1x.shape[0], numpts),
ha='center', fontsize=labelsize + 6)
else:
# This definition of the title supports math symbols in the title
fig.suptitle(r"$" + title + "$", ha='center',
fontsize=labelsize + 6)
for pnum, p in enumerate(plot_defs):
pn = pnum + 1
ax = fig.add_subplot(2, 2, pn)
plt.plot(
p[0], p[1], 'b.',
label='RMS(X) = %.4f, RMS(Y) = %.4f' % (dx.std(), dy.std())
)
lx = [int((p[0].min() - 500) / 500) * 500,
int((p[0].max() + 500) / 500) * 500]
plt.plot(lx, [0.0, 0.0], 'k', linewidth=3)
plt.axis([minx, maxx, miny, maxy])
if rms and not rms_labelled:
leg_handles, leg_labels = ax.get_legend_handles_labels()
fig.legend(leg_handles, leg_labels, loc='center left',
fontsize='small', frameon=False,
bbox_to_anchor=(0.33, 0.51), borderaxespad=0)
rms_labelled = True
ax.tick_params(labelsize=labelsize)
# Fine-tune figure; hide x ticks for top plots and y ticks for
# right plots
if pn <= 2:
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel(plot_defs[pnum][2])
if pn % 2 == 0:
plt.setp(ax.get_yticklabels(), visible=False)
else:
ax.set_ylabel(plot_defs[pnum][3])
if linfit:
lxr = int((lx[-1] - lx[0]) / 100)
lyr = int((p[1].max() - p[1].min()) / 100)
a = np.vstack([p[0], np.ones(len(p[0]))]).T
m, c = np.linalg.lstsq(a, p[1])[0]
yr = [m * lx[0] + c, lx[-1] * m + c]
plt.plot([lx[0], lx[-1]], yr, 'r')
plt.text(
lx[0] + lxr, p[1].max() + lyr,
"%0.5g*x + %0.5g [%0.5g,%0.5g]" % (m, c, yr[0], yr[1]),
color='r'
)
plt.draw()
    if plotname:
        suffix = plotname[-4:]
        if '.' not in suffix:
            plotname += '.png'
            format = 'png'
        elif suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:
            format = suffix[1:]
        else:
            # fall back to PNG for unrecognized extensions
            plotname += '.png'
            format = 'png'
        plt.savefig(plotname, format=format)
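# Illustrative (hypothetical) call of make_vector_plot, assuming 'matches.txt'
# is a geomap-style file of matched X,Y positions from two images:
#     make_vector_plot('matches.txt', vector=True, title='fit residuals',
#                      plotname='residuals.png')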
def apply_db_fit(data, fit, xsh=0.0, ysh=0.0):
xy1x = data[0]
xy1y = data[1]
if fit is not None:
xy1 = np.zeros((xy1x.shape[0], 2), np.float64)
xy1[:, 0] = xy1x
xy1[:, 1] = xy1y
xy1 = np.dot(xy1, fit)
xy1x = xy1[:, 0] + xsh
xy1y = xy1[:, 1] + ysh
return xy1x, xy1y
def write_xy_file(outname, xydata, append=False, format=["%20.6f"]):
if not isinstance(xydata, list):
xydata = list(xydata)
if not append:
if os.path.exists(outname):
os.remove(outname)
with open(outname, 'a+') as f:
for row in range(len(xydata[0][0])):
outstr = ""
for cols, fmts in zip(xydata, format):
for col in range(len(cols)):
outstr += fmts % (cols[col][row])
f.write(outstr + "\n")
print('wrote XY data to: ', outname)
@deprecated(since='3.0.0', name='find_xy_peak', warning_type=Warning)
def find_xy_peak(img, center=None, sigma=3.0):
""" Find the center of the peak of offsets """
# find level of noise in histogram
istats = imagestats.ImageStats(img.astype(np.float32), nclip=1,
fields='stddev,mode,mean,max,min')
if istats.stddev == 0.0:
istats = imagestats.ImageStats(img.astype(np.float32),
fields='stddev,mode,mean,max,min')
imgsum = img.sum()
# clip out all values below mean+3*sigma from histogram
imgc = img[:, :].copy()
imgc[imgc < istats.mode + istats.stddev * sigma] = 0.0
# identify position of peak
yp0, xp0 = np.where(imgc == imgc.max())
# Perform bounds checking on slice from img
ymin = max(0, int(yp0[0]) - 3)
ymax = min(img.shape[0], int(yp0[0]) + 4)
xmin = max(0, int(xp0[0]) - 3)
xmax = min(img.shape[1], int(xp0[0]) + 4)
# take sum of at most a 7x7 pixel box around peak
xp_slice = (slice(ymin, ymax),
slice(xmin, xmax))
yp, xp = ndimage.measurements.center_of_mass(img[xp_slice])
if np.isnan(xp) or np.isnan(yp):
xp = 0.0
yp = 0.0
flux = 0.0
zpqual = None
else:
xp += xp_slice[1].start
yp += xp_slice[0].start
# compute S/N criteria for this peak: flux/sqrt(mean of rest of array)
flux = imgc[xp_slice].sum()
delta_size = float(img.size - imgc[xp_slice].size)
if delta_size == 0:
delta_size = 1
delta_flux = float(imgsum - flux)
if flux > imgc[xp_slice].max():
delta_flux = flux - imgc[xp_slice].max()
else:
delta_flux = flux
zpqual = flux / np.sqrt(delta_flux / delta_size)
if np.isnan(zpqual) or np.isinf(zpqual):
zpqual = None
if center is not None:
xp -= center[0]
yp -= center[1]
flux = imgc[xp_slice].max()
del imgc
return xp, yp, flux, zpqual
def plot_zeropoint(pars):
""" Plot 2d histogram.
Pars will be a dictionary containing:
data, figure_id, vmax, title_str, xp,yp, searchrad
"""
from matplotlib import pyplot as plt
xp = pars['xp']
yp = pars['yp']
searchrad = int(pars['searchrad'] + 0.5)
plt.figure(num=pars['figure_id'])
plt.clf()
if pars['interactive']:
plt.ion()
else:
plt.ioff()
plt.imshow(pars['data'], vmin=0, vmax=pars['vmax'],
interpolation='nearest')
plt.viridis()
plt.colorbar()
plt.title(pars['title_str'])
plt.plot(xp + searchrad, yp + searchrad, color='red', marker='+',
markersize=24)
plt.plot(searchrad, searchrad, color='yellow', marker='+', markersize=120)
plt.text(searchrad, searchrad, "Offset=0,0", verticalalignment='bottom',
color='yellow')
plt.xlabel("Offset in X (pixels)")
plt.ylabel("Offset in Y (pixels)")
if pars['interactive']:
plt.show()
if pars['plotname']:
suffix = pars['plotname'][-4:]
output = pars['plotname']
if '.' not in suffix:
output += '.png'
format = 'png'
else:
if suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:
format = suffix[1:]
plt.savefig(output, format=format)
@deprecated(since='3.0.0', name='build_xy_zeropoint', warning_type=Warning)
def build_xy_zeropoint(imgxy, refxy, searchrad=3.0, histplot=False,
figure_id=1, plotname=None, interactive=True):
""" Create a matrix which contains the delta between each XY position and
each UV position.
"""
print('Computing initial guess for X and Y shifts...')
# run C function to create ZP matrix
zpmat = cdriz.arrxyzero(imgxy.astype(np.float32), refxy.astype(np.float32),
searchrad)
xp, yp, flux, zpqual = find_xy_peak(zpmat, center=(searchrad, searchrad))
if zpqual is not None:
print('Found initial X and Y shifts of ', xp, yp)
print(' with significance of ', zpqual, 'and ', flux, ' matches')
else:
# try with a lower sigma to detect a peak in a sparse set of sources
xp, yp, flux, zpqual = find_xy_peak(
zpmat, center=(searchrad, searchrad), sigma=1.0
)
if zpqual:
print('Found initial X and Y shifts of ', xp, yp)
print(' with significance of ', zpqual, 'and ',
flux, ' matches')
else:
print('!' * 80)
print('!')
print('! WARNING: No valid shift found within a search radius of ',
searchrad, ' pixels.')
print('!')
print('!' * 80)
if histplot:
zpstd = flux // 5
if zpstd < 10:
zpstd = 10
if zpqual is None:
zpstd = 10
title_str = ("Histogram of offsets: Peak has %d matches at "
"(%0.4g, %0.4g)" % (flux, xp, yp))
plot_pars = {'data': zpmat, 'figure_id': figure_id, 'vmax': zpstd,
'xp': xp, 'yp': yp, 'searchrad': searchrad,
'title_str': title_str, 'plotname': plotname,
'interactive': interactive}
plot_zeropoint(plot_pars)
return xp, yp, flux, zpqual
@deprecated(since='3.0.0', name='build_pos_grid', warning_type=Warning)
def build_pos_grid(start, end, nstep, mesh=False):
"""
Return a grid of positions starting at X,Y given by 'start', and ending
    at X,Y given by 'end'. The line from 'start' to 'end' is sampled at
    'nstep' evenly spaced intervals; if 'mesh' is True, a full 2-D grid of
    those positions is returned.
"""
# Build X and Y arrays
dx = end[0] - start[0]
if dx < 0:
nstart = end
end = start
start = nstart
dx = -dx
stepx = dx / nstep
# Perform linear fit to find exact line that connects start and end
xarr = np.arange(start[0], end[0] + stepx / 2.0, stepx)
yarr = np.interp(xarr, [start[0], end[0]], [start[1], end[1]])
# create grid of positions
if mesh:
xa, ya = np.meshgrid(xarr, yarr)
xarr = xa.ravel()
yarr = ya.ravel()
return xarr, yarr
```
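The `gauss` / `gauss_array` helpers above boil down to: convert FWHM to sigma, evaluate a 1-D Gaussian on a symmetric pixel grid, take the outer product, and normalize. Below is a minimal self-contained sketch of that math, re-derived with plain NumPy rather than imported from drizzlepac so it runs standalone; the helper name `gaussian_kernel` and the size/FWHM values are illustrative, not part of the package.
```python
import numpy as np

def gaussian_kernel(nx, fwhm=3.0):
    """Normalized nx x nx 2-D Gaussian kernel (nx odd)."""
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))    # FWHM -> sigma
    x = np.arange(-(nx // 2), nx // 2 + 1)
    g1d = np.exp(-x**2 / (2.0 * sigma**2)) / (sigma * np.sqrt(2.0 * np.pi))
    kernel = g1d[np.newaxis, :] * g1d[:, np.newaxis]     # outer product
    return kernel / np.abs(kernel).sum()                 # normalize to a sum of 1

if __name__ == "__main__":
    k = gaussian_kernel(7, fwhm=3.0)
    print(k.shape, round(float(k.sum()), 6))             # (7, 7) 1.0
```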
#### File: drizzlepac/tests/combine_ecsv_files.py
```python
import argparse
import datetime
import glob
import os
import sys
from astropy.io import ascii
from astropy.table import Table, vstack
# ------------------------------------------------------------------------------------------------------------
def find_files(input_file_basepath):
"""Find the ecsv files.
Parameters
----------
input_file_basepath : string
Path to start recursive search for the .ecsv files.
Returns
-------
file_list: list
List of ecsv full filenames
"""
# Search for ecsv files ignoring '_current' sym links to existing directories
file_list = glob.glob("{}**/*[!current]/*.ecsv".format(input_file_basepath), recursive=True)
n_found = len(file_list)
if n_found == 0:
sys.exit("No .ecsv files found. Exiting...")
elif n_found == 1:
print("{} ecsv file found.\n".format(n_found))
else:
print("{} ecsv files found.\n".format(n_found))
return(file_list)
# ------------------------------------------------------------------------------------------------------------
def generate_output_file(ecsv_file_list, output_filename, startingDT, clobber):
"""Generate combined output ecsv file.
Parameters
----------
ecsv_file_list : list
List of ecsv full filenames (such as those generated by `find_files`)
output_filename : string
Name of the output combined .ecsv file.
startingDT : datetime object
starting date/time
clobber : Boolean
Overwrite existing files with the same name as output_filename?
Returns
-------
Nothing.
"""
n_found = len(ecsv_file_list)
for filectr, ecsv_filename in enumerate(ecsv_file_list, start=1):
table_data = ascii.read(ecsv_filename, format='ecsv') # Read ecsv file
# print incremental status update msg
padding = " "*(len(str(n_found))-len(str(filectr)))
if len(table_data) < 2:
plural_string = ""
else:
plural_string = "s"
print("{}{}/{}: added {} row{} from {}.".format(padding,
filectr,
n_found,
len(table_data),
plural_string,
ecsv_filename))
# add new column with dataset name info to the 0th (left most) position in the table.
dataset = os.path.basename(ecsv_filename)[:-5] # scrape dataset name out of ecsv filename
dataset_column = Table.Column(name='datasetName', data=[dataset]*len(table_data)) # make new column
table_data.add_column(dataset_column, index=0) # add dataset column to table data to append.
        if filectr == 1:  # use the data from the first ecsv file to initialize out_data
            out_data = table_data.copy()
        else:  # append data from each subsequent ecsv file to out_data
            out_data = vstack([out_data, table_data])
ascii.write(out_data, output_filename, format='ecsv', overwrite=clobber) # write output file.
if n_found == 1:
file_plural_string = ""
else:
file_plural_string = "s"
total_rows = len(out_data) # display total number of rows in output file.
if total_rows == 1:
row_plural_string = ""
else:
row_plural_string = "s"
print("\nWrote {} row{} from {} input file{} to output file {}".format(total_rows,
row_plural_string,
n_found,
file_plural_string,
output_filename))
total_runtime=(datetime.datetime.now() - startingDT).total_seconds()
print('Total processing time: {} seconds'.format(total_runtime))
print('Average time per row: {} seconds'.format(total_runtime/total_rows))
# out_data.pprint(max_width=-1)
# ------------------------------------------------------------------------------------------------------------
def run_ecsv_combine(clobber=False, input_file_basepath=None, output_filename=None):
"""Main calling subroutine.
Parameters
----------
clobber : Boolean
Overwrite existing files with the same name as output_filename?
input_file_basepath : string
Path to start recursive search for the .ecsv files.
output_filename : string
Name of the output combined .ecsv file.
Returns
-------
Nothing.
"""
startingDT = datetime.datetime.now()
# 0a: set up input arg defaults,
if not input_file_basepath:
input_file_basepath = os.getcwd()
if not output_filename:
output_filename = "{}/{}.ecsv".format(os.getcwd(), os.getcwd().split("/")[-1])
if clobber is False and os.path.exists(output_filename) is True:
sys.exit("Output file {} already exists. Please rename the existing file and rerun or rerun with the "
"'clobber' option turned on (-c) to overwrite the existing file.".format(output_filename))
# 0c: make sure input_file_basepath always ends with a "/".
if not input_file_basepath.endswith("/"):
input_file_basepath += "/"
# 1: create list of ecsv files to be combined.
ecsv_file_list = find_files(input_file_basepath)
# 2: combine individual ecsv files into a single monolithic file.
generate_output_file(ecsv_file_list, output_filename, startingDT, clobber)
# ------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Find and combine all individual ecsv files into a single '
'file.')
parser.add_argument('-c', '--clobber', required=False, action='store_true', help='If this option is '
'turned on, any existing file with same name as the output_filename will be '
'overwritten.')
parser.add_argument('-i', '--input_file_basepath', required=False, default=None, help='path to start '
'recursive search for the .ecsv files. If not specified, the current working '
'directory will be used.')
parser.add_argument('-o', '--output_filename', required=False, default=None, help='Name of the output '
'combined .ecsv file. This may include a full file path. If not specified, the file '
'will be named "<CURRENT WORKING DIRECTORY>/<CURRENT WORKING DIRECTORY>.ecsv".')
args = parser.parse_args()
run_ecsv_combine(clobber=args.clobber,
input_file_basepath=args.input_file_basepath,
output_filename=args.output_filename)
```
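The core pattern in `generate_output_file` above, prepending a `datasetName` column to each table and concatenating with `vstack`, can be exercised on its own with throwaway tables. A minimal sketch; the table names, contents, and output filename are invented placeholders.
```python
from astropy.table import Table, Column, vstack

# Two stand-in tables playing the role of individual .ecsv catalogs
t1 = Table({'x': [1, 2], 'y': [10.0, 20.0]})
t2 = Table({'x': [3], 'y': [30.0]})

combined = None
for name, tab in [('dataset_a', t1), ('dataset_b', t2)]:
    # add the dataset name as the left-most column, as generate_output_file does
    tab.add_column(Column(name='datasetName', data=[name] * len(tab)), index=0)
    combined = tab.copy() if combined is None else vstack([combined, tab])

combined.write('combined_example.ecsv', format='ascii.ecsv', overwrite=True)
print(len(combined), 'rows written')  # 3 rows written
```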
#### File: tests/hla/base_test.py
```python
import os
import pytest
import math
from astropy.io import fits
from astropy.table import Table
import numpy as np
import stwcs
from stwcs import updatewcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata_root
from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds
try:
from ci_watson.artifactory_helpers import check_url
except ImportError:
from ci_watson.artifactory_helpers import _is_url as check_url
from .base_classes import BaseTest
__all__ = ['BaseHLATest', 'BaseHLAParTest', 'centroid_compare', 'BaseUnit']
@pytest.mark.usefixtures('_jail')
class BaseHLATest(BaseTest):
ignore_hdus = []
input_repo = 'hst-hla-pipeline'
results_root = 'hst-hla-pipeline-results'
output_shift_file = None
fit_limit = 0.010 # 10 milli-arcseconds
docopy = False # Do not make additional copy by default
rtol = 1e-6
refstr = 'jref'
prevref = os.environ.get(refstr)
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
reffile_lookup = ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE']
def set_environ(self):
# Enforce copies of data when TEST_BIGDATA is URL
input_dir = get_bigdata_root()
if input_dir and check_url(input_dir):
self.docopy = True
# NOTE: This could be explicitly controlled using pytest fixture
# but too many ways to do the same thing would be confusing.
# Refine this logic if using pytest fixture.
# HSTCAL cannot open remote CRDS on FTP but central storage is okay.
# So use central storage if available to avoid FTP.
if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
os.environ[self.refstr] = self.curdir + os.sep
self.use_ftp_crds = True
# Turn off Astrometry updates
os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'
def raw_from_asn(self, asn_file, suffix='_flt.fits'):
        return raw_from_asn(asn_file, suffix=suffix)
def get_input_file(self, *args, refsep='$', **kwargs):
# If user has specified action for docopy, apply it with
# default behavior being whatever was defined in the base class.
docopy = kwargs.get('docopy', self.docopy)
# Download or copy input file (e.g., RAW) into the working directory.
# The associated CRDS reference files in ``refstr`` are also
# downloaded, if necessary.
curdir = os.getcwd()
filenames = self.get_data(*args, docopy=docopy)
for filename in filenames:
ref_files = ref_from_image(filename, reffile_lookup=self.reffile_lookup)
print("Looking for {} REF_FILES: {}".format(filename, ref_files))
for ref_file in ref_files:
if ref_file.strip() == '':
continue
if refsep not in ref_file: # Local file
self.get_data('customRef', ref_file, docopy=docopy)
else:
# Start by checking to see whether IRAF variable *ref/*tab
# has been added to os.environ
refdir, refname = ref_file.split(refsep)
refdir_parent = os.path.split(refdir)[0]
# Define refdir to point to current directory if:
# i. refdir is not defined in environment already
# ii. refdir in os.environ points to another test directory
# This logic should leave refdir unchanged if it already
# points to a globally defined directory.
if refdir not in os.environ or refdir_parent in curdir:
os.environ[refdir] = curdir + os.sep
# Download from FTP, if applicable
if self.use_ftp_crds:
download_crds(ref_file, timeout=self.timeout)
return filenames
# Pytest function to support the parameterization of these classes
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
idlist = [funcargs['id'] for funcargs in funcarglist]
del argnames[argnames.index('id')]
metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
for funcargs in funcarglist], ids=idlist)
@pytest.mark.usefixtures('_jail')
class BaseHLAParTest(BaseHLATest):
params = {'test_modes':[dict(input="",
test_dir=None,
step_class=None,
step_pars=dict(),
output_truth="",
output_hdus=[])
]
}
def test_modes(self, input, test_dir, step_class, step_pars,
output_truth, output_hdus):
"""
Template method for parameterizing some tests based on JWST code.
"""
if test_dir is None:
return
self.test_dir = test_dir
self.ref_loc = [self.test_dir, 'truth']
# can be removed once all truth files have been updated
self.ignore_keywords += ['FILENAME']
input_file = self.get_data(self.test_dir, input)
result = step_class.call(input_file, **step_pars)
output_file = result.meta.filename
result.save(output_file)
result.close()
output_pars = None
if isinstance(output_truth, tuple):
output_pars = output_truth[1]
output_truth = output_truth[0]
if not output_pars:
if output_hdus:
output_spec = (output_file, output_truth, output_hdus)
else:
output_spec = (output_file, output_truth)
else:
output_spec = {'files':(output_file, output_truth),
'pars':output_pars}
outputs = [output_spec]
self.compare_outputs(outputs)
def centroid_compare(centroid):
return centroid[1]
class BaseUnit(BaseHLATest):
buff = 0
refstr = 'jref'
prevref = os.environ.get(refstr)
input_loc = 'acs'
ref_loc = 'acs'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
atol = 1.0e-5
def bound_image(self, image):
"""
Compute region where image is non-zero
"""
coords = np.nonzero(image)
ymin = coords[0].min()
ymax = coords[0].max()
xmin = coords[1].min()
xmax = coords[1].max()
return (ymin, ymax, xmin, xmax)
def centroid(self, image, size, center):
"""
Compute the centroid of a rectangular area
"""
ylo = int(center[0]) - size // 2
yhi = min(ylo + size, image.shape[0])
xlo = int(center[1]) - size // 2
xhi = min(xlo + size, image.shape[1])
center = [0.0, 0.0, 0.0]
for y in range(ylo, yhi):
for x in range(xlo, xhi):
center[0] += y * image[y,x]
center[1] += x * image[y,x]
center[2] += image[y,x]
if center[2] == 0.0: return None
center[0] /= center[2]
center[1] /= center[2]
return center
def centroid_close(self, list_of_centroids, size, point):
"""
Find if any centroid is close to a point
"""
for i in range(len(list_of_centroids)-1, -1, -1):
if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
abs(list_of_centroids[i][1] - point[1]) < size / 2):
return 1
return 0
def centroid_distances(self, image1, image2, amp, size):
"""
Compute a list of centroids and the distances between them in two images
"""
distances = []
list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
for center2, pt in zip(list_of_centroids, lst_pts):
center1 = self.centroid(image1, size, pt)
if center1 is None: continue
disty = center2[0] - center1[0]
distx = center2[1] - center1[1]
dist = math.sqrt(disty * disty + distx * distx)
dflux = abs(center2[2] - center1[2])
distances.append([dist, dflux, center1, center2])
distances.sort(key=centroid_compare)
return distances
def centroid_list(self, image, amp, size):
"""
        Find the centroids of all sources brighter than ``amp`` in an image
"""
list_of_centroids = []
list_of_points = []
points = np.transpose(np.nonzero(image > amp))
for point in points:
if not self.centroid_close(list_of_centroids, size, point):
center = self.centroid(image, size, point)
list_of_centroids.append(center)
list_of_points.append(point)
return list_of_centroids, list_of_points
def centroid_statistics(self, title, fname, image1, image2, amp, size):
"""
        Write centroid statistics comparing the differences between two images
"""
stats = ("minimum", "median", "maximum")
images = (None, None, image1, image2)
im_type = ("", "", "test", "reference")
diff = []
distances = self.centroid_distances(image1, image2, amp, size)
indexes = (0, len(distances)//2, len(distances)-1)
fd = open(fname, 'w')
fd.write("*** %s ***\n" % title)
if len(distances) == 0:
diff = [0.0, 0.0, 0.0]
fd.write("No matches!!\n")
elif len(distances) == 1:
diff = [distances[0][0], distances[0][0], distances[0][0]]
fd.write("1 match\n")
fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
for j in range(2, 4):
ylo = int(distances[0][j][0]) - (1+self.buff)
yhi = int(distances[0][j][0]) + (2+self.buff)
xlo = int(distances[0][j][1]) - (1+self.buff)
xhi = int(distances[0][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
(im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
fd.write(str(subimage) + "\n")
else:
fd.write("%d matches\n" % len(distances))
for k in range(0,3):
i = indexes[k]
diff.append(distances[i][0])
fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
for j in range(2, 4):
ylo = int(distances[i][j][0]) - (1+self.buff)
yhi = int(distances[i][j][0]) + (2+self.buff)
xlo = int(distances[i][j][1]) - (1+self.buff)
xhi = int(distances[i][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
(stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
fd.write(str(subimage) + "\n")
fd.close()
return tuple(diff)
def make_point_image(self, input_image, point, value):
"""
Create an image with a single point set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
output_image[point] = value
return output_image
def make_grid_image(self, input_image, spacing, value):
"""
Create an image with points on a grid set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
shape = output_image.shape
for y in range(spacing//2, shape[0], spacing):
for x in range(spacing//2, shape[1], spacing):
output_image[y,x] = value
return output_image
def print_wcs(self, title, wcs):
"""
Print the wcs header cards
"""
print("=== %s ===" % title)
print(wcs.to_header_string())
def read_image(self, filename):
"""
Read the image from a fits file
"""
hdu = fits.open(filename)
image = hdu[1].data
hdu.close()
return image
def read_wcs(self, filename):
"""
Read the wcs of a fits file
"""
hdu = fits.open(filename)
wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
hdu.close()
return wcs
def write_wcs(self, hdu, image_wcs):
"""
Update header with WCS keywords
"""
hdu.header['ORIENTAT'] = image_wcs.orientat
hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
hdu.header['VAFACTOR'] = 1.0
def write_image(self, filename, wcs, *args):
"""
        Write the image to a FITS file
"""
extarray = ['SCI', 'WHT', 'CTX']
pimg = fits.HDUList()
phdu = fits.PrimaryHDU()
phdu.header['NDRIZIM'] = 1
phdu.header['ROOTNAME'] = filename
pimg.append(phdu)
for img in args:
# Create a MEF file with the specified extname
extn = extarray.pop(0)
extname = fileutil.parseExtn(extn)
ehdu = fits.ImageHDU(data=img)
ehdu.header['EXTNAME'] = extname[0]
ehdu.header['EXTVER'] = extname[1]
self.write_wcs(ehdu, wcs)
pimg.append(ehdu)
pimg.writeto(filename)
del pimg
```
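The `params` dictionary plus the module-level `pytest_generate_tests` hook used by `BaseHLAParTest` above is easier to see in isolation. A stripped-down, hypothetical sketch of the same parameterization pattern follows; the class name, ids, and values are invented.
```python
def pytest_generate_tests(metafunc):
    # Expand each test method from the 'params' dict on its class,
    # using each entry's 'id' value as the test id.
    funcarglist = metafunc.cls.params[metafunc.function.__name__]
    argnames = sorted(funcarglist[0])
    idlist = [funcargs['id'] for funcargs in funcarglist]
    del argnames[argnames.index('id')]
    metafunc.parametrize(argnames,
                         [[funcargs[name] for name in argnames]
                          for funcargs in funcarglist],
                         ids=idlist)

class TestScaling:
    params = {'test_double': [dict(id='small', value=1, expected=2),
                              dict(id='large', value=21, expected=42)]}

    def test_double(self, value, expected):
        assert 2 * value == expected
```
Running `pytest` on a file containing this sketch collects `test_double[small]` and `test_double[large]`.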
#### File: drizzlepac/tests/resources.py
```python
import getpass
import os
import sys
import math
from io import StringIO
import shutil
import datetime
from os.path import splitext
from difflib import unified_diff
import pytest
from astropy.io import fits
from astropy.io.fits import FITSDiff
from astropy.utils.data import conf
import numpy as np
import stwcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata, generate_upload_schema
from ci_watson.hst_helpers import download_crds, ref_from_image
# Base classes for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
@pytest.mark.bigdata
class BaseCal:
prevdir = os.getcwd()
use_ftp_crds = True
timeout = 30 # seconds
tree = 'dev'
    # Tolerances used for FITS data comparisons
rtol = 1e-6
atol = 1e-5
# To be defined by instrument
refstr = ''
prevref = ''
input_loc = ''
ref_loc = ''
ignore_keywords = []
# To be defined by individual test
subdir = ''
@pytest.fixture(autouse=True)
def setup_class(self, tmpdir, envopt, pytestconfig):
"""
Run test in own dir so we can keep results separate from
other tests.
"""
if not tmpdir.ensure(self.subdir, dir=True):
p = tmpdir.mkdir(self.subdir).strpath
else:
p = tmpdir.join(self.subdir).strpath
os.chdir(p)
# NOTE: This could be explicitly controlled using pytest fixture
# but too many ways to do the same thing would be confusing.
# Refine this logic if using pytest fixture.
# HSTCAL cannot open remote CRDS on FTP but central storage is okay.
# So use central storage if available to avoid FTP.
if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
os.environ[self.refstr] = p + os.sep
self.use_ftp_crds = True
# Turn off Astrometry updates
os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'
# This controls astropy.io.fits timeout
conf.remote_timeout = self.timeout
# Update tree to point to correct environment
self.tree = envopt
# Collect pytest configuration values specified in setup.cfg or pytest.ini
self.inputs_root = pytestconfig.getini('inputs_root')[0]
self.results_root = pytestconfig.getini('results_root')[0]
def teardown_class(self):
"""Reset path and variables."""
conf.reset('remote_timeout')
os.chdir(self.prevdir)
if self.use_ftp_crds and self.prevref is not None:
os.environ[self.refstr] = self.prevref
def get_data(self, *args):
"""
Download `filename` into working directory using
`get_bigdata`. This will then return the full path to
the local copy of the file.
"""
local_file = get_bigdata(self.inputs_root, self.tree, self.input_loc, *args)
return local_file
def get_input_file(self, *args, refsep='$'):
"""
Download or copy input file (e.g., RAW) into the working directory.
The associated CRDS reference files in ``refstr`` are also
downloaded, if necessary.
"""
filename = self.get_data(*args)
ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE'])
print("Looking for REF_FILES: {}".format(ref_files))
for ref_file in ref_files:
if ref_file.strip() == '':
continue
if refsep not in ref_file: # Local file
refname = self.get_data('customRef', ref_file)
else: # Download from FTP, if applicable
refname = os.path.join(ref_file)
if self.use_ftp_crds:
download_crds(refname, self.timeout)
return filename
def compare_outputs(self, outputs, raise_error=True):
"""
Compare output with "truth" using appropriate
diff routine; namely,
``fitsdiff`` for FITS file comparisons
``unified_diff`` for ASCII products.
Parameters
----------
outputs : list of tuple
A list of tuples, each containing filename (without path)
of CALXXX output and truth, in that order.
raise_error : bool
Raise ``AssertionError`` if difference is found.
Returns
-------
report : str
Report from ``fitsdiff``.
This is part of error message if ``raise_error=True``.
"""
all_okay = True
creature_report = ''
# Create instructions for uploading results to artifactory for use
# as new comparison/truth files
testpath, testname = os.path.split(os.path.abspath(os.curdir))
# organize results by day test was run...could replace with git-hash
whoami = getpass.getuser() or 'nobody'
dt = datetime.datetime.now().strftime("%d%b%YT")
ttime = datetime.datetime.now().strftime("%H_%M_%S")
user_tag = 'NOT_CI_{}_{}'.format(whoami, ttime)
build_tag = os.environ.get('BUILD_TAG', user_tag)
build_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', 'standalone')
testdir = "{}_{}_{}".format(testname, build_tag, build_suffix)
tree = os.path.join(self.results_root, self.input_loc,
dt, testdir) + os.sep
updated_outputs = []
for actual, desired in outputs:
# Get "truth" image
s = self.get_data('truth', desired)
if s is not None:
desired = s
if actual.endswith('fits'):
# Working with FITS files...
fdiff = FITSDiff(actual, desired, rtol=self.rtol, atol=self.atol,
ignore_keywords=self.ignore_keywords)
creature_report += fdiff.report()
if not fdiff.identical:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not fdiff.identical and all_okay:
all_okay = False
else:
# Try ASCII-based diff
with open(actual) as afile:
actual_lines = afile.readlines()
with open(desired) as dfile:
desired_lines = dfile.readlines()
udiff = unified_diff(actual_lines, desired_lines,
fromfile=actual, tofile=desired)
old_stdout = sys.stdout
udiffIO = StringIO()
sys.stdout = udiffIO
sys.stdout.writelines(udiff)
sys.stdout = old_stdout
udiff_report = udiffIO.getvalue()
creature_report += udiff_report
if len(udiff_report) > 2 and all_okay:
all_okay = False
if len(udiff_report) > 2:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not all_okay:
# Write out JSON file to enable retention of different results
new_truths = [os.path.abspath(i[1]) for i in updated_outputs]
for files in updated_outputs:
print("Renaming {} as new 'truth' file: {}".format(
files[0], files[1]))
shutil.move(files[0], files[1])
log_pattern = [os.path.join(os.path.dirname(x), '*.log') for x in new_truths]
generate_upload_schema(pattern=new_truths + log_pattern,
testname=testname,
target= tree)
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
class BaseACS(BaseCal):
refstr = 'jref'
prevref = os.environ.get(refstr)
input_loc = 'acs'
ref_loc = 'acs'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
class BaseACSHRC(BaseACS):
input_loc = 'acs/hrc'
ref_loc = 'acs/hrc/ref'
class BaseACSWFC(BaseACS):
input_loc = 'acs/wfc'
ref_loc = 'acs/wfc/ref'
class BaseWFC3(BaseCal):
refstr = 'iref'
input_loc = 'wfc3'
ref_loc = 'wfc3/ref'
prevref = os.environ.get(refstr)
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
class BaseSTIS(BaseCal):
refstr = 'oref'
prevref = os.environ.get(refstr)
input_loc = 'stis'
ref_loc = 'stis/ref'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
class BaseWFPC2(BaseCal):
refstr = 'uref'
prevref = os.environ.get(refstr)
input_loc = 'wfpc2'
ref_loc = 'wfpc2/ref'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
def centroid_compare(centroid):
return centroid[1]
class BaseUnit(BaseCal):
buff = 0
refstr = 'jref'
prevref = os.environ.get(refstr)
input_loc = 'acs'
ref_loc = 'acs'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
atol = 1.0e-5
def bound_image(self, image):
"""
Compute region where image is non-zero
"""
coords = np.nonzero(image)
ymin = coords[0].min()
ymax = coords[0].max()
xmin = coords[1].min()
xmax = coords[1].max()
return (ymin, ymax, xmin, xmax)
def centroid(self, image, size, center):
"""
Compute the centroid of a rectangular area
"""
ylo = int(center[0]) - size // 2
yhi = min(ylo + size, image.shape[0])
xlo = int(center[1]) - size // 2
xhi = min(xlo + size, image.shape[1])
center = [0.0, 0.0, 0.0]
for y in range(ylo, yhi):
for x in range(xlo, xhi):
center[0] += y * image[y,x]
center[1] += x * image[y,x]
center[2] += image[y,x]
if center[2] == 0.0: return None
center[0] /= center[2]
center[1] /= center[2]
return center
def centroid_close(self, list_of_centroids, size, point):
"""
Find if any centroid is close to a point
"""
for i in range(len(list_of_centroids)-1, -1, -1):
if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
abs(list_of_centroids[i][1] - point[1]) < size / 2):
return 1
return 0
def centroid_distances(self, image1, image2, amp, size):
"""
Compute a list of centroids and the distances between them in two images
"""
distances = []
list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
for center2, pt in zip(list_of_centroids, lst_pts):
center1 = self.centroid(image1, size, pt)
if center1 is None: continue
disty = center2[0] - center1[0]
distx = center2[1] - center1[1]
dist = math.sqrt(disty * disty + distx * distx)
dflux = abs(center2[2] - center1[2])
distances.append([dist, dflux, center1, center2])
distances.sort(key=centroid_compare)
return distances
def centroid_list(self, image, amp, size):
"""
        Find the centroids of all sources brighter than ``amp`` in an image
"""
list_of_centroids = []
list_of_points = []
points = np.transpose(np.nonzero(image > amp))
for point in points:
if not self.centroid_close(list_of_centroids, size, point):
center = self.centroid(image, size, point)
list_of_centroids.append(center)
list_of_points.append(point)
return list_of_centroids, list_of_points
def centroid_statistics(self, title, fname, image1, image2, amp, size):
"""
        Write centroid statistics comparing the differences between two images
"""
stats = ("minimum", "median", "maximum")
images = (None, None, image1, image2)
im_type = ("", "", "test", "reference")
diff = []
distances = self.centroid_distances(image1, image2, amp, size)
indexes = (0, len(distances)//2, len(distances)-1)
fd = open(fname, 'w')
fd.write("*** %s ***\n" % title)
if len(distances) == 0:
diff = [0.0, 0.0, 0.0]
fd.write("No matches!!\n")
elif len(distances) == 1:
diff = [distances[0][0], distances[0][0], distances[0][0]]
fd.write("1 match\n")
fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
for j in range(2, 4):
ylo = int(distances[0][j][0]) - (1+self.buff)
yhi = int(distances[0][j][0]) + (2+self.buff)
xlo = int(distances[0][j][1]) - (1+self.buff)
xhi = int(distances[0][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
(im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
fd.write(str(subimage) + "\n")
else:
fd.write("%d matches\n" % len(distances))
for k in range(0,3):
i = indexes[k]
diff.append(distances[i][0])
fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
for j in range(2, 4):
ylo = int(distances[i][j][0]) - (1+self.buff)
yhi = int(distances[i][j][0]) + (2+self.buff)
xlo = int(distances[i][j][1]) - (1+self.buff)
xhi = int(distances[i][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
(stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
fd.write(str(subimage) + "\n")
fd.close()
return tuple(diff)
def make_point_image(self, input_image, point, value):
"""
Create an image with a single point set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
output_image[point] = value
return output_image
def make_grid_image(self, input_image, spacing, value):
"""
Create an image with points on a grid set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
shape = output_image.shape
for y in range(spacing//2, shape[0], spacing):
for x in range(spacing//2, shape[1], spacing):
output_image[y,x] = value
return output_image
def print_wcs(self, title, wcs):
"""
Print the wcs header cards
"""
print("=== %s ===" % title)
print(wcs.to_header_string())
def read_image(self, filename):
"""
Read the image from a fits file
"""
hdu = fits.open(filename)
image = hdu[1].data
hdu.close()
return image
def read_wcs(self, filename):
"""
Read the wcs of a fits file
"""
hdu = fits.open(filename)
wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
hdu.close()
return wcs
def write_wcs(self, hdu, image_wcs):
"""
Update header with WCS keywords
"""
hdu.header['ORIENTAT'] = image_wcs.orientat
hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
hdu.header['VAFACTOR'] = 1.0
def write_image(self, filename, wcs, *args):
"""
        Write the image to a FITS file
"""
extarray = ['SCI', 'WHT', 'CTX']
pimg = fits.HDUList()
phdu = fits.PrimaryHDU()
phdu.header['NDRIZIM'] = 1
phdu.header['ROOTNAME'] = filename
pimg.append(phdu)
for img in args:
# Create a MEF file with the specified extname
extn = extarray.pop(0)
extname = fileutil.parseExtn(extn)
ehdu = fits.ImageHDU(data=img)
ehdu.header['EXTNAME'] = extname[0]
ehdu.header['EXTVER'] = extname[1]
self.write_wcs(ehdu, wcs)
pimg.append(ehdu)
pimg.writeto(filename)
del pimg
def add_suffix(fname, suffix, range=None):
"""Add suffix to file name
Parameters
----------
fname: str
The file name to add the suffix to
suffix: str
The suffix to add_suffix
range: range
If specified, the set of indexes will be added to the
outputs.
Returns
-------
fname, fname_with_suffix
2-tuple of the original file name and name with suffix.
If `range` is defined, `fname_with_suffix` will be a list.
"""
fname_root, fname_ext = splitext(fname)
if range is None:
with_suffix = ''.join([
fname_root,
'_',
suffix,
fname_ext
])
else:
with_suffix = []
for idx in range:
with_suffix.append(''.join([
fname_root,
'_',
str(idx),
'_',
suffix,
fname_ext
]))
return fname, with_suffix
```
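`BaseUnit.centroid` above computes a flux-weighted centroid with explicit Python loops. The same quantity can be checked with a vectorized NumPy sketch; this version omits the upper-bound clamping of the original, and the image and box size are synthetic test values.
```python
import numpy as np

def flux_centroid(image, size, center):
    """Flux-weighted centroid of a size x size box around (y, x) = center."""
    ylo = int(center[0]) - size // 2
    xlo = int(center[1]) - size // 2
    box = image[ylo:ylo + size, xlo:xlo + size]
    total = box.sum()
    if total == 0.0:
        return None
    yy, xx = np.mgrid[ylo:ylo + box.shape[0], xlo:xlo + box.shape[1]]
    return (yy * box).sum() / total, (xx * box).sum() / total, total

image = np.zeros((20, 20))
image[9, 11] = 5.0                        # a single bright pixel
print(flux_centroid(image, 5, (10, 10)))  # (9.0, 11.0, 5.0)
```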
#### File: tests/wfpc2/test_wfpc2.py
```python
import os
import pytest
from stsci.tools import teal
from drizzlepac import astrodrizzle
from stwcs import updatewcs
from ..resources import BaseWFPC2
class TestWFPC2(BaseWFPC2):
@pytest.mark.skip(reason="disable until truth files can be updated")
def test_waiver_single(self):
""" This test confirms that drizzlepac can correcly process .
"""
# Prepare input files.
raw_inputs = ["u40x010hm_c0f.fits", "u40x010hm_c1f.fits"]
inputs = [os.path.basename(self.get_input_file('input', i))
for i in raw_inputs]
output = 'wfpc2_single_waiver'
outfile = '{}_drz.fits'.format(output)
reffile = 'reference_single_waiver.fits'
# Update WCS for all inputs
driz_inputs = updatewcs.updatewcs(inputs[0], use_db=False)
# run astrodrizzle now...
adriz_parobj = teal.load('astrodrizzle', defaults=True)
adriz_parobj['output'] = output
adriz_parobj['build'] = True
adriz_parobj['in_memory'] = False
adriz_parobj['runfile'] = 'wfpc2_single_waiver.log'
adriz_parobj['STATE OF INPUT FILES']['preserve'] = False
adriz_parobj['STATE OF INPUT FILES']['clean'] = True
adriz_parobj['STEP 1: STATIC MASK']['static'] = False
adriz_parobj['STEP 2: SKY SUBTRACTION']['skysub'] = True
adriz_parobj['STEP 2: SKY SUBTRACTION']['skystat'] = 'mode'
adriz_parobj['STEP 2: SKY SUBTRACTION']['skywidth'] = 0.3
adriz_parobj['STEP 2: SKY SUBTRACTION']['skylower'] = -100.0
adriz_parobj['STEP 2: SKY SUBTRACTION']['use_static'] = False
adriz_parobj['STEP 2: SKY SUBTRACTION']['sky_bits'] = None
adriz_parobj['STEP 3: DRIZZLE SEPARATE IMAGES']['driz_separate'] = False
adriz_parobj['STEP 4: CREATE MEDIAN IMAGE']['median'] = False
adriz_parobj['STEP 5: BLOT BACK THE MEDIAN IMAGE']['blot'] = False
adriz_parobj['STEP 6: REMOVE COSMIC RAYS WITH DERIV, DRIZ_CR']['driz_cr'] = False
astrodrizzle.AstroDrizzle(driz_inputs, configobj=adriz_parobj)
# Compare results
outputs = [(outfile, reffile)]
self.compare_outputs(outputs)
@pytest.mark.skip(reason="disable until truth files can be updated")
def test_waiver_asn(self):
""" This test confirms that drizzlepac can correcly process input
WFPC2 data stored in WAIVER fits format.
"""
# Prepare input files.
raw_inputs = ['u40x010hm_c0f.fits', 'u40x010im_c0f.fits',
'u40x010jm_c0f.fits', 'u40x010km_c0f.fits',
'u40x010hm_c1f.fits', 'u40x010im_c1f.fits',
'u40x010jm_c1f.fits', 'u40x010km_c1f.fits']
inputs = [os.path.basename(self.get_input_file('input', i))
for i in raw_inputs]
output = 'wfpc2_waiver'
outfile = '{}_drz.fits'.format(output)
reffile = 'reference_wfpc2_asn_waiver.fits'
# Update WCS for all inputs
driz_inputs = updatewcs.updatewcs(inputs[:4], use_db=False)
# run astrodrizzle now...
adriz_parobj = teal.load('astrodrizzle', defaults=True)
adriz_parobj['output'] = output
adriz_parobj['build'] = True
adriz_parobj['in_memory'] = True
adriz_parobj['runfile'] = 'wfpc2_asn_waiver.log'
adriz_parobj['STATE OF INPUT FILES']['preserve'] = False
adriz_parobj['STATE OF INPUT FILES']['clean'] = True
adriz_parobj['STEP 1: STATIC MASK']['static_sig'] = 3.0
adriz_parobj['STEP 2: SKY SUBTRACTION']['skysub'] = False
astrodrizzle.AstroDrizzle(driz_inputs, configobj=adriz_parobj)
# Compare results
outputs = [(outfile, reffile)]
self.compare_outputs(outputs)
def test_wfpc2_single(self):
""" This test confirms that drizzlepac can correcly process single
WFPC2 exposures.
"""
# Prepare input files.
raw_inputs = ["u9yq0703m_c0m.fits", "u9yq0703m_c1m.fits"]
inputs = [os.path.basename(self.get_input_file('input', i))
for i in raw_inputs]
output = 'wfpc2_single_mef'
outfile = '{}_drz.fits'.format(output)
reffile = 'reference_single_mef.fits'
# Update WCS for all inputs
driz_inputs = updatewcs.updatewcs(inputs[0], use_db=False)
# run astrodrizzle now...
adriz_parobj = teal.load('astrodrizzle', defaults=True)
adriz_parobj['output'] = output
adriz_parobj['build'] = True
adriz_parobj['in_memory'] = False
adriz_parobj['runfile'] = 'wfpc2_single_mef.log'
adriz_parobj['STATE OF INPUT FILES']['preserve'] = False
adriz_parobj['STATE OF INPUT FILES']['clean'] = True
adriz_parobj['STEP 1: STATIC MASK']['static'] = False
adriz_parobj['STEP 2: SKY SUBTRACTION']['skysub'] = False
adriz_parobj['STEP 3: DRIZZLE SEPARATE IMAGES']['driz_separate'] = False
adriz_parobj['STEP 4: CREATE MEDIAN IMAGE']['median'] = False
adriz_parobj['STEP 5: BLOT BACK THE MEDIAN IMAGE']['blot'] = False
adriz_parobj['STEP 6: REMOVE COSMIC RAYS WITH DERIV, DRIZ_CR']['driz_cr'] = False
astrodrizzle.AstroDrizzle(driz_inputs, configobj=adriz_parobj)
# Compare results
outputs = [(outfile, reffile)]
self.compare_outputs(outputs)
def test_mef_asn(self):
""" This test confirms that drizzlepac can correcly process input
WFPC2 data stored in Multi-extensions FITS(MEF) format.
"""
# Prepare input files.
raw_inputs = ['u9yq0703m_c0m.fits', 'u9yq0704m_c0m.fits',
'u9yq0707m_c0m.fits', 'u9yq0708m_c0m.fits',
'u9yq0703m_c1m.fits', 'u9yq0704m_c1m.fits',
'u9yq0707m_c1m.fits', 'u9yq0708m_c1m.fits']
inputs = [os.path.basename(self.get_input_file('input', i))
for i in raw_inputs]
output = 'wfpc2_mef'
outfile = '{}_drz.fits'.format(output)
reffile = 'reference_wfpc2_asn_mef.fits'
# Update WCS for all inputs
driz_inputs = updatewcs.updatewcs(inputs[:4], use_db=False)
# run astrodrizzle now...
adriz_parobj = teal.load('astrodrizzle', defaults=True)
adriz_parobj['output'] = output
adriz_parobj['build'] = True
adriz_parobj['in_memory'] = True
adriz_parobj['runfile'] = 'wfpc2_asn_mef.log'
adriz_parobj['STATE OF INPUT FILES']['preserve'] = False
adriz_parobj['STATE OF INPUT FILES']['clean'] = True
adriz_parobj['STEP 2: SKY SUBTRACTION']['skysub'] = False
astrodrizzle.AstroDrizzle(driz_inputs, configobj=adriz_parobj)
# Compare results
outputs = [(outfile, reffile)]
self.compare_outputs(outputs)
``` |
{
"source": "jhunkeler/gwcs",
"score": 2
} |
#### File: gwcs/gwcs/coordinate_frames.py
```python
import logging
import numpy as np
from astropy.utils.misc import isiterable
from astropy import time
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.wcs.wcsapi.low_level_api import (validate_physical_types,
VALID_UCDS)
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame', 'TemporalFrame']
UCD1_TO_CTYPE = {
'pos.eq.ra': 'RA',
'pos.eq.dec': 'DEC',
'pos.galactic.lon': 'GLON',
'pos.galactic.lat': 'GLAT',
'pos.ecliptic.lon': 'ELON',
'pos.ecliptic.lat': 'ELAT',
'pos.bodyrc.lon': 'TLON',
'pos.bodyrc.lat': 'TLAT',
'custom:pos.helioprojective.lat': 'HPLT',
'custom:pos.helioprojective.lon': 'HPLN',
'custom:pos.heliographic.stonyhurst.lon': 'HGLN',
'custom:pos.heliographic.stonyhurst.lat': 'HGLT',
'custom:pos.heliographic.carrington.lon': 'CRLN',
'custom:pos.heliographic.carrington.lat': 'CRLT',
'em.freq': 'FREQ',
'em.energy': 'ENER',
'em.wavenumber': 'WAVN',
'em.wl': 'WAVE',
'spect.dopplerVeloc.radio': 'VRAD',
'spect.dopplerVeloc.opt': 'VOPT',
'src.redshift': 'ZOPT',
'spect.dopplerVeloc': 'VELO',
'custom:spect.doplerVeloc.beta': 'BETA',
'time': 'TIME',
}
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
def get_ctype_from_ucd(ucd):
"""
Return the FITS ``CTYPE`` corresponding to a UCD1 value.
Parameters
----------
ucd : str
UCD string, for example one of ```WCS.world_axis_physical_types``.
Returns
-------
CTYPE : str
The corresponding FITS ``CTYPE`` value or an empty string.
"""
return UCD1_TO_CTYPE.get(ucd, "")
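# Illustrative usage: get_ctype_from_ucd("pos.eq.ra") returns "RA", while a UCD
# not present in UCD1_TO_CTYPE (e.g. the hypothetical "custom:foo") returns an
# empty string.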
class CoordinateFrame:
"""
Base class for Coordinate Frames.
Parameters
----------
naxes : int
Number of axes.
axes_type : str
One of ["SPATIAL", "SPECTRAL", "TIME"]
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, naxes, axes_type, axes_order, reference_frame=None,
reference_position=None, unit=None, axes_names=None,
name=None, axis_physical_types=None):
self._naxes = naxes
self._axes_order = tuple(axes_order)
if isinstance(axes_type, str):
self._axes_type = (axes_type,)
else:
self._axes_type = tuple(axes_type)
self._reference_frame = reference_frame
if unit is not None:
if astutil.isiterable(unit):
unit = tuple(unit)
else:
unit = (unit,)
if len(unit) != naxes:
raise ValueError("Number of units does not match number of axes.")
else:
self._unit = tuple([u.Unit(au) for au in unit])
else:
self._unit = tuple(u.Unit("") for na in range(naxes))
if axes_names is not None:
if isinstance(axes_names, str):
axes_names = (axes_names,)
else:
axes_names = tuple(axes_names)
if len(axes_names) != naxes:
raise ValueError("Number of axes names does not match number of axes.")
else:
axes_names = tuple([""] * naxes)
self._axes_names = axes_names
if name is None:
self._name = self.__class__.__name__
else:
self._name = name
self._reference_position = reference_position
if len(self._axes_type) != naxes:
raise ValueError("Length of axes_type does not match number of axes.")
if len(self._axes_order) != naxes:
raise ValueError("Length of axes_order does not match number of axes.")
super(CoordinateFrame, self).__init__()
self._axis_physical_types = self._set_axis_physical_types(axis_physical_types)
def _set_axis_physical_types(self, pht=None):
"""
Set the physical type of the coordinate axes using VO UCD1+ v1.23 definitions.
"""
if pht is not None:
if isinstance(pht, str):
pht = (pht,)
elif not isiterable(pht):
raise TypeError("axis_physical_types must be of type string or iterable of strings")
if len(pht) != self.naxes:
raise ValueError('"axis_physical_types" must be of length {}'.format(self.naxes))
ph_type = []
for axt in pht:
if axt not in VALID_UCDS and not axt.startswith("custom:"):
ph_type.append("custom:{}".format(axt))
else:
ph_type.append(axt)
elif isinstance(self, CelestialFrame):
if isinstance(self.reference_frame, coord.Galactic):
ph_type = "pos.galactic.lon", "pos.galactic.lat"
elif isinstance(self.reference_frame, (coord.GeocentricTrueEcliptic,
coord.GCRS,
coord.PrecessedGeocentric)):
ph_type = "pos.bodyrc.lon", "pos.bodyrc.lat"
elif isinstance(self.reference_frame, coord.builtin_frames.BaseRADecFrame):
ph_type = "pos.eq.ra", "pos.eq.dec"
elif isinstance(self.reference_frame, coord.builtin_frames.BaseEclipticFrame):
ph_type = "pos.ecliptic.lon", "pos.ecliptic.lat"
else:
ph_type = tuple("custom:{}".format(t) for t in self.axes_names)
elif isinstance(self, SpectralFrame):
if self.unit[0].physical_type == "frequency":
ph_type = ("em.freq",)
elif self.unit[0].physical_type == "length":
ph_type = ("em.wl",)
elif self.unit[0].physical_type == "energy":
ph_type = ("em.energy",)
elif self.unit[0].physical_type == "speed":
ph_type = ("spect.dopplerVeloc",)
logging.warning("Physical type may be ambiguous. Consider "
"setting the physical type explicitly as "
"either 'spect.dopplerVeloc.optical' or "
"'spect.dopplerVeloc.radio'.")
else:
ph_type = ("custom:{}".format(self.unit[0].physical_type),)
elif isinstance(self, TemporalFrame):
ph_type = ("time",)
elif isinstance(self, Frame2D):
if all(self.axes_names):
ph_type = self.axes_names
else:
ph_type = self.axes_type
ph_type = tuple("custom:{}".format(t) for t in ph_type)
else:
ph_type = tuple("custom:{}".format(t) for t in self.axes_type)
validate_physical_types(ph_type)
return tuple(ph_type)
def __repr__(self):
fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format(
self.__class__.__name__, self.name,
self.unit, self.axes_names, self.axes_order)
if self.reference_position is not None:
fmt += ', reference_position="{0}"'.format(self.reference_position)
if self.reference_frame is not None:
fmt += ", reference_frame={0}".format(self.reference_frame)
fmt += ")>"
return fmt
def __str__(self):
if self._name is not None:
return self._name
return self.__class__.__name__
@property
def name(self):
""" A custom name of this frame."""
return self._name
@name.setter
def name(self, val):
""" A custom name of this frame."""
self._name = val
@property
def naxes(self):
""" The number of axes in this frame."""
return self._naxes
@property
def unit(self):
"""The unit of this frame."""
return self._unit
@property
def axes_names(self):
""" Names of axes in the frame."""
return self._axes_names
@property
def axes_order(self):
""" A tuple of indices which map inputs to axes."""
return self._axes_order
@property
def reference_frame(self):
""" Reference frame, used to convert to world coordinate objects. """
return self._reference_frame
@property
def reference_position(self):
""" Reference Position. """
return getattr(self, "_reference_position", None)
@property
def axes_type(self):
""" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """
return self._axes_type
def coordinates(self, *args):
""" Create world coordinates object"""
coo = tuple([arg * un if not hasattr(arg, "to") else arg.to(un) for arg, un in zip(args, self.unit)])
return coo
def coordinate_to_quantity(self, *coords):
"""
Given a rich coordinate object return an astropy quantity object.
"""
# NoOp leaves it to the model to handle
# If coords is a 1-tuple of quantity then return the element of the tuple
# This aligns the behavior with the other implementations
if not hasattr(coords, 'unit') and len(coords) == 1:
return coords[0]
return coords
@property
def axis_physical_types(self):
return self._axis_physical_types
@property
def _world_axis_object_classes(self):
return {self._axes_type[0]: (
u.Quantity,
(),
{'unit': self.unit[0]})}
@property
def _world_axis_object_components(self):
return [(self._axes_type[0], 0, 'value')]
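# --- Illustrative usage sketch (editor addition; not part of the original gwcs module). ---
# A minimal example of a plain one-axis CoordinateFrame, relying on the astropy.units
# import (``u``) already used by this module. The helper name below is hypothetical.
def _example_coordinate_frame():
    dist = CoordinateFrame(naxes=1, axes_type="SPATIAL", axes_order=(0,),
                           unit=u.m, axes_names="distance", name="distance")
    return dist.coordinates(3.0)   # -> a 1-tuple holding the Quantity 3.0 m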
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=None, reference_frame=None,
unit=None, axes_names=None,
name=None, axis_physical_types=None):
naxes = 2
if reference_frame is not None:
if not isinstance(reference_frame, str):
if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:
_axes_names = list(reference_frame.representation_component_names.values())
if 'distance' in _axes_names:
_axes_names.remove('distance')
if axes_names is None:
axes_names = _axes_names
naxes = len(_axes_names)
_unit = list(reference_frame.representation_component_units.values())
if unit is None and _unit:
unit = _unit
if axes_order is None:
axes_order = tuple(range(naxes))
if unit is None:
unit = tuple([u.degree] * naxes)
axes_type = ['SPATIAL'] * naxes
super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,
axes_order=axes_order,
reference_frame=reference_frame,
unit=unit,
axes_names=axes_names,
name=name, axis_physical_types=axis_physical_types)
@property
def _world_axis_object_classes(self):
return {'celestial': (
coord.SkyCoord,
(),
{'frame': self.reference_frame,
'unit': self.unit})}
@property
def _world_axis_object_components(self):
return [('celestial', 0, 'spherical.lon'),
('celestial', 1, 'spherical.lat')]
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
if isinstance(args[0], coord.SkyCoord):
return args[0].transform_to(self.reference_frame)
return coord.SkyCoord(*args, unit=self.unit, frame=self.reference_frame)
def coordinate_to_quantity(self, *coords):
""" Convert a ``SkyCoord`` object to quantities."""
if len(coords) == 2:
arg = coords
elif len(coords) == 1:
arg = coords[0]
else:
raise ValueError("Unexpected number of coordinates in "
"input to frame {} : "
"expected 2, got {}".format(self.name, len(coords)))
if isinstance(arg, coord.SkyCoord):
arg = arg.transform_to(self._reference_frame)
try:
lon = arg.data.lon
lat = arg.data.lat
except AttributeError:
lon = arg.spherical.lon
lat = arg.spherical.lat
return lon, lat
elif all(isinstance(a, u.Quantity) for a in arg):
return tuple(arg)
else:
raise ValueError("Could not convert input {} to lon and lat quantities.".format(arg))
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
"""
def __init__(self, axes_order=(0,), reference_frame=None, unit=None,
axes_names=None, name=None, axis_physical_types=None,
reference_position=None):
super(SpectralFrame, self).__init__(naxes=1, axes_type="SPECTRAL", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name,
reference_position=reference_position,
axis_physical_types=axis_physical_types)
@property
def _world_axis_object_classes(self):
return {'spectral': (
u.Quantity,
(),
{'unit': self.unit[0]})}
@property
def _world_axis_object_components(self):
return [('spectral', 0, 'value')]
def coordinates(self, *args, equivalencies=[]):
if hasattr(args[0], 'unit'):
return args[0].to(self.unit[0], equivalencies=equivalencies)
if np.isscalar(args):
return args * self.unit[0]
else:
return args[0] * self.unit[0]
def coordinate_to_quantity(self, *coords):
if hasattr(coords[0], 'unit'):
return coords[0]
return coords[0] * self.unit[0]
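# --- Illustrative usage sketch (editor addition; not part of the original gwcs module). ---
# A spectral axis in microns; its axis_physical_types is derived from the unit ("em.wl").
# The helper name below is hypothetical.
def _example_spectral_frame():
    spec = SpectralFrame(unit=u.micron, axes_names="wavelength", name="wave")
    q = spec.coordinates(2.0)               # -> 2.0 micron
    return spec.coordinate_to_quantity(q)   # -> the same Quantity back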
class TemporalFrame(CoordinateFrame):
"""
A coordinate frame for time axes.
Parameters
----------
reference_frame : `~astropy.time.Time`
A Time object which holds the time scale and format.
If data is provided, it is the time zero point.
To not set a zero point for the frame initialize `reference_frame`
with an empty list.
unit : str or `~astropy.units.Unit`
Time unit.
axes_names : str
Time axis name.
axes_order : tuple or int
A dimension in the data that corresponds to this axis.
name : str
Name for this frame.
"""
def __init__(self, reference_frame, unit=None, axes_order=(0,),
axes_names=None, name=None, axis_physical_types=None):
        axes_names = axes_names or "{}({}; {})".format(reference_frame.format,
                                                       reference_frame.scale,
                                                       reference_frame.location)
super().__init__(naxes=1, axes_type="TIME", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name, axis_physical_types=axis_physical_types)
self._attrs = {}
for a in self.reference_frame.info._represent_as_dict_extra_attrs:
try:
self._attrs[a] = getattr(self.reference_frame, a)
except AttributeError:
pass
@property
def _world_axis_object_classes(self):
comp = (
time.Time,
(),
{'unit': self.unit[0], **self._attrs},
self._convert_to_time)
return {'temporal': comp}
@property
def _world_axis_object_components(self):
if isinstance(self.reference_frame.value, np.ndarray):
return [('temporal', 0, 'value')]
def offset_from_time_and_reference(time):
return (time - self.reference_frame).sec
return [('temporal', 0, offset_from_time_and_reference)]
def coordinates(self, *args):
if np.isscalar(args):
dt = args
else:
dt = args[0]
return self._convert_to_time(dt, unit=self.unit[0], **self._attrs)
def _convert_to_time(self, dt, *, unit, **kwargs):
if (not isinstance(dt, time.TimeDelta) and
isinstance(dt, time.Time) or
isinstance(self.reference_frame.value, np.ndarray)):
return time.Time(dt, **kwargs)
if not hasattr(dt, 'unit'):
dt = dt * unit
return self.reference_frame + dt
def coordinate_to_quantity(self, *coords):
if isinstance(coords[0], time.Time):
ref_value = self.reference_frame.value
if not isinstance(ref_value, np.ndarray):
return (coords[0] - self.reference_frame).to(self.unit[0])
else:
# If we can't convert to a quantity just drop the object out
# and hope the transform can cope.
return coords[0]
# Is already a quantity
elif hasattr(coords[0], 'unit'):
return coords[0]
if isinstance(coords[0], np.ndarray):
return coords[0] * self.unit[0]
else:
raise ValueError("Can not convert {} to Quantity".format(coords[0]))
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TemporalFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
def __init__(self, frames, name=None):
self._frames = frames[:]
naxes = sum([frame._naxes for frame in self._frames])
axes_type = list(range(naxes))
unit = list(range(naxes))
axes_names = list(range(naxes))
axes_order = []
ph_type = list(range(naxes))
for frame in frames:
axes_order.extend(frame.axes_order)
for frame in frames:
for ind, axtype, un, n, pht in zip(frame.axes_order, frame.axes_type,
frame.unit, frame.axes_names, frame.axis_physical_types):
axes_type[ind] = axtype
axes_names[ind] = n
unit[ind] = un
ph_type[ind] = pht
if len(np.unique(axes_order)) != len(axes_order):
raise ValueError("Incorrect numbering of axes, "
"axes_order should contain unique numbers, "
"got {}.".format(axes_order))
super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,
axes_order=axes_order,
unit=unit, axes_names=axes_names,
name=name)
self._axis_physical_types = tuple(ph_type)
@property
def frames(self):
return self._frames
def __repr__(self):
return repr(self.frames)
def coordinates(self, *args):
coo = []
if len(args) == len(self.frames):
for frame, arg in zip(self.frames, args):
coo.append(frame.coordinates(arg))
else:
for frame in self.frames:
fargs = [args[i] for i in frame.axes_order]
coo.append(frame.coordinates(*fargs))
return coo
def coordinate_to_quantity(self, *coords):
if len(coords) == len(self.frames):
args = coords
elif len(coords) == self.naxes:
args = []
for _frame in self.frames:
if _frame.naxes > 1:
# Collect the arguments for this frame based on axes_order
args.append([coords[i] for i in _frame.axes_order])
else:
args.append(coords[_frame.axes_order[0]])
else:
raise ValueError("Incorrect number of arguments")
qs = []
for _frame, arg in zip(self.frames, args):
ret = _frame.coordinate_to_quantity(arg)
if isinstance(ret, tuple):
qs += list(ret)
else:
qs.append(ret)
return qs
@property
def _world_axis_object_components(self):
"""
We need to generate the components respecting the axes_order.
"""
out = [None] * self.naxes
for frame in self.frames:
for i, ao in enumerate(frame.axes_order):
out[ao] = frame._world_axis_object_components[i]
if any([o is None for o in out]):
raise ValueError("axes_order leads to incomplete world_axis_object_components")
return out
@property
def _world_axis_object_classes(self):
out = {}
for frame in self.frames:
out.update(frame._world_axis_object_classes)
return out
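# --- Illustrative usage sketch (editor addition; not part of the original gwcs module). ---
# Combining a two-axis celestial frame with a one-axis spectral frame into a 3D frame;
# axes_order controls how the three inputs are distributed. The helper name is hypothetical.
def _example_composite_frame():
    sky = CelestialFrame(axes_order=(0, 1), reference_frame=coord.ICRS())
    spec = SpectralFrame(axes_order=(2,), unit=u.micron, axes_names="wavelength")
    cube = CompositeFrame([sky, spec], name="sky_spectral")
    return cube.coordinates(10.0, 20.0, 2.0)   # -> [SkyCoord(10, 20 deg), 2.0 micron]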
class StokesProfile(str):
    # This is the list of profiles from Table 7 of Greisen & Calabretta (2002),
    # modified to be 0-indexed
profiles = {
'I': 0,
'Q': 1,
'U': 2,
'V': 3,
'RR': -1,
'LL': -2,
'RL': -3,
'LR': -4,
'XX': -5,
'YY': -6,
'XY': -7,
'YX': -8,
}
@classmethod
def from_index(cls, indexes):
"""
Construct a StokesProfile object from a numerical index.
Parameters
----------
indexes : `int`, `numpy.ndarray`
An index or array of indices to construct StokesProfile objects from.
"""
nans = np.isnan(indexes)
indexes = np.asanyarray(indexes, dtype=int)
out = np.empty_like(indexes, dtype=object)
for profile, index in cls.profiles.items():
out[indexes == index] = profile
out[nans] = np.nan
if out.size == 1 and not nans:
return StokesProfile(out.item())
elif nans.all():
return np.array(out, dtype=float)
return out
def __new__(cls, content):
content = str(content)
if content not in cls.profiles.keys():
raise ValueError(f"The profile name must be one of {cls.profiles.keys()} not {content}")
return str.__new__(cls, content)
def value(self):
return self.profiles[self]
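# --- Illustrative usage sketch (editor addition; not part of the original gwcs module). ---
# Mapping between Stokes indices and names with the 0-indexed table above.
# The helper name below is hypothetical.
def _example_stokes_profile():
    names = StokesProfile.from_index(np.array([0, 1, 2, 3]))   # -> ['I', 'Q', 'U', 'V']
    return names, StokesProfile('RR').value()                  # 'RR' maps to -1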
class StokesFrame(CoordinateFrame):
"""
    A coordinate frame for representing Stokes polarisation states.
Parameters
----------
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0,), name=None):
super(StokesFrame, self).__init__(1, ["STOKES"], axes_order, name=name,
axes_names=("stokes",), unit=u.one,
axis_physical_types="phys.polarization.stokes")
@property
def _world_axis_object_classes(self):
return {'stokes': (
StokesProfile,
(),
{},
StokesProfile.from_index)}
@property
def _world_axis_object_components(self):
return [('stokes', 0, 'value')]
def coordinates(self, *args):
if isinstance(args[0], u.Quantity):
arg = args[0].value
else:
arg = args[0]
return StokesProfile.from_index(arg)
def coordinate_to_quantity(self, *coords):
if isinstance(coords[0], str):
if coords[0] in StokesProfile.profiles.keys():
return StokesProfile.profiles[coords[0]] * u.one
else:
return coords[0]
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),
name=None, axis_physical_types=None):
super(Frame2D, self).__init__(naxes=2, axes_type=["SPATIAL", "SPATIAL"],
axes_order=axes_order, name=name,
axes_names=axes_names, unit=unit,
axis_physical_types=axis_physical_types)
def coordinates(self, *args):
args = [args[i] for i in self.axes_order]
coo = tuple([arg * un for arg, un in zip(args, self.unit)])
return coo
def coordinate_to_quantity(self, *coords):
# list or tuple
if len(coords) == 1 and astutil.isiterable(coords[0]):
coords = list(coords[0])
elif len(coords) == 2:
coords = list(coords)
else:
raise ValueError("Unexpected number of coordinates in "
"input to frame {} : "
"expected 2, got {}".format(self.name, len(coords)))
for i in range(2):
if not hasattr(coords[i], 'unit'):
coords[i] = coords[i] * self.unit[i]
return tuple(coords)
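# --- Illustrative usage sketch (editor addition; not part of the original gwcs module). ---
# A detector-style pixel frame; coordinates() and coordinate_to_quantity() simply attach
# or pass through the pixel unit. The helper name below is hypothetical.
def _example_frame2d():
    detector = Frame2D(name="detector")
    x, y = detector.coordinates(512, 1024)              # -> (512 pix, 1024 pix)
    return detector.coordinate_to_quantity(512, 1024)   # -> (512 pix, 1024 pix)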
```
#### File: jhunkeler/gwcs/setup.py
```python
import os
import sys
from setuptools import setup, find_packages
from configparser import ConfigParser
if sys.version_info < (3, 6):
error = """
GWCS supports Python versions 3.6 and above.
"""
sys.exit(error)
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
def get_package_data():
# Installs the schema files
schemas = []
root = os.path.join(PACKAGENAME, 'schemas')
for node, dirs, files in os.walk(root):
for fname in files:
if fname.endswith('.yaml'):
schemas.append(
os.path.relpath(os.path.join(node, fname), root))
# In the package directory, install to the subdirectory 'schemas'
schemas = [os.path.join('schemas', s) for s in schemas]
return schemas
schemas = get_package_data()
PACKAGE_DATA = {'gwcs': schemas}
entry_points = {'asdf_extensions': 'gwcs = gwcs.extension:GWCSExtension',
'bandit.formatters': 'bson = bandit_bson:formatter'}
DOCS_REQUIRE = [
'sphinx',
'sphinx-automodapi',
'sphinx-rtd-theme',
'stsci-rtd-theme',
'sphinx-astropy',
'sphinx-asdf',
]
TESTS_REQUIRE = [
'pytest>=4.6,<6',
'pytest-doctestplus',
'scipy',
]
setup(name=PACKAGENAME,
use_scm_version=True,
setup_requires=['setuptools_scm'],
description=DESCRIPTION,
install_requires=[
'astropy>=4.1',
'numpy',
'scipy',
'asdf'],
packages=find_packages(),
extras_require={
'test': TESTS_REQUIRE,
'docs': DOCS_REQUIRE,
},
tests_require=TESTS_REQUIRE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
package_data=PACKAGE_DATA,
entry_points=entry_points,
)
``` |
{
"source": "jhunkeler/h5axeconfig",
"score": 3
} |
#### File: h5axeconfig/grism/polynomials.py
```python
import numpy as np
class Poly2d(object):
def __init__(self,v,name=None):
self.name=name
self.coefs={}
if isinstance(v,np.ndarray):
count=int((np.sqrt(1+8*len(v))-1)/2)
ii=0
for j in range(count):
for k in range(j+1):
self.coefs[(j-k,k)]=v[ii]
ii+=1
else:
self.coefs[(0,0)]=np.array([v])
#self.npar=len(self.coefs)
#self._order=self.npar-1
@property
def order(self):
return self.npar-1
@property
def npar(self):
return len(self.coefs)
def __str__(self):
return self.name
def __call__(self,x,y):
#for coef in self.coefs:
#print(list(zip(*x.keys())))
#i,j=*list(zip(*self.coefs.keys()))
#i=list(i)
#j=list(j)
#v=list(self.coefs.values())
p=np.zeros_like(x)
for (i,j),v in self.coefs.items():
p+=(v*x**i*y**j)
return p
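# --- Illustrative usage sketch (editor addition; not part of the original source). ---
# The 1-D coefficient vector is packed in the triangular order [c00, c10, c01, c20, ...],
# so three values describe the first-order surface 1 + 2*x + 3*y. The helper is hypothetical.
def _example_poly2d():
    p = Poly2d(np.array([1.0, 2.0, 3.0]), name="demo")
    return p(np.array([0.0, 1.0]), np.array([0.0, 1.0]))   # -> array([1., 6.])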
class Poly1d(object):
# def __init__(self,conf,key,beam):
# self.name=key.lower()+'_'+beam.lower()
# self.coefs=[]
# for k,v in conf.items():
# k=k.lower()
# if k.startswith(key+'_'+beam.lower()):
# self.coefs.append(Poly2d(k,v))
# self.order=len(self.coefs)-1
def __init__(self,h5,key):
self.name=key
hf=h5[self.name]
self.coefs=[Poly2d(hf[order][()]) for order in hf]
#self.coefs=[]
#for order in hf:
# d=hf[order][()]
# self.coefs.append(Poly2d(d))
@property
def order(self):
return len(self.coefs)-1
def __str__(self):
return '{}: order={}'.format(self.name,self.order)
def __call__(self,x,y,order=None):
if order is None:
#for coef in reversed(self.coefs):
# print(self.name,coef(x,y))
p=np.array([coef(x,y) for coef in reversed(self.coefs)])
else:
#coef=self.coefs[order]
#p=coef(x,y)
p=self.coefs[order](x,y)
return p
```
#### File: h5axeconfig/h5axeconfig/utils.py
```python
import h5py
import numpy as np
import os
def resolveFile(filename,path=None):
    ''' get a filename to a config file based on the hardcoded path '''
if path is None:
path,theFile=os.path.split(__file__)
path=os.path.join(path,'data')
fullfile=os.path.join(path,filename)
return fullfile
def detectorData(filename,*args):
fullfile=resolveFile(filename)
det={}
with h5py.File(fullfile,'r') as h5:
h5g=list(h5.values())[0] # just check the first grism
for detname,h5d in h5g.items():
det[detname]=tuple(h5Attr(h5d,arg) for arg in args)
return det
def h5Attr(h5,key):
''' Extract an attribute from the h5 and retype it '''
try:
val=h5.attrs[key]
if isinstance(val,np.bytes_):
val=val.decode('utf-8')
low=val.lower()
if low =='none':
val=None
elif low == 'true':
val=True
elif low == 'false':
val=False
else:
pass
else:
pass
except:
val=None
return val
def vNewton(funct,deriv,x0,itmax=1000,tolerance=1e-3,**kwargs):
''' Vectorized method to solve non-linear equations with
Newton's method '''
# store the output and perturbations
x=np.copy(x0)
dx=np.zeros_like(x0)
itn=0 # number of iterations
    # dx starts at zero, so testing |dx| >= tolerance here would select no elements and
    # the loop below would never run; start by iterating on every element instead.
    g=np.arange(x.size)
while g.size != 0 and itn != itmax:
num=funct(x[g],**kwargs) # the function call
den=deriv(x[g],**kwargs) # the derivative call
# compute perturbations and new positions
dx[g]=num/den
x[g]-=dx[g]
# update the counter and elements to iterate on
itn+=1
g=np.where(np.abs(dx)>=tolerance)[0]
if itn == itmax:
print("Warning> max iterations reached.")
return x,itn
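# --- Illustrative usage sketch (editor addition; not part of the original source). ---
# Solving x**2 - 2 = 0 element-wise from several starting points; every root should
# converge to sqrt(2) ~ 1.41421. The helper name below is hypothetical.
def _example_vnewton():
    f=lambda x: x**2-2.0
    dfdx=lambda x: 2.0*x
    roots,niters=vNewton(f,dfdx,np.array([1.0,2.0,3.0]))
    return roots,niters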
if __name__=='__main__':
with h5py.File('../data/hst_wfc3_ir_beams.h5','r') as h5:
        v=h5Attr(h5['IR']['G102'],'nbeam')
print(v,type(v))
``` |
{
"source": "jhunkeler/hstcal",
"score": 2
} |
#### File: tests/wfc3/test_uvis_32single.py
```python
import subprocess
import pytest
from ..helpers import BaseWFC3
@pytest.mark.xfail(reason="Temporary xfail. New input/truth files on Artifactory, but branch not merged.")
class TestUVIS32Single(BaseWFC3):
"""
Test pos UVIS2 subarray data with CTE correction
"""
detector = 'uvis'
def _single_raw_calib(self, rootname):
raw_file = '{}_raw.fits'.format(rootname)
# Prepare input file.
self.get_input_file(raw_file)
# Run CALWF3
subprocess.call(['calwf3.e', raw_file, '-vts'])
# Compare results
outputs = [('{}_flt.fits'.format(rootname), '{}_flt_ref.fits'.format(rootname)),
('{}_flc.fits'.format(rootname), '{}_flc_ref.fits'.format(rootname)),
('{}_rac_tmp.fits'.format(rootname), '{}_rac_tmp_ref.fits'.format(rootname))]
self.compare_outputs(outputs)
# Ported from ``calwf3_uv_32``.
@pytest.mark.parametrize(
'rootname', ['ib3805v0q'])
# 'rootname', ['ib3805v0q',
# 'ib2kabmaq',
# 'ib3503wwq',
# 'ibde04msq',
# 'icoc14hcq'])
def test_uvis_32single(self, rootname):
self._single_raw_calib(rootname)
``` |
{
"source": "jhunkeler/nirspec_pipe_testing_tool",
"score": 2
} |
#### File: calwebb_spec2_pytests/auxiliary_code/flattest_ifu.py
```python
import time
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.io import fits
from gwcs import wcstools
from gwcs.utils import _toindex
from jwst import datamodels
from jwst.assign_wcs import nirspec
from . import auxiliary_functions as auxfunc
"""
This script tests the pipeline flat field step output for IFU data. It is the Python version of the IDL script
(with the same name) written by <NAME>, with subsequent changes made by <NAME>.
"""
# HEADER
__author__ = "<NAME>"
__version__ = "2.6"
# HISTORY
# Nov 2017 - Version 1.0: initial version completed
# May 2018 - Version 2.0: Completely changed script to use the datamodel instead of the compute_world_coordinates
# script, and added new routines for statistics calculations.
# Jun 2018 - Version 2.2: Removed function reverse_cols because it was not behaving as expected.
# Feb 2019 - Version 2.3: Maria added lines to properly rotate NRS2 s- and d-flats.
# Apr 2019 - Version 2.4: Implemented logging capability.
# May 2019 - Version 2.5: Implemented plot of residuals as well as histogram.
# Jun 2019 - Version 2.6: Updated name of interpolated flat to be the default pipeline name for this file.
def mk_hist(title, delfg, delfg_mean, delfg_median, delfg_std, save_figs, show_figs, plot_name):
# create histogram
font = {#'family' : 'normal',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
alpha = 0.2
fontsize = 15
fig = plt.figure(1, figsize=(12, 10))
plt.subplots_adjust(hspace=.4)
ax = plt.subplot(111)
plt.title(title)
if "all_slices" in title:
plt.xlabel("Median values")
else:
plt.xlabel("flat$_{pipe}$ - flat$_{calc}$")
plt.ylabel("N")
xmin = min(delfg) - (max(delfg) - min(delfg))*0.1
xmax = max(delfg) + (max(delfg) - min(delfg))*0.1
plt.xlim(xmin, xmax)
if "all_slices" in title:
#x_median = r"$\mu$(medians) = {:0.5}".format(delfg_median)
x_stddev = r"$\sigma$(medians) = {:0.5}".format(delfg_std)
else:
#x_median = "median = {:0.3}".format(delfg_median)
x_stddev = "stddev = {:0.3}".format(delfg_std)
# add vertical line at mean and median
plt.axvline(delfg_mean, label="mean = %0.3e"%(delfg_mean), color="g")
plt.axvline(delfg_median, label="median = %0.3e"%(delfg_median), linestyle="-.", color="b")
plt.legend()
# add standard deviation
ax.text(0.74, 0.86, x_stddev, transform=ax.transAxes, fontsize=fontsize)
plt.tick_params(axis='both', which='both', bottom=True, top=True, right=True, direction='in', labelbottom=True)
binwidth = (xmax-xmin)/40.
_, _, _ = ax.hist(delfg, bins=np.arange(xmin, xmax + binwidth, binwidth), histtype='bar', ec='k', facecolor="red", alpha=alpha)
if save_figs:
if plot_name is None:
t = (title, ".pdf")
plot_name = "".join(t)
plt.savefig(plot_name)
print ('\n Plot saved: ', plot_name)
if show_figs:
plt.show()
plt.close()
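# --- Illustrative sketch (editor addition; not part of the original NPTT script). ---
# A hypothetical, self-contained call of mk_hist on synthetic residuals; it only shows the
# expected argument order and relies on the numpy/matplotlib imports above.
def _demo_mk_hist():
    demo_vals = np.random.normal(0.0, 1.0e-8, 500)
    mk_hist("demo_histogram", demo_vals, np.mean(demo_vals), np.median(demo_vals),
            np.std(demo_vals), save_figs=False, show_figs=True, plot_name=None)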
def flattest(step_input_filename, dflatref_path=None, sfile_path=None, fflat_path=None, writefile=False,
mk_all_slices_plt=False, show_figs=True, save_figs=False, plot_name=None,
threshold_diff=1.0e-7, debug=False):
"""
This function calculates the difference between the pipeline and the calculated flat field values.
    The function uses the datamodel from the assign_wcs step output rather than the compute_world_coordinates.py script.
Args:
step_input_filename: str, name of the output fits file from the 2d_extract step (with full path)
        dflatref_path: str, path to the D-flat reference fits files
        sfile_path: str, path to the S-flat reference fits files
        fflat_path: str, path to the F-flat reference fits files
msa_conf_root: str, path to where the MSA configuration fits file lives
writefile: boolean, if True writes the fits files of the calculated flat and difference images
show_figs: boolean, whether to show plots or not
save_figs: boolean, save the plots (the 3 plots can be saved or not independently with the function call)
plot_name: string, desired name (if name is not given, the plot function will name the plot by
default)
threshold_diff: float, threshold difference between pipeline output and ESA file
debug: boolean, if true a series of print statements will show on-screen
Returns:
- 1 plot, if told to save and/or show.
        - median_diff: Boolean, True if the median difference is smaller than or equal to the threshold_diff value
- log_msgs: list, all print statements are captured in this variable
"""
log_msgs = []
# start the timer
flattest_start_time = time.time()
# get info from the flat field file
file_path = step_input_filename.replace(os.path.basename(step_input_filename), "")
det = fits.getval(step_input_filename, "DETECTOR", 0)
exptype = fits.getval(step_input_filename, "EXP_TYPE", 0)
grat = fits.getval(step_input_filename, "GRATING", 0)
filt = fits.getval(step_input_filename, "FILTER", 0)
file_basename = os.path.basename(step_input_filename.replace(".fits", ""))
msg1 = 'step_input_filename='+step_input_filename
msg2 = "flat_field_file --> Grating:"+grat+" Filter:"+filt+" EXP_TYPE:"+exptype
print(msg1)
print(msg2)
log_msgs.append(msg1)
log_msgs.append(msg2)
# read in the on-the-fly flat image
flatfile = step_input_filename.replace("flat_field.fits", "interpolatedflat.fits")
pipeflat = fits.getdata(flatfile, "SCI")
# get the reference files
msg = "Getting and reading the D-, S-, and F-flats for this specific IFU configuration... "
print(msg)
log_msgs.append(msg)
# D-Flat
dflat_ending = "f_01.03.fits"
dfile = dflatref_path+"_nrs1_"+dflat_ending
if det == "NRS2":
dfile = dfile.replace("nrs1", "nrs2")
msg = "Using D-flat: "+dfile
print(msg)
log_msgs.append(msg)
dfim = fits.getdata(dfile, "SCI")#1)
dfimdq = fits.getdata(dfile, "DQ")#4)
# need to flip/rotate the image into science orientation
ns = np.shape(dfim)
    dfim = np.transpose(dfim, (0, 2, 1))   # keep in mind that 0,1,2 = z,y,x in Python, whereas 0,1,2 = x,y,z in IDL
dfimdq = np.transpose(dfimdq)
if det == "NRS2":
# rotate science data by 180 degrees for NRS2
dfim = dfim[..., ::-1, ::-1]
dfimdq = dfimdq[..., ::-1, ::-1]
naxis3 = fits.getval(dfile, "NAXIS3", "SCI")#1)
# get the wavelength values
dfwave = np.array([])
for i in range(naxis3):
keyword = "PFLAT_"+str(i+1)
dfwave = np.append(dfwave, fits.getval(dfile, keyword, "SCI"))#1))
dfrqe = fits.getdata(dfile, 2)
# S-flat
tsp = exptype.split("_")
mode = tsp[1]
if filt == "F070LP":
flat = "FLAT4"
elif filt == "F100LP":
flat = "FLAT1"
elif filt == "F170LP":
flat = "FLAT2"
elif filt == "F290LP":
flat = "FLAT3"
elif filt == "CLEAR":
flat = "FLAT5"
else:
msg = "No filter correspondence. Exiting the program."
print(msg)
log_msgs.append(msg)
# This is the key argument for the assert pytest function
msg = "Test skiped because there is no flat correspondence for the filter in the data: {}".format(filt)
median_diff = "skip"
return median_diff, msg
sflat_ending = "f_01.01.fits"
sfile = sfile_path+"_"+grat+"_OPAQUE_"+flat+"_nrs1_"+sflat_ending
if debug:
print("grat = ", grat)
print("flat = ", flat)
print("sfile used = ", sfile)
if det == "NRS2":
sfile = sfile.replace("nrs1", "nrs2")
msg = "Using S-flat: "+sfile
print(msg)
log_msgs.append(msg)
sfim = fits.getdata(sfile, "SCI")#1)
sfimdq = fits.getdata(sfile, "DQ")#3)
# need to flip/rotate image into science orientation
sfim = np.transpose(sfim)
sfimdq = np.transpose(sfimdq)
if det == "NRS2":
# rotate science data by 180 degrees for NRS2
sfim = sfim[..., ::-1, ::-1]
sfimdq = sfimdq[..., ::-1, ::-1]
sfv = fits.getdata(sfile, 5)
# F-Flat
fflat_ending = "_01.01.fits"
if mode in fflat_path:
ffile = fflat_path+"_"+filt+fflat_ending
else:
msg = "Wrong path in for mode F-flat. This script handles mode "+mode+"only."
print(msg)
log_msgs.append(msg)
# This is the key argument for the assert pytest function
result_msg = "Wrong path in for mode F-flat. Test skiped because mode is not IFU."
median_diff = "skip"
return median_diff, result_msg, log_msgs
msg = "Using F-flat: "+ffile
print(msg)
log_msgs.append(msg)
ffv = fits.getdata(ffile, "IFU")#1)
# now go through each pixel in the test data
if writefile:
# create the fits list to hold the calculated flat values for each slit
hdu0 = fits.PrimaryHDU()
outfile = fits.HDUList()
outfile.append(hdu0)
# create the fits list to hold the image of pipeline-calculated difference values
hdu0 = fits.PrimaryHDU()
complfile = fits.HDUList()
complfile.append(hdu0)
# get the datamodel from the assign_wcs output file
assign_wcs_file = step_input_filename.replace("_flat_field.fits", "_assign_wcs.fits")
model = datamodels.ImageModel(assign_wcs_file)
ifu_slits = nirspec.nrs_ifu_wcs(model)
# loop over the slices
all_delfg_mean, all_delfg_mean_arr, all_delfg_median, all_test_result = [], [], [], []
msg = "\n Now looping through the slices, this may take some time... "
print(msg)
log_msgs.append(msg)
for n_ext, slice in enumerate(ifu_slits):
if n_ext < 10:
pslice = "0"+repr(n_ext)
else:
pslice = repr(n_ext)
msg = "\nWorking with slice: "+pslice
print(msg)
log_msgs.append(msg)
# get the wavelength
# slice.x(y)start are 1-based, turn them to 0-based for extraction
x, y = wcstools.grid_from_bounding_box(slice.bounding_box, (1, 1), center=True)
ra, dec, wave = slice(x, y)
# get the subwindow origin (technically no subwindows for IFU, but need this for comparing to the
# full frame on-the-fly flat image).
px0 = model.meta.subarray.xstart - 1 + int(_toindex(slice.bounding_box[0][0])) + 1
        py0 = model.meta.subarray.ystart - 1 + int(_toindex(slice.bounding_box[1][0])) + 1
n_p = np.shape(wave)
nx, ny = n_p[1], n_p[0]
nw = nx * ny
msg = " Subwindow origin: px0="+repr(px0)+" py0="+repr(py0)
print(msg)
log_msgs.append(msg)
if debug:
print("n_p = ", n_p)
print("nw = ", nw)
# initialize arrays of the right size
delf = np.zeros([nw]) + 999.0
flatcor = np.zeros([nw]) + 999.0
sffarr = np.zeros([nw])
calc_flat = np.zeros([2048, 2048]) + 999.0
# loop through the wavelengths
msg = " Looping through the wavelngth, this may take a little time ... "
print(msg)
log_msgs.append(msg)
flat_wave = wave.flatten()
wave_shape = np.shape(wave)
for j in range(0, nw):
if np.isfinite(flat_wave[j]): # skip if wavelength is NaN
                # get the pixel indices
jwav = flat_wave[j]
t=np.where(wave == jwav)
                pind = [t[0][0]+py0-1, t[1][0]+px0-1]   # pind = [pixel_y, pixel_x] in python, [x, y] in IDL
if debug:
print('j, jwav, px0, py0 : ', j, jwav, px0, py0)
print('pind[0], pind[1] = ', pind[0], pind[1])
# get the pixel bandwidth **this needs to be modified for prism, since the dispersion is not linear!**
delw = 0.0
if (j!=0) and (int((j-1)/nx)==int(j/nx)) and (int((j+1)/nx)==int(j/nx)) and np.isfinite(flat_wave[j+1]) and np.isfinite(flat_wave[j-1]):
delw = 0.5 * (flat_wave[j+1] - flat_wave[j-1])
if (j==0) or not np.isfinite(flat_wave[j-1]) or (int((j-1)/nx) != int(j/nx)):
delw = 0.5 * (flat_wave[j+1] - flat_wave[j])
if (j==nw-1) or not np.isfinite(flat_wave[j+1]) or (int((j+1)/nx) != int(j/nx)):
delw = 0.5 * (flat_wave[j] - flat_wave[j-1])
if debug:
#print("(j, (j-1), nx, (j-1)/nx, (j+1), (j+1)/nx)", j, (j-1), nx, int((j-1)/nx), (j+1), int((j+1)/nx))
#print("np.isfinite(flat_wave[j+1]), np.isfinite(flat_wave[j-1])", np.isfinite(flat_wave[j+1]), np.isfinite(flat_wave[j-1]))
#print("flat_wave[j+1], flat_wave[j-1] : ", np.isfinite(flat_wave[j+1]), flat_wave[j+1], flat_wave[j-1])
print("delw = ", delw)
# integrate over D-flat fast vector
dfrqe_wav = dfrqe.field("WAVELENGTH")
dfrqe_rqe = dfrqe.field("RQE")
iw = np.where((dfrqe_wav >= jwav-delw/2.0) & (dfrqe_wav <= jwav+delw/2.0))
if np.size(iw) == 0:
iw = -1
int_tab = auxfunc.idl_tabulate(dfrqe_wav[iw], dfrqe_rqe[iw])
if int_tab == 0:
int_tab = np.interp(dfrqe_wav[iw], dfrqe_wav, dfrqe_rqe)
dff = int_tab
else:
first_dfrqe_wav, last_dfrqe_wav = dfrqe_wav[iw][0], dfrqe_wav[iw][-1]
dff = int_tab/(last_dfrqe_wav - first_dfrqe_wav)
if debug:
#print("np.shape(dfrqe_wav) : ", np.shape(dfrqe_wav))
#print("np.shape(dfrqe_rqe) : ", np.shape(dfrqe_rqe))
#print("dfimdq[pind[0]][pind[1]] : ", dfimdq[pind[0]][pind[1]])
#print("np.shape(iw) =", np.shape(iw))
#print("np.shape(dfrqe_wav[iw[0]]) = ", np.shape(dfrqe_wav[iw[0]]))
#print("np.shape(dfrqe_rqe[iw[0]]) = ", np.shape(dfrqe_rqe[iw[0]]))
#print("int_tab=", int_tab)
print("np.shape(iw) = ", np.shape(iw))
print("iw = ", iw)
print("dff = ", dff)
# interpolate over D-flat cube
dfs = 1.0
if dfimdq[pind[0], pind[1]] == 0:
dfs = np.interp(jwav, dfwave, dfim[:, pind[0], pind[1]])
# integrate over S-flat fast vector
sfv_wav = sfv.field("WAVELENGTH")
sfv_dat = sfv.field("DATA")
if (jwav < 5.3) and (jwav > 0.6):
iw = np.where((sfv_wav >= jwav-delw/2.0) & (sfv_wav <= jwav+delw/2.0))
if np.size(iw) == 0:
iw = -1
if np.size(iw) > 1:
int_tab = auxfunc.idl_tabulate(sfv_wav[iw], sfv_dat[iw])
first_sfv_wav, last_sfv_wav = sfv_wav[iw][0], sfv_wav[iw][-1]
sff = int_tab/(last_sfv_wav - first_sfv_wav)
elif np.size(iw) == 1:
sff = float(sfv_dat[iw])
else:
sff = 999.0
# get s-flat pixel-dependent correction
sfs = 1.0
if sfimdq[pind[0], pind[1]] == 0:
sfs = sfim[pind[0], pind[1]]
if debug:
print("jwav-delw/2.0 = ", jwav-delw/2.0)
print("jwav+delw/2.0 = ", jwav+delw/2.0)
print("np.shape(sfv_wav), sfv_wav[-1] = ", np.shape(sfv_wav), sfv_wav[-1])
print("iw = ", iw)
print("sfv_wav[iw] = ", sfv_wav[iw])
print("int_tab = ", int_tab)
print("first_sfv_wav, last_sfv_wav = ", first_sfv_wav, last_sfv_wav)
print("sfs = ", sfs)
print("sff = ", sff)
# integrate over f-flat fast vector
# reference file blue cutoff is 1 micron, so need to force solution for shorter wavs
ffv_wav = ffv.field("WAVELENGTH")
ffv_dat = ffv.field("DATA")
fff = 1.0
if jwav-delw/2.0 >= 1.0:
iw = np.where((ffv_wav >= jwav-delw/2.0) & (ffv_wav <= jwav+delw/2.0))
if np.size(iw) == 0:
iw = -1
if np.size(iw) > 1:
int_tab = auxfunc.idl_tabulate(ffv_wav[iw], ffv_dat[iw])
first_ffv_wav, last_ffv_wav = ffv_wav[iw][0], ffv_wav[iw][-1]
fff = int_tab/(last_ffv_wav - first_ffv_wav)
elif np.size(iw) == 1:
fff = float(ffv_dat[iw])
flatcor[j] = dff * dfs * sff * sfs * fff
sffarr[j] = sff
# To visually compare between the pipeline flat and the calculated one (e.g. in ds9), <NAME>
# suggested using the following line:
calc_flat[pind[0], pind[1]] = flatcor[j]
# this line writes the calculated flat into a full frame array
# then this new array needs to be written into a file. This part has not been done yet.
# Difference between pipeline and calculated values
delf[j] = pipeflat[pind[0], pind[1]] - flatcor[j]
# Remove all pixels with values=1 (mainly inter-slit pixels) for statistics
if pipeflat[pind[0], pind[1]] == 1:
delf[j] = 999.0
if np.isnan(jwav):
flatcor[j] = 1.0 # no correction if no wavelength
if debug:
print("np.shape(iw) = ", np.shape(iw))
print("fff = ", fff)
print("flatcor[j] = ", flatcor[j])
print("delf[j] = ", delf[j])
# ignore outliers for calculating median
delfg = delf[np.where(delf != 999.0)]
#delfg_median, delfg_std = np.median(delfg), np.std(delfg)
msg = "Flat value differences for slice number: "+pslice
print(msg)
log_msgs.append(msg)
#print(" median = ", delfg_median, " stdev =", delfg_std)
stats_and_strings= auxfunc.print_stats(delfg, "Flat Difference", float(threshold_diff), abs=True)
stats, stats_print_strings = stats_and_strings
delfg_mean, delfg_median, delfg_std = stats
for msg in stats_print_strings:
log_msgs.append(msg)
if debug:
print("np.shape(delf) = ", np.shape(delf))
print("np.shape(delfg) = ", np.shape(delfg))
all_delfg_mean.append(delfg_mean)
all_delfg_median.append(delfg_median)
# make the slice plot
if np.isfinite(delfg_median) and (len(delfg)!=0):
if show_figs or save_figs:
msg = "Making the plot for this slice..."
print(msg)
log_msgs.append(msg)
# create histogram
t = (file_basename, det, pslice, "IFUflatcomp_histogram")
title = filt+" "+grat+" SLICE="+pslice+"\n"
plot_name = "".join((file_path, ("_".join(t))+".pdf"))
#mk_hist(title, delfg, delfg_mean, delfg_median, delfg_std, save_figs, show_figs, plot_name=plot_name)
bins = None # binning for the histograms, if None the function will select them automatically
title = title+"Residuals"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = "flat$_{pipe}$ - flat$_{calc}$", "N"
info_hist = [xlabel, ylabel, bins, stats]
                if np.isnan(delfg[1]):
msg = "Unable to create plot of relative wavelength difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = os.path.join(file_path, plot_name)
difference_img = (pipeflat - calc_flat)#/calc_flat
in_slit = np.logical_and(difference_img<900.0, difference_img>-900.0) # ignore points out of the slit,
difference_img[~in_slit] = np.nan # Set values outside the slit to NaN
nanind = np.isnan(difference_img) # get all the nan indexes
difference_img[nanind] = np.nan # set all nan indexes to have a value of nan
plt_origin = None
limits = [px0-5, px0+1500, py0-5, py0+55]
vminmax = [-5*delfg_std, 5*delfg_std] # set the range of values to be shown in the image, will affect color scale
auxfunc.plt_two_2Dimgandhist(difference_img, delfg, info_img, info_hist, plt_name=plt_name, limits=limits,
vminmax=vminmax, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
elif not save_figs and not show_figs:
msg = "Not making plots because both show_figs and save_figs were set to False."
print(msg)
log_msgs.append(msg)
elif not save_figs:
msg = "Not saving plots because save_figs was set to False."
print(msg)
log_msgs.append(msg)
if writefile:
            # this is the file to hold the calculated flat values for each slice
            outfile_ext = fits.ImageHDU(flatcor.reshape(wave_shape), name=pslice)
outfile.append(outfile_ext)
# this is the file to hold the image of pipeline-calculated difference values
complfile_ext = fits.ImageHDU(delf.reshape(wave_shape), name=pslice)
complfile.append(complfile_ext)
# the file is not yet written, indicate that this slit was appended to list to be written
msg = "Extension "+repr(n_ext)+" appended to list to be written into calculated and comparison fits files."
print(msg)
log_msgs.append(msg)
# This is the key argument for the assert pytest function
median_diff = False
if abs(delfg_median) <= float(threshold_diff):
median_diff = True
if median_diff:
test_result = "PASSED"
else:
test_result = "FAILED"
msg = " *** Result of the test: "+test_result+"\n"
print(msg)
log_msgs.append(msg)
all_test_result.append(test_result)
# if the test is failed exit the script
if (delfg_median == 999.0) or not np.isfinite(delfg_median):
msg = "Unable to determine mean, meadian, and std_dev for the slice"+pslice
print(msg)
log_msgs.append(msg)
if mk_all_slices_plt:
if show_figs or save_figs:
# create histogram
t = (file_basename, det, "all_slices_IFU_flatcomp_histogram")
title = ("_".join(t))
# calculate median of medians and std_dev of medians
            all_delfg_mean_arr = np.array(all_delfg_mean)
            all_delfg_median_arr = np.array(all_delfg_median)
            mean_of_delfg_mean = np.mean(all_delfg_mean_arr)
            median_of_delfg_median = np.median(all_delfg_median_arr)
            medians_std = np.std(all_delfg_median_arr)
plot_name = "".join((file_path, title, ".pdf"))
mk_hist(title, all_delfg_median_arr, mean_of_delfg_mean, median_of_delfg_median, medians_std, save_figs, show_figs,
plot_name=plot_name)
elif not save_figs and not show_figs:
msg = "Not making plots because both show_figs and save_figs were set to False."
print(msg)
log_msgs.append(msg)
elif not save_figs:
msg = "Not saving plots because save_figs was set to False."
print(msg)
log_msgs.append(msg)
# create fits file to hold the calculated flat for each slice
if writefile:
outfile_name = step_input_filename.replace("flat_field.fits", det+"_flat_calc.fits")
complfile_name = step_input_filename.replace("flat_field.fits", det+"_flat_comp.fits")
# create the fits list to hold the calculated flat values for each slit
outfile.writeto(outfile_name, overwrite=True)
# this is the file to hold the image of pipeline-calculated difference values
complfile.writeto(complfile_name, overwrite=True)
msg = "Fits file with calculated flat values of each slice saved as: "
print(msg)
print(outfile_name)
log_msgs.append(msg)
log_msgs.append(outfile_name)
msg = "Fits file with comparison (pipeline flat - calculated flat) saved as: "
print(msg)
print(complfile_name)
log_msgs.append(msg)
log_msgs.append(complfile_name)
# If all tests passed then pytest will be marked as PASSED, else it will be FAILED
FINAL_TEST_RESULT = True
for t in all_test_result:
if t == "FAILED":
FINAL_TEST_RESULT = False
break
if FINAL_TEST_RESULT:
msg = "\n *** Final result for flat_field test will be reported as PASSED *** \n"
print(msg)
log_msgs.append(msg)
result_msg = "All slices PASSED flat_field test."
else:
msg = "\n *** Final result for flat_field test will be reported as FAILED *** \n"
print(msg)
log_msgs.append(msg)
result_msg = "One or more slices FAILED flat_field test."
# end the timer
flattest_end_time = time.time() - flattest_start_time
    if flattest_end_time > 60.0:
        flattest_end_time = flattest_end_time/60.0  # in minutes
        flattest_tot_time = "* Script flattest_ifu.py took "+repr(flattest_end_time)+" minutes to finish."
        if flattest_end_time > 60.0:
            flattest_end_time = flattest_end_time/60.  # in hours
            flattest_tot_time = "* Script flattest_ifu.py took "+repr(flattest_end_time)+" hours to finish."
    else:
        flattest_tot_time = "* Script flattest_ifu.py took "+repr(flattest_end_time)+" seconds to finish."
print(flattest_tot_time)
log_msgs.append(flattest_tot_time)
return FINAL_TEST_RESULT, result_msg, log_msgs
if __name__ == '__main__':
# print pipeline version
import jwst
print("\n ** using pipeline version: ", jwst.__version__, "** \n")
# This is a simple test of the code
pipeline_path = "/Users/pena/Documents/PyCharmProjects/nirspec/pipeline"
# input parameters that the script expects
#working_dir = "/Users/pena/Documents/PyCharmProjects/nirspec/pipeline/testing_data/IFU_CV3/PRISM_CLEAR/pipe_testing_files_and_reports/6007022859_491_processing"
working_dir = pipeline_path+"/testing_data/IFU_CV3/G140M_F100LP/pipe_testing_files_and_reports/491_processing"
step_input_filename = working_dir+"/gain_scale_NRS1_flat_field.fits"
dflatref_path = "/grp/jwst/wit4/nirspec/CDP3/04_Flat_field/4.2_D_Flat/nirspec_dflat"
sfile_path = "/grp/jwst/wit4/nirspec/CDP3/04_Flat_field/4.3_S_Flat/IFU/nirspec_IFU_sflat"
fflat_path = "/grp/jwst/wit4/nirspec/CDP3/04_Flat_field/4.1_F_Flat/IFU/nirspec_IFU_fflat"
#dflatref_path = "nirspec_dflat"
#sfile_path = "nirspec_IFU_sflat"
#fflat_path = "nirspec_IFU_fflat"
# name of the output images
writefile = False
# set the names of the resulting plots
plot_name = None#"IFU_flattest_histogram.pdf"
# Run the principal function of the script
median_diff = flattest(step_input_filename, dflatref_path=dflatref_path, sfile_path=sfile_path,
fflat_path=fflat_path, writefile=writefile, mk_all_slices_plt=True,
show_figs=False, save_figs=True, plot_name=plot_name, threshold_diff=1.0e-7, debug=False)
``` |
{
"source": "jhunkeler/pyregion",
"score": 2
} |
#### File: pyregion/tests/test_wcs_helper.py
```python
import numpy as np
#from pylab import savefig
import pyregion
try:
from astropy import wcs as pywcs
except ImportError:
from astropy import wcs as pywcs
def test_estimate_cdelt():
l,b=0,0
# This is the ra,dec of l,b=0,0
ra,dec=266.404497776,-28.9364329295
# This is 'almost' the ra,dec of l,b=0,0 - works
# ra,dec=266.40,-28.93
wcs = pywcs.WCS(naxis=2)
wcs.wcs.crpix = [5.5, 5.5]
wcs.wcs.cdelt = [0.1, -0.1]
wcs.wcs.crval = [l, b]
wcs.wcs.ctype = ["GLON-ZEA".encode("ascii"), "GLAT-ZEA".encode("ascii")]
import pyregion.wcs_helper as wcs_helper
proj = wcs_helper.get_kapteyn_projection(wcs)
cdelt = wcs_helper.estimate_cdelt(proj, 5.5, 5.5)
assert np.allclose([cdelt], [0.1])
region_string="fk5; circle(%s, %s, 0.5000)" % (ra,dec)
reg = pyregion.parse(region_string).as_imagecoord(wcs)
assert np.allclose([reg[0].coord_list[-1]], [0.5/0.1])
``` |
{
"source": "jhunkeler/pysiaf",
"score": 3
} |
#### File: pysiaf/iando/read.py
```python
from collections import OrderedDict
import numpy as np
import os
from astropy.table import Table
import lxml.etree as ET
from ..constants import HST_PRD_DATA_ROOT, JWST_PRD_DATA_ROOT, JWST_SOURCE_DATA_ROOT
def get_siaf(input_siaf, observatory='JWST'):
"""Return a Siaf object corresponding to input_siaf which can be a string path or a Siaf object.
Parameters
----------
input_siaf
observatory
Returns
-------
siaf_object: pysiaf.Siaf
Siaf object
"""
from pysiaf import siaf # runtime import to avoid circular import on startup
if type(input_siaf) == str:
aperture_collection = read_jwst_siaf(filename=input_siaf)
        # initialize siaf as empty object
siaf_object = siaf.Siaf(None)
siaf_object.instrument = aperture_collection[list(aperture_collection.items())[0][0]].InstrName
if siaf_object.instrument == 'NIRCAM':
siaf_object.instrument = 'NIRCam'
elif siaf_object.instrument == 'NIRSPEC':
siaf_object.instrument = 'NIRSpec'
siaf_object.apertures = aperture_collection
siaf_object.description = os.path.basename(input_siaf)
siaf_object.observatory = observatory
elif type(input_siaf) == siaf.Siaf:
siaf_object = input_siaf
siaf_object.description = 'pysiaf.Siaf object'
else:
raise TypeError('Input has to be either a full path or a Siaf object.')
return siaf_object
def read_hst_siaf(file=None):#, AperNames=None):
"""Read apertures from HST SIAF file and return a collection.
This was partially ported from Lallo's plotap.f.
Parameters
----------
file : str
AperNames : str list
Returns
-------
apertures: dict
Dictionary of apertures
"""
from pysiaf import aperture # runtime import to avoid circular import on startup
if file is None:
file = os.path.join(HST_PRD_DATA_ROOT, 'siaf.dat')
# read all lines
siaf_stream = open(file)
data = siaf_stream.readlines()
siaf_stream.close()
# initialize dict of apertures
apertures = OrderedDict()
# inspect SIAF and populate Apertures
CAJ_index = 0
CAK_index = 0
for l, text in enumerate(data):
skip_aperture = False
if (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 0):
a = aperture.HstAperture()
# Process the first 'CAJ' record.
a.ap_name = text[0:10].strip() # Aperture Identifier.
a.v2_cent = np.float(text[10:25]) # SICS V2 Center. (same as a_v2_ref)
a.v3_cent = np.float(text[25:40]) # SICS V3 Center. (same as a_v3_ref)
a.a_shape = text[40:44] # Aperture Shape.
try:
a.maj = np.float(text[44:59]) # Major Axis Dimension.
except ValueError: # when field is empty
a.maj = None
a.Mac_Flag = text[59] # !SI Macro Aperture Flag.
a.BR_OBJ_Flag = text[60] # !Bright Object Alert Flag.
a.brt_obj_thres = text[61:66] # !Bright Object Alert Threshold.
a.Macro_ID = text[66:70] # !SI Macro Aperture Identifier.
rec_type = text[70:73] # !Record type.
CAJ_index = 1
aperture_name = a.ap_name
elif (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 1):
# Process the second 'CAJ' record.
try:
a.min = np.float(text[0:15]) # !Minor Axis Dimension.
except ValueError: # when field is empty
a.min = None
a.plate_scale = np.float(text[15:30]) # !Arcsecond per Pixel plate scale.
a.a_area = np.float(text[30:45]) # !Area of SI Aperture.
a.theta = np.float(text[45:60]) # !Aperture Rotation Angle.
a.SIAS_Flag = text[60] # !SIAS coordinate system flag. (If set then AK rec.)
rec_type = text[70:73] # !Record type.
CAJ_index = 2
elif (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 2):
# Process the third 'CAJ' record.
a.im_par = np.int(text[0:2]) # Image Parity.
a.ideg = np.int(text[2]) # !Polynomial Degree.
a.xa0 = np.float(text[3:18]) # !SIAS X Center. -> like JWST SCIENCE frame
a.ya0 = np.float(text[18:33]) # !SIAS Y Center.
a.xs0 = np.float(text[33:48]) # !SICS X Center. -> like JWST IDEAL frame
a.ys0 = np.float(text[48:63]) # !SICS Y Center.
rec_type = text[70:73] # !Record type.
CAJ_index = 0
elif text.rstrip()[-2::] == 'AJ':
a.SI_mne = text[0:4].strip() # !Science Instrument Mnemonic
a.Tlm_mne = text[4] # !SI Telemetry Mnemonic.
a.Det_mne = text[5] # !SI Detector Mnemonic.
a.A_mne = text[6:10] # !SI Aperture Mnemonic.
a.APOS_mne = text[10] # !SI Aperture Position Mnemonic.
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-3::] == 'CAQ':
a.v1x = np.float(text[0:15]) # !SICS Vertex 1_X -> like JWST IDEAL frame
a.v1y = np.float(text[15:30]) # !SICS Vertex 1_Y
a.v2x = np.float(text[30:45]) # !SICS Vertex 2_X
a.v2y = np.float(text[45:60]) # !SICS Vertex 2_Y
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AQ':
a.v3x = np.float(text[0:15]) # !SICS Vertex 3_X
a.v3y = np.float(text[15:30]) # !SICS Vertex 3_Y
a.v4x = np.float(text[30:45]) # !SICS Vertex 4_X
a.v4y = np.float(text[45:60]) # !SICS Vertex 4_Y
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AP':
# FGS pickles
a.pi_angle = np.float(text[0:15]) # !Inner Radius Orientation Angle.
a.pi_ext = np.float(text[15:30]) # !Angular Extent of the Inner Radius.
a.po_angle = np.float(text[30:45]) # !Outer Radius Orientation Angle.
a.po_ext = np.float(text[45:60]) # !Angular Extent of the Outer Radius.
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AM':
a.a_v2_ref = np.float(text[0:15]) # !V2 Coordinate of Aperture Reference Point. (same as v2_cent)
a.a_v3_ref = np.float(text[15:30]) # !V3 Coordinate of Aperture Reference Point. (same as v3_cent)
a.a_x_incr = np.float(text[30:45]) # !First Coordinate Axis increment.
a.a_y_incr = np.float(text[45:60]) # !Second Coordinate Axis increment.
elif text.rstrip()[-2::] == 'AN':
if (a.a_shape == 'PICK') and ('FGS' in a.ap_name):
# HST FGS are special in the sense that the idl_to_tel transformation is implemented via the TVS matrix
# and not the standard way
# a.set_fgs_tel_reference_point(a.a_v2_ref, a.a_v2_ref)
a.set_idl_reference_point(a.a_v2_ref, a.a_v3_ref, verbose=False)
# pass
if (a.a_shape == 'PICK') | (a.a_shape == 'CIRC'):
# TO BE IMPLEMENTED
# FGS pickle record ends here
# apertures.append(a)
# read(10,1250)Beta1, !Angle of increasing first coordinate axis.
# * Beta2, !Angle of increasing second coordinate axis.
# * a_x_ref, !X reference.
# * a_y_ref, !Y reference.
# * X_TOT_PIX, !Total X-axis pixels.
# * Y_TOT_PIX, !Total Y-axis pixels.
# * rec_type !Record type.
# 1250 format(4(G15.8),2(I5),a3)
# apertures.append(a)
apertures[a.AperName] = a
elif (text.rstrip()[-3::] == 'CAK') & (CAK_index == 0):
# Process the first 'CAK' record.
n_polynomial_coefficients = np.int(((a.ideg + 1) * (a.ideg + 2)) / 2)
# the order is
# SIAS to SICS X Transformation.
# SIAS to SICS Y Transformation.
# SICS to SIAS X Transformation.
            # SICS to SIAS Y Transformation.
polynomial_coefficients = np.ones((n_polynomial_coefficients, 4)) * -99
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
CAK_index += 1
elif (text.rstrip()[-3::] == 'CAK') & (CAK_index != 0):
# Process the remaining 'CAK' records
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
CAK_index += 1
elif text.rstrip()[-2::] == 'AK':
# Process the last polynomial coefficient record.
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
a.polynomial_coefficients = polynomial_coefficients
CAK_index = 0
apertures[a.AperName] = a
# apertures.append(a)
return apertures
def get_jwst_siaf_instrument(tree):
"""Return the instrument specified in the first aperture of a SIAF xml tree.
Returns
-------
instrument : str
All Caps instrument name, e.g. NIRSPEC
"""
for entry in tree.getroot().iter('SiafEntry'):
for node in entry.iterchildren():
if node.tag == 'InstrName':
return node.text
def read_jwst_siaf(instrument=None, filename=None, basepath=None):
"""Read the JWST SIAF and return a collection of apertures.
Parameters
----------
instrument
filename
basepath
Returns
-------
apertures : dict
dictionary of apertures
"""
from pysiaf import aperture # runtime import to avoid circular import on startup
if (filename is None) and (instrument is None):
raise ValueError('Specify either input instrument or filename')
if filename is None:
if basepath is None:
basepath = JWST_PRD_DATA_ROOT
if not os.path.isdir(basepath):
raise RuntimeError("Could not find SIAF data "
"in {}".format(basepath))
filename = os.path.join(basepath, instrument + '_SIAF.xml')
else:
filename = filename
apertures = OrderedDict()
file_seed, file_extension = os.path.splitext(filename)
if file_extension == '.xml':
tree = ET.parse(filename)
instrument = get_jwst_siaf_instrument(tree)
# generate Aperture objects from SIAF XML file, parse the XML
for entry in tree.getroot().iter('SiafEntry'):
if instrument.upper() == 'NIRSPEC':
jwst_aperture = aperture.NirspecAperture()
else:
jwst_aperture = aperture.JwstAperture()
for node in entry.iterchildren():
if (node.tag in aperture.ATTRIBUTES_THAT_CAN_BE_NONE) and (node.text is None):
value = node.text
elif node.tag in aperture.INTEGER_ATTRIBUTES:
try:
value = int(node.text)
except TypeError:
print('{}: {}'.format(node.tag, node.text))
raise TypeError
elif node.tag in aperture.STRING_ATTRIBUTES:
value = node.text
else:
try:
value = float(node.text)
except TypeError:
print('{}: {}'.format(node.tag, node.text))
raise TypeError
# except (ValueError, TypeError):
# value = node.text
setattr(jwst_aperture, node.tag, value)
apertures[jwst_aperture.AperName] = jwst_aperture
else:
raise NotImplementedError
# handle special case of NIRSpec, where auxiliary TRANSFORM apertures are defined and hold transformation parameters
# simple workaround is to attach the TRANSFORM aperture as attribute to the respective NIRSpec aperture
if instrument.upper() == 'NIRSPEC':
for AperName in apertures:
jwst_aperture = apertures[AperName]
if jwst_aperture.AperType in ['FULLSCA', 'OSS']:
for transform_aperture_name in 'CLEAR_GWA_OTE F110W_GWA_OTE F140X_GWA_OTE'.split():
setattr(jwst_aperture, '_{}'.format(transform_aperture_name), apertures[transform_aperture_name])
apertures[AperName] = jwst_aperture
return apertures
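# --- Illustrative sketch (editor addition; not part of the original pysiaf source). ---
# A hypothetical helper showing how read_jwst_siaf defined above is typically called; it
# assumes the JWST PRD XML files are installed under JWST_PRD_DATA_ROOT.
def _example_read_jwst_siaf():
    apertures = read_jwst_siaf(instrument='NIRCam')
    return list(apertures.keys())[:5]   # first few aperture names in the NIRCam SIAF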
def read_siaf_alignment_parameters(instrument):
"""Return astropy table.
Parameters
----------
instrument
Returns
-------
: astropy table
"""
filename = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_alignment.txt'.format(instrument.lower()))
return Table.read(filename, format='ascii.basic', delimiter=',')
def read_siaf_aperture_definitions(instrument):
"""Return astropy table.
Parameters
----------
instrument
Returns
-------
: astropy table
"""
filename = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_aperture_definition.txt'.format(instrument.lower()))
# converters = {'XDetRef': [ascii.convert_numpy(np.float32)]}
# , converters = converters, guess = False
return Table.read(filename, format='ascii.basic', delimiter=',', fill_values=('None', 0))
def read_siaf_ddc_mapping_reference_file(instrument):
"""Return dictionary with the DDC mapping.
Parameters
----------
instrument
Returns
-------
: astropy table
"""
ddc_mapping_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,
'{}_siaf_ddc_apername_mapping.txt'.format(instrument.lower()))
ddc_mapping_table = Table.read(ddc_mapping_file, format='ascii.basic', delimiter=',')
#generate dictionary
_ddc_apername_mapping = {}
for j, siaf_name in enumerate(ddc_mapping_table['SIAF_NAME'].data):
_ddc_apername_mapping[siaf_name] = ddc_mapping_table['DDC_NAME'][j]
return _ddc_apername_mapping
def read_siaf_detector_layout():
"""Return the SIAF detector layout read from the SIAF reference file.
Returns
-------
: astropy table
"""
layout_file = os.path.join(JWST_SOURCE_DATA_ROOT, 'siaf_detector_layout.txt')
return Table.read(layout_file, format='ascii.basic', delimiter=',')
def read_siaf_detector_reference_file(instrument):
"""Return astropy table.
Parameters
----------
instrument
Returns
-------
: astropy table
"""
filename = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_detector_parameters.txt'.format(instrument.lower()))
return Table.read(filename, format='ascii.basic', delimiter=',')
def read_siaf_distortion_coefficients(instrument, aperture_name):
"""Return astropy table.
Parameters
----------
instrument
aperture_name
Returns
-------
: astropy table
"""
distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,
'{}_siaf_distortion_{}.txt'.format(instrument.lower(), aperture_name.lower()))
return Table.read(distortion_reference_file_name, format='ascii.basic', delimiter=',')
def read_siaf_xml_field_format_reference_file(instrument):
"""Return astropy table.
Parameters
----------
instrument
Returns
-------
: astropy table
"""
filename = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}_siaf_xml_field_format.txt'.format(instrument.lower()))
return Table.read(filename, format='ascii.basic', delimiter=',')
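# Usage sketch (illustrative, not part of the original module; assumes
# JWST_SOURCE_DATA_ROOT points at the pysiaf source-data tree and that
# 'NIRISS' is an instrument with the corresponding reference files):
# layout = read_siaf_detector_layout()
# alignment = read_siaf_alignment_parameters('NIRISS')
# print(layout['AperName'].data, alignment.colnames)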
```
#### File: pysiaf/tests/test_aperture.py
```python
import numpy as np
import pytest
from ..aperture import HstAperture
from ..iando import read
from ..siaf import Siaf, get_jwst_apertures
from ..utils.tools import get_grid_coordinates
@pytest.fixture(scope='module')
def siaf_objects():
"""Return list of Siaf objects.
:return:
"""
# for instrument in 'NIRISS NIRCam MIRI FGS NIRSpec'.split():
siafs = []
for instrument in 'NIRCam NIRISS FGS MIRI'.split():
siaf = Siaf(instrument)
siafs.append(siaf)
return siafs
def test_hst_aperture_init():
"""Test the initialization of an HstAperture object."""
hst_aperture = HstAperture()
hst_aperture.a_v2_ref = -100.
assert hst_aperture.a_v2_ref == hst_aperture.V2Ref, 'HST aperture initialisation failed'
def test_jwst_aperture_transforms(siaf_objects, verbose=False):
"""Test transformations between frames.
Transform back and forth between frames and verify that input==output.
Parameters
----------
siaf_objects
verbose
"""
labels = ['X', 'Y']
from_frame = 'sci'
to_frames = 'det idl tel'.split()
x_sci = np.linspace(-10, 10, 3)
y_sci = np.linspace(10, -10, 3)
for siaf in siaf_objects:
if siaf.instrument in ['MIRI']:
threshold = 0.2
elif siaf.instrument in ['NIRCam']:
threshold = 42.
else:
threshold = 0.1
for aper_name in siaf.apertures.keys():
skip = False
# aperture
aperture = siaf[aper_name]
if (aperture.AperType in ['COMPOUND', 'TRANSFORM']) or (
siaf.instrument in ['NIRCam', 'MIRI', 'NIRSpec'] and
aperture.AperType == 'SLIT'):
skip = True
if skip is False:
# test transformations
if verbose:
print('testing {} {}'.format(siaf.instrument, aper_name))
for to_frame in to_frames:
forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))
backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))
x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))
x_mean_error = np.mean(np.abs(x_sci - x_out))
y_mean_error = np.mean(np.abs(y_sci - y_out))
for i, error in enumerate([x_mean_error, y_mean_error]):
if verbose:
print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(
siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))
assert error < threshold
def test_jwst_aperture_vertices(siaf_objects):
"""Test the JwstAperture vertices by rederiving them and comparing to SIAF.
Rederive Idl vertices and compare with content of SIAFXML
"""
verbose = False
threshold = 0.2
labels = ['X', 'Y']
for siaf in siaf_objects:
for aper_name in siaf.apertures.keys():
skip = False
#aperture
aperture = siaf[aper_name]
if (aperture.AperType in ['COMPOUND', 'TRANSFORM']) or \
(siaf.instrument in ['NIRCam', 'MIRI', 'NIRSpec']
and aperture.AperType == 'SLIT'):
skip = True
if skip is False:
if verbose:
print('testing {} {}'.format(siaf.instrument, aper_name))
# Idl corners from Sci attributes (XSciRef, XSciSize etc.)
x_idl_vertices_rederived, y_idl_vertices_rederived = aperture.corners('idl',
rederive=True)
# Idl corners from SIAFXML
x_idl_vertices = np.array([getattr(aperture, 'XIdlVert{:d}'.format(j)) for j in [1, 2, 3, 4]])
y_idl_vertices = np.array([getattr(aperture, 'YIdlVert{:d}'.format(j)) for j in [1, 2, 3, 4]])
if verbose:
print(x_idl_vertices, x_idl_vertices_rederived)
print(y_idl_vertices, y_idl_vertices_rederived)
x_mean_error = np.abs(np.mean(x_idl_vertices) - np.mean(x_idl_vertices_rederived))
y_mean_error = np.abs(np.mean(y_idl_vertices) - np.mean(y_idl_vertices_rederived))
if verbose:
for i, error in enumerate([x_mean_error, y_mean_error]):
print('{} {}: Error in {}Idl_vertices is {:02.6f})'.format(siaf.instrument, aper_name, labels[i], error))
assert x_mean_error < threshold
assert y_mean_error < threshold
def test_raw_transformations(verbose=False):
"""Test raw_to_sci and sci_to_raw transformations"""
siaf_detector_layout = read.read_siaf_detector_layout()
master_aperture_names = siaf_detector_layout['AperName'].data
apertures_dict = {'instrument': siaf_detector_layout['InstrName'].data}
apertures_dict['pattern'] = master_aperture_names
apertures = get_jwst_apertures(apertures_dict, exact_pattern_match=True)
grid_amplitude = 2048
x_raw, y_raw = get_grid_coordinates(10, (grid_amplitude/2, grid_amplitude/2), grid_amplitude)
labels = ['X', 'Y']
threshold = 0.1
from_frame = 'raw'
to_frame = 'sci'
# compute roundtrip error
for aper_name, aperture in apertures.apertures.items():
forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))
backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))
x_out, y_out = backward_transform(*forward_transform(x_raw, y_raw))
x_mean_error = np.mean(np.abs(x_raw - x_out))
y_mean_error = np.mean(np.abs(y_raw - y_out))
for i, error in enumerate([x_mean_error, y_mean_error]):
if verbose:
print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(
aperture.InstrName, aper_name, from_frame, to_frame, labels[i], error))
assert error < threshold
```
#### File: pysiaf/tests/test_nirspec.py
```python
import os
from astropy.io import fits
from astropy.table import Table
from astropy import units as u # used by the optional diagnostic plotting block below
import numpy as np
import pylab as pl
# import pytest
from ..constants import JWST_TEMPORARY_DATA_ROOT, TEST_DATA_ROOT
from ..siaf import Siaf
instrument = 'NIRSpec'
def test_against_test_data():
"""NIRSpec test data comparison.
Mean and RMS difference between the IDT computations and the pysiaf computations are
computed and compared against acceptable thresholds.
"""
siaf = Siaf(instrument)
# directory that holds SIAF XML file
# test_dir = os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'generate_test')
# siaf_xml_file = os.path.join(test_dir, '{}_SIAF.xml'.format(instrument))
# siaf = Siaf(instrument, filename=siaf_xml_file)
test_data_dir = os.path.join(TEST_DATA_ROOT, instrument)
include_tilt = False
if include_tilt is False:
ta_transform_data_dir = os.path.join(test_data_dir, 'testDataSet_TA', 'testDataNoTilt')
filter_list = 'CLEAR F110W F140X'.split()
sca_list = ['SCA491', 'SCA492']
# filter_list = 'CLEAR'.split()
# sca_list = ['SCA491']
# sca_list = ['SCA492']
difference_metrics = {}
index = 0
for sca_name in sca_list:
for filter_name in filter_list:
test_data_file = os.path.join(ta_transform_data_dir, 'testDataTA_{}{}.fits'.format(sca_name, filter_name))
test_data = Table(fits.getdata(test_data_file))
if sca_name == 'SCA491':
AperName = 'NRS1_FULL_OSS'
elif sca_name == 'SCA492':
AperName = 'NRS2_FULL_OSS'
aperture = siaf[AperName]
if 0:
pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k'); pl.clf()
aperture.plot(name_label=True)
siaf['NRS2_FULL_OSS'].plot(name_label=True)
pl.plot(test_data['XAN']*u.deg.to(u.arcsecond), test_data['YAN']*u.deg.to(u.arcsecond), 'b.')
pl.show()
1/0
# SCI to GWA detector side (Step 1. in Sections 2.3.3, 5.5.2 of JWST-STScI-005921 , see also Table 4.7.1)
test_data['pysiaf_GWAout_X'], test_data['pysiaf_GWAout_Y'] = aperture.sci_to_gwa(test_data['SCA_X'], test_data['SCA_Y'])
# effect of mirror, transform from GWA detector side to GWA skyward side
if include_tilt is False:
# last equation in Secion 5.5.2
test_data['pysiaf_GWAin_X'] = -1 * test_data['pysiaf_GWAout_X']
test_data['pysiaf_GWAin_Y'] = -1 * test_data['pysiaf_GWAout_Y']
# transform to OTE frame (XAN, YAN)
test_data['pysiaf_XAN'], test_data['pysiaf_YAN'] = aperture.gwa_to_ote(
test_data['pysiaf_GWAin_X'], test_data['pysiaf_GWAin_Y'], filter_name)
for axis_name in ['X', 'Y']:
for parameter_name in ['{}AN'.format(axis_name)]:
# compute differences between SIAF implementation and IDT test dataset
test_data['difference_{}'.format(parameter_name)] = test_data['pysiaf_{}'.format(parameter_name)] - test_data['{}'.format(parameter_name)]
for key_seed in ['mean', 'rms']:
key_name = 'diff_{}_{}'.format(parameter_name, key_seed)
if key_name not in difference_metrics.keys():
difference_metrics[key_name] = []
if key_seed == 'mean':
difference_metrics[key_name].append(np.mean(test_data['difference_{}'.format(parameter_name)]))
elif key_seed == 'rms':
difference_metrics[key_name].append(np.std(test_data['difference_{}'.format(parameter_name)]))
print('{} {} SCA_to_OTE transform comparison to {:>10} {:>10} MEAN={:+1.3e} RMS={:1.3e}'.format(sca_name, filter_name, AperName, parameter_name, difference_metrics['diff_{}_{}'.format(parameter_name, 'mean')][index], difference_metrics['diff_{}_{}'.format(parameter_name, 'rms')][index]))
assert difference_metrics['diff_{}_{}'.format(parameter_name, 'mean')][index] < 1e-9
assert difference_metrics['diff_{}_{}'.format(parameter_name, 'rms')][index] < 5e-9
if 0:
threshold = 1e-6
if (difference_metrics['diff_{}_{}'.format(parameter_name, 'rms')][index] > threshold):
pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k'); pl.clf()
pl.quiver(test_data['SCA_X'], test_data['SCA_Y'], test_data['difference_XAN'], test_data['difference_YAN'], angles='xy')
pl.title('Difference IDT and pysiaf')
pl.show()
index += 1
def test_nirspec_aperture_transforms(verbose=False):
"""Test transformations between frames.
Transform back and forth between frames and verify that input==output.
Parameters
----------
verbose
"""
siaf = Siaf(instrument)
labels = ['X', 'Y']
threshold = 0.2
from_frame = 'sci'
to_frames = 'det gwa idl tel'.split()
x_sci = np.linspace(-10, 10, 3)
y_sci = np.linspace(10, -10, 3)
for aper_name in siaf.apertures.keys():
skip = False
# aperture
aperture = siaf[aper_name]
if (aperture.AperType in ['COMPOUND', 'TRANSFORM', 'SLIT']) or ('_FULL' not in aper_name):
skip = True
# if (aperture.AperType in ['COMPOUND', 'TRANSFORM']) or (
# siaf.instrument in ['NIRCam', 'MIRI', 'NIRSpec'] and
# aperture.AperType == 'SLIT'):
# skip = True
if skip is False:
# test transformations
if verbose:
print('testing {} {}'.format(siaf.instrument, aper_name))
for to_frame in to_frames:
forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))
backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))
x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))
x_mean_error = np.mean(np.abs(x_sci - x_out))
y_mean_error = np.mean(np.abs(y_sci - y_out))
for i, error in enumerate([x_mean_error, y_mean_error]):
if verbose:
print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(
siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))
assert error < threshold
```
#### File: pysiaf/utils/polynomial.py
```python
from __future__ import absolute_import, print_function, division
import numpy as np
import pylab as pl
import scipy as sp
from scipy import linalg
def choose(n, r):
"""The number of ways of choosing r items from n"""
if n < 0 or r < 0:
print('Negative values not allowed')
return 0
if r > n:
print('r must not be greater than n')
return 0
combin = 1
if r > n / 2:
r1 = n - r
else:
r1 = r
for k in range(r1):
combin = combin * (n - k) // (k + 1)
return combin
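# e.g. choose(5, 2) == 10 and choose(4, 0) == 1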
def dpdx(a, x, y, order=4):
"""Differential with respect to x
:param a:
:param x:
:param y:
:param order:
:return:
"""
dpdx = 0.0
k = 1 # index for coefficients
for i in range(1, order + 1):
for j in range(i + 1):
if i - j > 0:
dpdx = dpdx + (i - j) * a[k] * x ** (i - j - 1) * y ** j
k += 1
return dpdx
def dpdy(a, x, y, order=4):
"""Differential with respect to y
:param a:
:param x:
:param y:
:param order:
:return:
"""
dpdy = 0.0
k = 1 # index for coefficients
for i in range(1, order + 1):
for j in range(i + 1):
if j > 0:
dpdy = dpdy + j * a[k] * x ** (i - j) * y ** (j - 1)
k += 1
return dpdy
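# e.g. for p(x, y) = a[0] + a[1]*x + a[2]*y (order 1),
# dpdx(a, x, y, 1) == a[1] and dpdy(a, x, y, 1) == a[2]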
def flatten(A, order):
"""Convert triangular layout to linear array"""
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = A[i, j]
k += 1
return AF
def FlipX(A, order=4):
"""Change sign of all coefficients with odd x power"""
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = (-1)**(i-j)*A[k]
k += 1
return AF
def FlipXY(A, order=4):
"Change sign for coeffs where sum of x and y powers is odd"
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = (-1)**(i)*A[k]
k += 1
return AF
def FlipY(A, order = 4):
"""Change sign of all coefficients with odd y power"""
terms = (order+1)*(order+2)//2
AF = sp.zeros(terms)
k = 0
for i in range(order+1):
for j in range(i+1):
AF[k] = (-1)**(j)*A[k]
k += 1
return AF
def invert(a, b, u, v, n, verbose=False):
"""Given that order n polynomials of (x,y) have the result (u,v), find (x,y)
Newton Raphson method in two dimensions"""
tol = 1.0e-6
err = 1.0
# Initial guesses - Linear approximation
det = a[1] * b[2] - a[2] * b[1]
x0 = (b[2] * u - a[2] * v) / det
y0 = (-b[1] * u + a[1] * v) / det
if verbose:
print('Initial guesses', x0, y0)
x = x0
y = y0
X = sp.array([x, y])
iter = 0
while err > tol:
f1 = sp.array([poly(a, x, y, n) - u, poly(b, x, y, n) - v])
j = sp.array([[dpdx(a, x, y, n), dpdy(a, x, y, n)], [dpdx(b, x, y, n), dpdy(b, x, y, n)]])
invj = sp.linalg.inv(j)
X = X - sp.dot(invj, f1)
if verbose:
print('[X1,Y1]', X)
x1 = X[0]
y1 = X[1]
err = sp.hypot(x - x1, y - y1)
if verbose:
print('Error %10.2e' % err)
[x, y] = [x1, y1]
iter += 1
return x, y, err, iter
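# Round-trip sketch (toy order-2 coefficients, not part of the original module):
# a = sp.array([0.0, 1.0, 0.1, 0.0, 0.0, 0.0])
# b = sp.array([0.0, 0.2, 1.0, 0.0, 0.0, 0.0])
# u, v = poly(a, 3.0, 4.0, 2), poly(b, 3.0, 4.0, 2)
# x, y, err, niter = invert(a, b, u, v, 2) # x, y recover 3.0, 4.0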
def jacob(a, b, x, y, order=4):
"""Calculation of Jacobean, or relative area"""
j = dpdx(a, x, y,order)*dpdy(b, x, y,order) - dpdx(b, x, y,order)*dpdy(a, x, y,order)
j = sp.fabs(j)
return j
def nircam_reorder(A, B, order):
"""Changes coefficient order from y**2 xy x**2 to x**2 xy y**2
:param A:
:param B:
:param order:
:return:
"""
terms = (order + 1) * (order + 2) // 2
A2 = np.zeros((terms))
B2 = np.zeros((terms))
for i in range(order + 1):
ti = i * (i + 1) // 2
for j in range(i + 1):
A2[ti + j] = A[ti + i - j]
B2[ti + j] = B[ti + i - j]
return (A2, B2)
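# Order-1 sketch: within each polynomial degree the coefficients are reversed,
# e.g. A = [c0, c_y, c_x] -> A2 = [c0, c_x, c_y]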
def poly(a, x, y, order=4):
"""Return polynomial
:param a:
:param x:
:param y:
:param order:
:return:
"""
pol = 0.0
k = 0 # index for coefficients
for i in range(order+1):
for j in range(i+1):
pol = pol + a[k]*x**(i-j)*y**j
k+=1
return pol
def polyfit(u, x, y, order):
"""Fit polynomial to a set of u values on an x,y grid
u is a function u(x,y) being a polynomial of the form
u = a[i, j] x**(i-j) y**j. x and y can be on a grid or be arbitrary values"""
# First set up x and y powers for each coefficient
px = []
py = []
for i in range(order + 1):
for j in range(i + 1):
px.append(i - j)
py.append(j)
terms = len(px)
# print terms, ' terms for order ', order
# print px
# print py
# Make up matrix and vector
vector = sp.zeros((terms))
mat = sp.zeros((terms, terms))
for i in range(terms):
vector[i] = (u * x ** px[i] * y ** py[i]).sum()
for j in range(terms):
mat[i, j] = (x ** px[i] * y ** py[i] * x ** px[j] * y ** py[j]).sum()
# print 'Vector', vector
# print 'Matrix'
# print mat
imat = linalg.inv(mat)
# print 'Inverse'
# print imat
# Check that inversion worked
# print sp.dot(mat,imat)
coeffs = sp.dot(imat, vector)
return coeffs
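# Fit sketch (toy data, not part of the original module):
# xg, yg = sp.mgrid[0:5, 0:5]
# coeffs = polyfit(1.0 + 2.0*xg + 3.0*yg, xg, yg, 1)
# # coeffs ~= [1.0, 2.0, 3.0] in the a[k]*x**(i-j)*y**j ordering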
def polyfit2(u, x, y, order):
"""Fit polynomial to a set of u values on an x,y grid
u is a function u(x,y) being a polynomial of the form
u = a[i, j]x**(i-j)y**j. x and y can be on a grid or be arbitrary values
This version uses solve instead of matrix inversion"""
# First set up x and y powers for each coefficient
px = []
py = []
for i in range(order + 1):
for j in range(i + 1):
px.append(i - j)
py.append(j)
terms = len(px)
# print terms, ' terms for order ', order
# print px
# print py
# Make up matrix and vector
vector = sp.zeros((terms))
mat = sp.zeros((terms, terms))
for i in range(terms):
vector[i] = (u * x ** px[i] * y ** py[i]).sum() # Summing over all x,y
for j in range(terms):
mat[i, j] = (x ** px[i] * y ** py[i] * x ** px[j] * y ** py[j]).sum()
coeffs = linalg.solve(mat, vector)
return coeffs
def reorder(A, B, verbose=False) :
"""Reorder Sabatke coefficients to Cox convention"""
order = 5
terms = (order+1)*(order+2)//2
Aarray = sp.zeros((order+1,order+1))
Barray = sp.zeros((order+1,order+1))
k1 = 0
for i in range(order+1):
for j in range(order+1-i):
Aarray[j,i] = A[k1]
Barray[j,i] = B[k1]
k1 += 1
A2 = sp.zeros((terms))
B2 = sp.zeros((terms))
k2 = 0
for i in range(order+1):
for j in range(i+1):
A2[k2] = Aarray[j,i-j]
B2[k2] = Barray[j,i-j]
k2 += 1
if verbose:
print('A')
triangle(A2, order)
print('\nB')
triangle(B2, order)
return (A2, B2)
def rescale(A, B, C, D, order, scale):
"""
Change coefficients to arcsec scale
Ported here from makeSIAF.py
<NAME> 2018-01-03
<NAME> 2018-01-04: fixed side-effect on ABCD variables
:param A:
:param B:
:param C:
:param D:
:param order:
:param scale:
:return:
"""
A_scaled = scale*A
B_scaled = scale*B
number_of_coefficients = np.int((order + 1) * (order + 2) / 2)
C_scaled = np.zeros(number_of_coefficients)
D_scaled = np.zeros(number_of_coefficients)
k = 0
for i in range(order+1):
factor = scale**i
for j in range(i+1):
C_scaled[k] = C[k]/factor
D_scaled[k] = D[k]/factor
k += 1
return A_scaled, B_scaled, C_scaled, D_scaled
def Rotate(A,B,theta):
"""
Ported to here from makeSIAF.py
<NAME> 2018-01-03
:param A:
:param B:
:param theta:
:return:
"""
A2 = A*np.cos(theta) + B*np.sin(theta)
B2 = - A*np.sin(theta) + B*np.cos(theta)
return (A2,B2)
def rotate_coefficients(A, B, angle_deg):
""" <NAME>: this version of rotate_coeffs is used in nircam_get_polynomial_both
:param A:
:param B:
:param angle_deg:
:return:
"""
AR = A * np.cos(np.deg2rad(angle_deg)) - B * np.sin(np.deg2rad(angle_deg))
BR = A * np.sin(np.deg2rad(angle_deg)) + B * np.cos(np.deg2rad(angle_deg))
return AR, BR
def RotateCoeffs(a, theta, order=4, verbose=False):
"""Rotate axes of coefficients by theta degrees"""
c = np.cos(np.deg2rad(theta))
s = np.sin(np.deg2rad(theta))
# First place in triangular layout
at = sp.zeros([order+1,order+1])
k = 0
for m in range(order+1):
for n in range(m+1):
at[m, n] = a[k]
k+=1
# Apply rotation
atrotate = sp.zeros([order+1,order+1])
arotate = sp.zeros([len(a)]) # Copy shape of a
for m in range(order+1):
for n in range(m+1):
for mu in range(0,m-n+1):
for j in range(m-n-mu, m-mu+1):
factor = (-1)**(m-n-mu)*choose(m-j, mu)*choose(j, m-n-mu)
cosSin = c**(j+2*mu-m+n)*s**(2*m-2*mu-j-n)
atrotate[m, n] = atrotate[m, n] + factor*cosSin*at[m, j]
if verbose: print(m, n, j, factor, 'cos^', j+2*mu-m+n, 'sin^',2*m-2*mu-j-n, ' A',m, j)
# Put back in linear layout
k = 0
for m in range(order+1):
for n in range(m+1):
arotate[k] = atrotate[m, n]
k+=1
return arotate
def ShiftCoeffs(a, xshift, yshift, order=4, verbose=False):
"""Calculate coefficients of polynomial when shifted to new origin"""
# First place in triangular layout
at = sp.zeros([order + 1, order + 1])
atshift = sp.zeros([order + 1, order + 1])
ashift = sp.zeros([len(a)]) # Copy shape of a
k = 0
for p in range(order + 1):
for q in range(p + 1):
at[p, q] = a[k]
k += 1
# Apply shift
for p in range(order + 1):
for q in range(p + 1):
if verbose:
print("A'%1d%1d" % (p, q))
for i in range(p, order + 1):
for j in range(q, i + 1 - (p - q)):
f = choose(j, q) * choose(i - j, p - q)
atshift[p, q] = atshift[p, q] + f * xshift ** ((i - j) - (p - q)) * yshift ** (
j - q) * at[i, j]
if verbose:
print('%2d A(%1d,%1d) x^%1d y^%1d' % (f, i, j, i - j - (p - q), (j - q)))
if verbose:
print()
# Put back in linear layout
k = 0
for p in range(order + 1):
for q in range(p + 1):
ashift[k] = atshift[p, q]
k += 1
return ashift
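# Consistency sketch (illustrative): the shifted coefficients reproduce the
# original polynomial at the shifted position, i.e. for any (x, y)
# poly(ShiftCoeffs(a, 2.0, -1.0, 2), x, y, 2) ~= poly(a, x + 2.0, y - 1.0, 2)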
def testpoly():
[x, y] = sp.mgrid[0:10, 0:10]
# print 'X'
# print x
# print 'Y'
# print y
u = sp.zeros((10, 10))
v = sp.zeros((10, 10))
# Random polynomials
a0 = sp.random.rand(1)
a1 = 0.1 * (sp.random.rand(2) - 0.5)
a2 = 0.01 * (sp.random.rand(3) - 0.5)
a = sp.concatenate((a0, a1))
a = sp.concatenate((a, a2))
a[2] = 0.01 * a[2]
print('A coefficients')
print(a)
b0 = sp.random.rand(1)
b1 = 0.1 * (sp.random.rand(2) - 0.5)
b2 = 0.01 * (sp.random.rand(3) - 0.5)
b = sp.concatenate((b0, b1))
b = sp.concatenate((b, b2))
b[1] = 0.01 * b[1]
print('B coefficients')
print(b)
for i in range(10):
for j in range(10):
u[i, j] = poly(a, x[i, j], y[i, j], 2) # + sp.random.normal(0.0, 0.01)
v[i, j] = poly(b, x[i, j], y[i, j], 2) # + sp.random.normal(0.0,0.01)
# print z
s1 = polyfit2(u, x, y, 2)
s2 = polyfit2(v, x, y, 2)
print('S1', s1)
print('S2', s2)
uc = poly(s1, x, y, 2)
vc = poly(s2, x, y, 2)
pl.figure(1)
pl.clf()
pl.grid(True)
pl.plot(u, v, 'gx')
pl.plot(uc, vc, 'r+')
def TransCoeffs(A, a, b, c, d, order=4, verbose=False):
"""Transform polynomial coefficients to allow for
xp = a*x + b*y
yp = c*x + d*y"""
A1 = sp.zeros((order + 1, order + 1))
A2 = sp.zeros((order + 1, order + 1))
ncoeffs = (order + 1) * (order + 2) // 2
if verbose:
print(ncoeffs, 'coefficients for order', order)
AT = sp.zeros((ncoeffs))
# First place A in triangular layout
k = 0
for i in range(order + 1):
for j in range(i + 1):
A1[i, j] = A[k]
k += 1
for m in range(order + 1):
for n in range(m + 1):
if verbose:
print('\nM,N', m, n)
for mu in range(m - n + 1):
for j in range(m - n - mu, m - mu + 1):
if verbose:
print('J, MU', j, mu)
if verbose:
print('Choose', m - j, mu, 'and', j, m - n - mu)
factor = choose(m - j, mu) * choose(j, m - n - mu)
A2[m, n] += factor * a ** mu * b ** (m - j - mu) * c ** (m - n - mu) * d ** (
mu + j - m + n) * A1[m, j]
if verbose:
print(m, j, ' Factor', factor)
# Restore A2 to flat layout in AT
k = 0
for m in range(order + 1):
for n in range(m + 1):
AT[k] = A2[m, n]
k += 1
return AT
def triangle(A, order=4):
"""Print coefficients in triangular layout"""
k = 0
for i in range(order + 1):
for j in range(i + 1):
print('%12.5e' % A[k], end=' ')
k += 1
print()
def triangulate(A, order):
"""Convert linear array to 2-D array with triangular coefficient layout"""
AT = sp.zeros((order + 1, order + 1))
k = 0
for i in range(order + 1):
for j in range(i + 1):
AT[i, j] = A[k]
k += 1
return AT
def two_step(A, B, a, b, order):
"""
Change coefficients when
xp = a[0] + a[1].x + a[2].y
yp = b[0] + b[1].x + b[2].y
:param A:
:param B:
:param a:
:param b:
:param order:
:return:
"""
terms = (order+1)*(order+2)//2
A2 = sp.zeros((order+1,order+1))
B2 = sp.zeros((order+1,order+1))
k=0
for i in range(order+1):
for j in range(i+1):
for alpha in range(i-j+1):
for beta in range(i-j-alpha+1):
f1 = choose(i-j,alpha)*choose(i-j-alpha, beta)*a[0]**(i-j-alpha-beta)*a[1]**alpha*a[2]**beta
for gamma in range(j+1):
for delta in range(j-gamma+1):
f2 = choose(j,gamma)*choose(j-gamma,delta)*b[0]**(j-gamma-delta)*b[1]**gamma*b[2]**delta
A2[alpha+beta+gamma+delta, beta+delta] += A[k]*f1*f2
B2[alpha+beta+gamma+delta, beta+delta] += B[k]*f1*f2
k += 1
# Flatten A2 and B2
k = 0
Aflat = sp.zeros(terms)
Bflat = sp.zeros(terms)
for i in range(order+1):
for j in range(i+1):
Aflat[k] = A2[i, j]
Bflat[k] = B2[i, j]
k += 1
return (Aflat, Bflat)
# def TestTwoStep():
# A = sp.array([10.0, 2.0, 0.1, 0.01, -0.02, 0.03])
# B = sp.array([4.0, 1.8, 0.2, 0.02, 0.03, -0.02])
# a = sp.array([1.0, 0.5, 0.1])
# b = sp.array([2.0, 0.2, 0.6])
# print('\nA')
# triangle(A,2)
# print('B')
# triangle(B,2)
# print('a\n',a)
# print('b\n', b)
# (A2, B2) = TwoStep(A,B,a, b,2)
# print('\nA2')
# triangle(A2,2)
# print('B2')
# triangle(B2,2)
#
# # Now do a test calculation
# (x,y) = (10,5)
# xp = a[0] + a[1]*x + a[2]*y
# yp = b[0] + b[1]*x + b[2]*y
# print('x,y', x,y)
# print('xp,yp', xp,yp)
#
# u = poly(A, xp, yp, 2)
# v = poly(B, xp, yp, 2)
# up = poly(A2, x, y,2)
# vp = poly(B2, x, y,2)
# print('Two step', u, v)
# print('One step', up, vp)
# return
``` |
{
"source": "jhunkeler/shebeshebe",
"score": 3
} |
#### File: jhunkeler/shebeshebe/corner.py
```python
from __future__ import division
import time
def tps(samples):
"""Average number of empty one-million-iteration loops completed per minute (crude CPU proxy)."""
values = []
x = 1000000
for _ in range(samples):
start = time.time()
for i in range(x):
pass
stop = time.time() - start
ops = 60 / (stop / 1)
values.append(ops)
return sum(values) / len(values)
def mps(samples):
"""Average number of one-million single-byte bytearray writes completed per minute (crude RAM proxy)."""
values = []
barr = bytearray(1)
x = 1000000
for _ in range(samples):
start = time.time()
for i in range(x):
barr[0] = 1
stop = time.time() - start
ops = 60 / (stop / 1)
values.append(ops)
return sum(values) / len(values)
def dps(samples, buffering=1):
"""Average number of one-million single-byte file writes completed per minute (crude disk I/O proxy)."""
values = []
barr = bytearray(1)
x = 1000000
with open('bloat.dat', 'wb+', buffering) as fp:
for _ in range(samples):
start = time.time()
for i in range(x):
fp.write(b'\0')
stop = time.time() - start
fp.seek(0, 0)
ops = 60 / (stop / 1)
values.append(ops)
return sum(values) / len(values)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--samples', '-s', default=100, type=int)
args = parser.parse_args()
t = int(tps(args.samples))
print('cpu: {0} mHz'.format(t))
m = int(mps(args.samples))
print('ram: {0} mHz'.format(m))
db = int(dps(args.samples))
print('iop (buffered): {0}'.format(db))
du = int(dps(args.samples, 0))
print('iop (unbuffered): {0}'.format(du))
``` |
{
"source": "jhunkeler/stueurmann",
"score": 3
} |
#### File: stueurmann/steuermann/rexecd.py
```python
import os
import time
import CGIHTTPServer
import BaseHTTPServer
import SocketServer
import platform
import subprocess
import urllib
import urlparse
os.chdir('/')
print(os.getcwd())
#####
#
valid_client_ip = (
'192.168.3.11', # arzach
'172.16.31.10', # ssb
'192.168.127.12', # banana
'192.168.3.11', # vxp-dukat
)
#####
#
# This uses the python stock http server. Here is a request handler
# that answers GET requests with a simple echo and services POST requests
# that upload files or execute commands. All other requests are invalid.
password = "<PASSWORD>"
class my_handler( CGIHTTPServer.CGIHTTPRequestHandler ) :
def __init__(self, request, client_address, server) :
# init the superclass
CGIHTTPServer.CGIHTTPRequestHandler.__init__(self, request, client_address, server)
def reject_client(self) :
print(self.client_address)
if not ( self.client_address[0] in valid_client_ip ) :
self.bad_client('a')
return 1
if self.path.startswith('/' + password + '/' ) :
self.path = self.path[len(password)+2:]
else :
self.bad_client('p')
return 1
return 0
def do_GET(self) :
# GET /password/command...
# currently just echoes the decoded path back to the client
if self.reject_client() :
return
path = self.path
print("GET",path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("Hello world: %s\n"%path)
self.wfile.flush()
def bad_client(self, why) :
self.send_response(500)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("bad client: %s\n"%why)
self.wfile.flush()
return
def do_INVALID(self) :
self.send_response(500)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("error\n")
self.wfile.flush()
return
def do_POST(self) :
# POST body: password=...&dirname=...&filename=...&data=... for 'upload',
# or password=...&dirname=...&cmd=... for 'run'
# upload data to a file
if self.reject_client() :
return
print(self.path)
length = int(self.headers['Content-Length'])
data = self.rfile.read(length)
d = urlparse.parse_qs(data)
for x in sorted([ x for x in d]) :
print(x,d[x])
if d['password'][0] != password :
self.bad_client('p')
return
dirname = d['dirname'][0]
print("CD",dirname)
os.chdir(dirname)
if self.path == 'upload' :
filename = d['filename'][0]
mode = 'wb'
if 'mode' in d :
t = d['mode'][0]
if t == 't' or t == 'text' :
mode = 'w'
elif t == 'b' or t == 'binary' :
mode = 'wb'
else :
return self.bad_client('mode')
f = open(filename, mode) # honor the requested text/binary mode
f.write(d['data'][0])
f.close()
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("uploaded %s\n"%filename)
self.wfile.flush()
elif self.path == 'run' :
cmd = d['cmd'][0]
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write("Hello world: %s\n"%cmd)
run_child(cmd, self.wfile)
self.wfile.write("done\n")
self.wfile.flush()
else :
return self.bad_client('c')
def do_HEAD(self) :
self.do_INVALID()
class MultiThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
####
windows = platform.system() == 'Windows'
def run_child(path, wfile) :
env = os.environ
cmd = urllib.unquote_plus(path)
print("COMMAND",cmd)
# bug: implement timeouts
if windows :
status = subprocess_windows( cmd, wfile, env )
else :
status = subprocess_unix( cmd, wfile, env )
# Popen.wait() returns the exit code directly, or a negative value for a signal
if status > 0 :
t_status="exit %d"%(status)
if status != 0 :
return_status = 1
else :
return_status = 1
t_status="signal %d" % ( - status )
# subprocess does not tell you if there was a core
# dump, but there is nothing we can do about it.
print("COMMAND EXIT:",status,t_status)
def subprocess_windows(cmd, wfile, env ) :
# You might think that subprocess.Popen() would be portable,
# but you would be wrong.
#
# On Windows, sockets are NOT file descriptors. Since we are in a web server, wfile here is a socket.
# print wfile.fileno()
# import msvcrt
# print msvcrt.get_osfhandle(wfile.fileno())
p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, env = env, creationflags = subprocess.CREATE_NEW_PROCESS_GROUP )
while 1 :
n = p.stdout.read(256)
if n == '' :
break
wfile.write(n)
return p.wait()
def subprocess_unix( cmd, wfile, env ) :
p = subprocess.Popen( cmd, stdout=wfile, stderr=wfile, shell=True, env = env, preexec_fn=os.setpgrp )
return p.wait()
#####
def run( args = [ ] ) :
# you could parse args here if you wanted to. I don't care to spend
# the time. This is just here for people who can't (or don't want to)
# install a full featured web server just to try things out.
if len(args) > 0 :
ip = args[0]
else :
ip = platform.node()
port = 7070
print("http://%s:%s/"%(str(ip),str(port)))
httpd = MultiThreadedHTTPServer( (ip, port) , my_handler)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
while 1 :
httpd.handle_request()
if __name__ == '__main__' :
run()
##
# in case we need to kill child processes:
# http://code.activestate.com/recipes/347462-terminating-a-subprocess-on-windows/
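# Client-side sketch (Python 2; host name, directory and command are placeholders,
# not part of this module):
# import urllib, urllib2
# base = 'http://arzach:7070/' + password
# body = urllib.urlencode({'password': password, 'dirname': '/tmp', 'cmd': 'ls -l'})
# print(urllib2.urlopen(base + '/run', body).read())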
```
#### File: stueurmann/tests/misc.py
```python
from steuermann import nodes
# (pattern, name) pairs that wildcard_name() should match
yes_list = (
( 'a:b/c', 'a:b/c' ),
( 'a:b/*', 'a:b/c' ),
( 'a:*/c', 'a:b/c' ),
( 'a:*/*', 'a:b/c' ),
( '*:b/c', 'a:b/c' ),
( '*:b/*', 'a:b/c' ),
( '*:*/c', 'a:b/c' ),
( '*:*/*', 'a:b/c' ),
( '[a-z]:*/*', 'a:b/c' ),
( 'a?:*/*', 'ax:b/c' ),
( 'a?:*/*', 'ay:b/c' ),
( '*:*/*', 'xasdadsf:agasdgg/asdgasdg' ),
)
# (pattern, name) pairs that wildcard_name() should reject
no_list = (
( '[A-Z]:*/*', 'a:b/c' ),
( 'a?:*/*', 'a:b/c' ),
( 'a:b/*', 'a:x/y' ),
( '*:c/*', 'a:b/y' ),
( '*:b/y', 'a:b/c' ),
( 'a:b/y', 'a:b/c' ),
)
def test_wildcard_name() :
def yes( a, b ) :
assert nodes.wildcard_name( a, b )
def no( a, b ) :
assert not nodes.wildcard_name( a, b )
for x in yes_list :
yield yes, x[0], x[1]
for x in no_list :
yield no, x[0], x[1]
``` |
{
"source": "jhunkeler/tweakwcs",
"score": 2
} |
#### File: tweakwcs/tests/test_linearfit.py
```python
from itertools import product
import math
import pytest
import numpy as np
from tweakwcs import linearfit, linalg
_LARGE_SAMPLE_SIZE = 1000
_SMALL_SAMPLE_SIZE = 10
_BAD_DATA_FRACTION = 0.2
_TRANSFORM_SELECTOR = {
'rscale': linearfit.fit_rscale,
'general': linearfit.fit_general,
'shift': linearfit.fit_shifts,
}
_ATOL = 10 * _LARGE_SAMPLE_SIZE * np.sqrt(
np.finfo(linalg._MAX_LINALG_TYPE).eps
)
@pytest.fixture(scope="module")
def ideal_small_data(request):
# rscale data with proper rotations and no noise
uv = np.random.random((_SMALL_SAMPLE_SIZE, 2))
xy = np.random.random((_SMALL_SAMPLE_SIZE, 2))
wuv = np.random.random(_SMALL_SAMPLE_SIZE)
wxy = np.random.random(_SMALL_SAMPLE_SIZE)
return uv, xy, wuv, wxy
@pytest.fixture(scope="function", params=[
'shifts', 'rscale', 'rscale-flip-x', 'rscale-flip-y', 'affine'
])
def ideal_large_data(request):
# rscale data with proper rotations and no noise
uv = np.random.random((_LARGE_SAMPLE_SIZE, 2))
# assume an image size of 4096x2048:
uv[:, 0] *= 2047.0
uv[:, 1] *= 4095.0
# rotation angle(s):
angle = 360.0 * np.random.random() # 0 ... 360
if request.param == 'shifts':
angle = (0, 0)
scale = (1, 1)
proper = True
transform = 'shift'
elif request.param == 'rscale':
angle = (angle, angle)
scale = 2 * (0.8 + 0.4 * np.random.random(), ) # 0.8 ... 1.2
proper = True
transform = 'rscale'
elif request.param == 'rscale-flip-x':
angle = ((angle + 180.0) % 360.0, angle)
scale = 2 * (0.8 + 0.4 * np.random.random(), ) # 0.8 ... 1.2
proper = False
transform = 'rscale'
elif request.param == 'rscale-flip-y':
angle = (angle, (angle + 180.0) % 360.0)
scale = 2 * (0.8 + 0.4 * np.random.random(), ) # 0.8 ... 1.2
proper = False
transform = 'rscale'
elif request.param == 'affine':
# rotation angles:
offset = 150.0 * (np.random.random() - 0.5) # -75 ... 75
offset += 180.0 * np.random.choice([0.0, 1.0]) # add random axis flip
angle = (angle, (angle + offset) % 360.0)
# scales:
scale = 0.8 + 0.4 * np.random.random(2) # 0.8 ... 1.2
# proper:
rad = np.deg2rad(angle)
proper = (np.prod(np.cos(rad)) + np.prod(np.sin(rad))) > 0
transform = 'general'
shift = 200.0 * (np.random.random(2) - 0.5) # -100 ... +100
rmat = linearfit.build_fit_matrix(angle, scale)
skew = angle[1] - angle[0]
# apply rscale
xy = np.dot(uv, rmat.T) + shift
return uv, xy, angle, scale, shift, rmat, proper, skew, transform
@pytest.fixture(scope="function",
params=[v for v in product(*(2 * [[False, True]]))])
def weight_data(request):
nbd = int(_BAD_DATA_FRACTION * _LARGE_SAMPLE_SIZE)
minv = 1000.0
maxv = 1.0e6
if not any(request.param):
wxy = None
wuv = None
idx_xy = (np.array([], dtype=np.int), )
idx_uv = (np.array([], dtype=np.int), )
bd_xy = np.zeros((0, 2))
bd_uv = np.zeros((0, 2))
elif all(request.param):
wxy = np.random.random(_LARGE_SAMPLE_SIZE)
wuv = np.random.random(_LARGE_SAMPLE_SIZE)
# split indices into (almost) equal parts:
nbdxy = int((0.3 + 0.4 * np.random.random()) * nbd)
nbduv = nbd - nbdxy
idx_xy = (np.random.choice(np.arange(_LARGE_SAMPLE_SIZE),
nbdxy, replace=False), )
idx_uv = (np.random.choice(np.arange(_LARGE_SAMPLE_SIZE),
nbduv, replace=False), )
wxy[idx_xy] = 0.0
wuv[idx_uv] = 0.0
sign = np.random.choice([-1, 1], (nbdxy, 2))
bd_xy = sign * (minv + (maxv - minv) * np.random.random((nbdxy, 2)))
sign = np.random.choice([-1, 1], (nbduv, 2))
bd_uv = sign * (minv + (maxv - minv) * np.random.random((nbduv, 2)))
elif request.param[0] and not request.param[1]:
idx = np.random.choice(np.arange(_LARGE_SAMPLE_SIZE),
nbd, replace=False)
idx_xy = (idx, )
idx_uv = (np.array([], dtype=np.int), )
wxy = np.random.random(_LARGE_SAMPLE_SIZE)
wxy[idx_xy] = 0.0
wuv = None
sign = np.random.choice([-1, 1], (nbd, 2))
bd_xy = sign * (minv + (maxv - minv) * np.random.random((nbd, 2)))
bd_uv = np.zeros((0, 2))
else:
idx = np.random.choice(np.arange(_LARGE_SAMPLE_SIZE), nbd,
replace=False)
idx_uv = (idx, )
idx_xy = (np.array([], dtype=np.int), )
wuv = np.random.random(_LARGE_SAMPLE_SIZE)
wuv[idx_uv] = 0.0
wxy = None
sign = np.random.choice([-1, 1], (nbd, 2))
bd_uv = sign * (minv + (maxv - minv) * np.random.random((nbd, 2)))
bd_xy = np.zeros((0, 2))
return wxy, wuv, idx_xy, idx_uv, bd_xy, bd_uv
@pytest.fixture(scope="module")
def tiny_zero_data():
""" Return a tuple of (xy, uv, wxy, wuv)"""
return np.zeros((3, 2)), np.zeros((3, 2)), np.zeros(3), np.zeros(3)
@pytest.fixture(scope="function", params=[
linearfit.fit_shifts, linearfit.fit_rscale, linearfit.fit_general
])
def fit_functions(request):
return request.param
def test_build_fit_matrix_identity():
i = np.identity(2)
r = linearfit.build_fit_matrix(0) # also test that default scale value = 1
assert np.allclose(i, r, rtol=0, atol=_ATOL)
r = linearfit.build_fit_matrix((0, 0), (1, 1))
assert np.allclose(i, r, rtol=0, atol=_ATOL)
@pytest.mark.parametrize('rot', [1, 35, 75, 95, 155, 189, 261, 299, 358])
def test_build_fit_matrix_rot(rot):
i = np.identity(2)
m = linearfit.build_fit_matrix(rot)
minv = linearfit.build_fit_matrix(360 - rot)
assert np.allclose(i, np.dot(m, minv), rtol=0, atol=_ATOL)
@pytest.mark.parametrize('rot, scale', [
((1, 4), (2.4, 5.6)),
((31, 78), (0.9, 1.3)),
])
def test_build_fit_matrix_generalized(rot, scale):
i = np.identity(2)
m = linearfit.build_fit_matrix(rot, scale)
# check scale:
assert np.allclose(np.sqrt(np.sum(m**2, axis=0)), scale,
rtol=0, atol=_ATOL)
ms = np.diag(scale)
# check rotations:
mr = linearfit.build_fit_matrix(rot, 1)
mrinv = linearfit.build_fit_matrix(rot[::-1], 1).T
assert np.allclose(np.linalg.det(mr) * i, np.dot(mr, mrinv),
rtol=0, atol=_ATOL)
assert np.allclose(m, np.dot(mr, ms), rtol=0, atol=_ATOL)
@pytest.mark.parametrize('uv, xy, wuv, wxy', [
(np.zeros(10), np.zeros(10), None, None),
(np.zeros((10, 2, 2)), np.zeros(10), None, None),
(np.zeros((10, 2)), np.zeros((11, 2)), None, None),
3 * (np.zeros((10, 2)), ) + (None, ),
2 * (np.zeros((10, 2)), ) + (None, np.zeros((10, 2))),
2 * (np.zeros((10, 2)), ) + (None, np.zeros((5, 2))),
2 * (np.zeros((10, 2)), ) + (np.zeros((5, 2)), None),
])
def test_iter_linear_fit_invalid_shapes(uv, xy, wuv, wxy):
# incorrect coordinate array dimensionality:
with pytest.raises(ValueError):
linearfit.iter_linear_fit(xy, uv, wxy=wxy, wuv=wuv)
@pytest.mark.parametrize('nclip, sigma', [
(3, None), (-3, None), (3, -1), (-1, 3), (3, (1.0, 'invalid')),
])
def test_iter_linear_fit_invalid_sigma_nclip(ideal_small_data, nclip, sigma):
uv, xy, _, _ = ideal_small_data
with pytest.raises(ValueError):
linearfit.iter_linear_fit(xy, uv, nclip=nclip, sigma=sigma)
def test_iter_linear_fit_invalid_fitgeom(ideal_small_data):
uv, xy, _, _ = ideal_small_data
with pytest.raises(ValueError):
linearfit.iter_linear_fit(xy, uv, fitgeom='invalid')
@pytest.mark.parametrize('nclip, sigma, clip_accum, weights, noise', [
(None, 2, True, False, False),
(None, 2, True, True, False),
(2, 0.05, False, True, True),
])
def test_iter_linear_fit_special_cases(ideal_large_data, nclip, sigma,
clip_accum, weights, noise):
uv, xy, _, _, shift, rmat, _, _, fitgeom = ideal_large_data
if weights:
wxy, wuv = 0.1 + 0.9 * np.random.random((2, xy.shape[0]))
else:
wxy = None
wuv = None
if noise:
xy = xy + np.random.normal(0, 0.01, xy.shape)
atol = 0.01
else:
atol = _ATOL
fit = linearfit.iter_linear_fit(xy, uv, wxy, wuv, fitgeom=fitgeom,
nclip=nclip, center=(0, 0), sigma=1,
clip_accum=clip_accum)
assert np.allclose(fit['shift'], shift, rtol=0, atol=atol)
assert np.allclose(fit['matrix'], rmat, rtol=0, atol=atol)
@pytest.mark.parametrize('weights', [False, True])
def test_iter_linear_fit_1point(weights):
xy = np.array([[1.0, 2.0]])
shifts = 20 * (np.random.random(2) - 0.5)
if weights:
wxy, wuv = 0.1 + 0.9 * np.random.random((2, xy.shape[0]))
else:
wxy, wuv = None, None
fit = linearfit.iter_linear_fit(xy, xy + shifts, wxy=wxy, wuv=wuv,
fitgeom='shift', nclip=0)
assert np.allclose(fit['shift'], -shifts, rtol=0, atol=_ATOL)
assert np.allclose(fit['matrix'], np.identity(2), rtol=0, atol=_ATOL)
def test_iter_linear_fit_fitgeom_clip_all_data(ideal_large_data):
# Test that clipping is interrupted if number of sources after clipping
# is below minobj for a given fit:
xy, uv, _, _, _, _, _, _, fitgeom = ideal_large_data
ndata = xy.shape[0]
uv = uv + np.random.normal(0, 0.01, (ndata, 2))
wxy, wuv = 0.1 + 0.9 * np.random.random((2, ndata))
fit = linearfit.iter_linear_fit(
xy, uv, wxy, wuv, fitgeom=fitgeom, sigma=1e-50, nclip=100
)
assert np.count_nonzero(fit['fitmask']) == len(xy)
assert fit['eff_nclip'] == 0
def test_compute_stat_invalid_weights(ideal_small_data):
pts, _, _, _ = ideal_small_data
weights = np.zeros(pts.shape[0])
fit = {}
linearfit._compute_stat(fit, pts, weights)
assert math.isnan(fit['rmse'])
assert math.isnan(fit['mae'])
assert math.isnan(fit['std'])
@pytest.mark.parametrize('fit_function', [
linearfit.fit_rscale, linearfit.fit_general,
])
def test_fit_detect_colinear_points(fit_function, tiny_zero_data):
xy, uv, _, _ = tiny_zero_data
xy = xy + [1, 2]
with pytest.raises(linearfit.SingularMatrixError):
fit_function(xy, uv)
def test_fit_detect_zero_weights(fit_functions, tiny_zero_data):
xy, uv, wxy, _ = tiny_zero_data
# all weights are zero:
with pytest.raises(ValueError):
fit_functions(xy, uv, wxy=wxy)
def test_fit_detect_negative_weights(fit_functions, tiny_zero_data):
xy, uv, wuv, _ = tiny_zero_data
wuv = wuv.copy()
wuv[0] = -1
# some weights are negative (=invalid):
with pytest.raises(ValueError):
fit_functions(xy, uv, wuv=wuv)
@pytest.mark.parametrize('fit_function, npts', [
(linearfit.fit_shifts, 0),
(linearfit.fit_rscale, 1),
(linearfit.fit_general, 2),
])
def test_fit_general_too_few_points(fit_function, npts):
with pytest.raises(linearfit.NotEnoughPointsError):
fit_function(np.zeros((npts, 2)), np.zeros((npts, 2)))
@pytest.mark.parametrize(
'clip_accum, noise',
[v for v in product(*(2 * [[False, True]]))]
)
def test_iter_linear_fit_clip_style(ideal_large_data, weight_data,
clip_accum, noise):
""" Test clipping behavior. Test that weights exclude "bad" data. """
uv, xy, angle, scale, shift, rmat, proper, skew, fitgeom = ideal_large_data
wxy, wuv, idx_xy, idx_uv, bd_xy, bd_uv = weight_data
noise_sigma = 0.01
npts = xy.shape[0]
# add noise to data
if noise:
xy = xy + np.random.normal(0, noise_sigma, (npts, 2))
atol = 10 * noise_sigma
nclip = 3
else:
atol = _ATOL
nclip = 0
if wxy is not None:
xy[idx_xy] += bd_xy
if wuv is not None:
uv = uv.copy()
uv[idx_uv] += bd_uv
fit = linearfit.iter_linear_fit(
xy, uv, wxy=wxy, wuv=wuv, fitgeom=fitgeom, sigma=2,
clip_accum=clip_accum, nclip=nclip
)
shift_with_center = np.dot(rmat, fit['center']) - fit['center'] + shift
assert np.allclose(fit['shift'], shift_with_center, rtol=0, atol=atol)
assert np.allclose(fit['matrix'], rmat, rtol=0, atol=atol)
assert np.allclose(fit['rmse'], 0, rtol=0, atol=atol)
assert np.allclose(fit['mae'], 0, rtol=0, atol=atol)
assert np.allclose(fit['std'], 0, rtol=0, atol=atol)
assert fit['proper'] == proper
if nclip:
assert fit['eff_nclip'] > 0
assert fit['fitmask'].sum(dtype=np.int) < npts
else:
assert fit['eff_nclip'] == 0
assert (fit['fitmask'].sum(dtype=np.int) == npts -
np.union1d(idx_xy[0], idx_uv[0]).size)
```
#### File: tweakwcs/tests/test_multichip_fitswcs.py
```python
import numpy as np
from astropy.io import fits
from astropy import table
from astropy import wcs
from astropy.utils.data import get_pkg_data_filename
import tweakwcs
def _match(x, y):
lenx = len(x)
leny = len(y)
if lenx == leny:
return (np.arange(lenx), np.arange(leny))
elif lenx < leny:
lenx, leny = leny, lenx
x, y = y, x
match = (np.arange(leny) + (0 if y.meta['name'] == 'ext1' else leny),
np.arange(leny))
return match
def test_multichip_fitswcs_alignment():
h1 = fits.Header.fromfile(get_pkg_data_filename('data/wfc3_uvis1.hdr'))
w1 = wcs.WCS(h1)
imcat1 = tweakwcs.FITSWCS(w1)
imcat1.meta['catalog'] = table.Table.read(
get_pkg_data_filename('data/wfc3_uvis1.cat'),
format='ascii.csv',
delimiter=' ',
names=['x', 'y']
)
imcat1.meta['group_id'] = 1
imcat1.meta['name'] = 'ext1'
h2 = fits.Header.fromfile(get_pkg_data_filename('data/wfc3_uvis2.hdr'))
w2 = wcs.WCS(h2)
imcat2 = tweakwcs.FITSWCS(w2)
imcat2.meta['catalog'] = table.Table.read(
get_pkg_data_filename('data/wfc3_uvis2.cat'),
format='ascii.csv',
delimiter=' ',
names=['x', 'y']
)
imcat2.meta['group_id'] = 1
imcat2.meta['name'] = 'ext4'
refcat = table.Table.read(
get_pkg_data_filename('data/ref.cat'),
format='ascii.csv', delimiter=' ',
names=['RA', 'DEC']
)
tweakwcs.align_wcs([imcat1, imcat2], refcat, match=_match, nclip=None,
sigma=3, fitgeom='general')
fi1 = imcat1.meta['fit_info']
fi2 = imcat2.meta['fit_info']
w1 = imcat1.wcs
w2 = imcat2.wcs
assert np.allclose(w1.wcs.crval, (83.206917667519, -67.73275818507248), rtol=0)
assert np.allclose(
w1.wcs.cd,
np.array(
[
[3.93222694902149e-06, -1.0106698270131359e-05],
[-1.0377001075437075e-05, -4.577945148472431e-06]
]),
atol=0.0,
rtol=1e-8
)
assert np.allclose(w2.wcs.crval, (83.15167050722597, -67.74220306069903), rtol=0)
assert np.allclose(
w2.wcs.cd,
np.array(
[
[3.834449806681195e-06, -9.996495217498745e-06],
[-1.0348147451241423e-05, -4.503496019301529e-06]
]),
atol=0.0,
rtol=1e-8
)
assert np.allclose(fi1['<scale>'], 1.0025, rtol=0, atol=2e-8)
assert np.allclose(fi2['<scale>'], 1.0025, rtol=0, atol=2e-8)
assert fi1['rmse'] < 5e-5
assert fi2['rmse'] < 5e-5
cat1 = imcat1.meta['catalog']
ra1, dec1 = w1.all_pix2world(cat1['x'], cat1['y'], 0)
cat2 = imcat2.meta['catalog']
ra2, dec2 = w2.all_pix2world(cat2['x'], cat2['y'], 0)
ra = np.concatenate([ra1, ra2])
dec = np.concatenate([dec1, dec2])
rmse_ra = np.sqrt(np.mean((ra - refcat['RA'])**2))
rmse_dec = np.sqrt(np.mean((dec - refcat['DEC'])**2))
assert rmse_ra < 1e-9
assert rmse_dec < 1e-9
``` |
{
"source": "jhunkeler/webbpsf",
"score": 2
} |
#### File: webbpsf/webbpsf/utils.py
```python
from collections import OrderedDict
import os, sys
import astropy.io.fits as fits
from astropy.nddata import NDData
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as sciint
import logging
_log = logging.getLogger('webbpsf')
from . import conf
_DISABLE_FILE_LOGGING_VALUE = 'none'
_Strehl_perfect_cache = {} # dict for caching perfect images used in Strehl calcs.
### Helper routines for logging: ###
class FilterLevelRange(object):
def __init__(self, min_level, max_level):
self.min_level = min_level
self.max_level = max_level
def filter(self, record):
if record.levelno >= self.min_level and record.levelno <= self.max_level:
return 1
else:
return 0
def restart_logging(verbose=True):
"""Restart logging using the same settings as the last WebbPSF
session, as stored in the configuration system.
Parameters
----------
verbose : boolean
Should this function print the new logging targets to
standard output?
"""
level = str(conf.logging_level).upper()
lognames = ['webbpsf', 'poppy']
root_logger = logging.getLogger()
root_logger.handlers = []
if level in ['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']:
level_id = getattr(logging, level) # obtain one of the DEBUG, INFO, WARN,
# or ERROR constants
if verbose:
print("WebbPSF log messages of level {0} and above will be shown.".format(level))
elif level == 'NONE':
root_logger.handlers = [] # n.b. this will clear any handlers other libs/users configured
return
else:
raise ValueError("Invalid logging level: {}".format(level))
for name in lognames:
logger = logging.getLogger(name)
logger.setLevel(level_id)
# set up screen logging
stdout_handler = logging.StreamHandler(stream=sys.stdout)
stdout_handler.addFilter(FilterLevelRange(
min_level=logging.DEBUG,
max_level=logging.INFO
))
stderr_handler = logging.StreamHandler(stream=sys.stderr)
stderr_handler.addFilter(FilterLevelRange(
min_level=logging.WARNING,
max_level=logging.CRITICAL
))
formatter = logging.Formatter(conf.logging_format_screen)
stderr_handler.setFormatter(formatter)
stdout_handler.setFormatter(formatter)
root_logger.addHandler(stdout_handler)
root_logger.addHandler(stderr_handler)
if verbose:
print("WebbPSF log outputs will be directed to the screen.")
# set up file logging
filename = conf.logging_filename
if filename is not None and filename.strip().lower() != _DISABLE_FILE_LOGGING_VALUE:
hdlr = logging.FileHandler(filename)
formatter = logging.Formatter(conf.logging_format_file)
hdlr.setFormatter(formatter)
root_logger.addHandler(hdlr)
if verbose:
print("WebbPSF log outputs will also be saved to file {}".format(filename))
def setup_logging(level='INFO', filename=None):
"""Allows selection of logging detail and output locations
(screen and/or file)
This is a convenience wrapper to Python's built-in logging package,
as used by `webbpsf` and `poppy`. By default, this sets up log
messages to be written to the screen, but the user can also
request logging to a file.
Editing the WebbPSF config file to set `autoconfigure_logging = True`
(and any of the logging settings you wish to persist) instructs
WebbPSF to apply your settings on import. (This is not
done by default in case you have configured `logging` yourself
and don't wish to overwrite your configuration.)
For more advanced log handling, see the Python logging module's
own documentation.
Parameters
-------------
level : str
Name of log output to show. Defaults to 'INFO', set to 'DEBUG'
for more extensive messages, or to 'WARN' or 'ERROR' for fewer.
filename : str, optional
Filename to write the log output to. If not set, output will
just be displayed on screen. (Default: None)
Examples
-----------
>>> webbpsf.setup_logging(filename='webbpsflog.txt')
This will save all log messages to 'webbpsflog.txt' in the current
directory. If you later start another copy of webbpsf in a
different directory, that session will also write to
'webbpsflog.txt' in *that* directory. Alternatively you can specify
a fully qualified absolute path to save all your logs to one
specific file.
>>> webbpsf.setup_logging(level='WARN')
This will show only WARNING or ERROR messages on screen, and not
save any logs to files at all (since the filename argument is None)
"""
# implementation note: All this function actually does is apply the
# defaults into the configuration system, then calls restart_logging to
# do the actual work.
level = str(level).upper()
# The astropy config system will enforce the limited set of values for the logging_level
# parameter by raising a TypeError on this next line if we feed in an invalid string.
conf.logging_level = level
if filename is None:
# Use the string 'none' as a sentinel value for astropy.config
filename = _DISABLE_FILE_LOGGING_VALUE
conf.logging_filename = filename
restart_logging(verbose=True)
### Helper routines for data handling and system setup: ###
MISSING_WEBBPSF_DATA_MESSAGE = """
*********** ERROR ****** ERROR ****** ERROR ****** ERROR ***********
* *
* WebbPSF requires several data files to operate. *
* These files could not be located automatically at this time, or this *
* version of the software requires a newer set of reference files than *
* you have installed. For more details see: *
* *
* https://webbpsf.readthedocs.io/en/stable/installation.html *
* *
* under "Installing the Required Data Files". *
* WebbPSF will not be able to function properly until the appropriate *
* reference files have been downloaded to your machine and installed. *
* *
****************************************************************************
"""
def get_webbpsf_data_path(data_version_min=None, return_version=False):
"""Get the WebbPSF data path
Simply checking an environment variable is not always enough, since
for packaging this code as a Mac .app bundle, environment variables are
not available since .apps run outside the Terminal or X11 environments.
Therefore, check first the environment variable WEBBPSF_PATH, and secondly
check the configuration file in the user's home directory.
If data_version_min is supplied (as a 3-tuple of integers), it will be
compared with the version number from version.txt in the WebbPSF data
package.
"""
import os
path_from_config = conf.WEBBPSF_PATH # read from astropy configuration
if path_from_config == 'from_environment_variable':
path = os.getenv('WEBBPSF_PATH')
if path is None:
sys.stderr.write(MISSING_WEBBPSF_DATA_MESSAGE)
raise EnvironmentError("Environment variable $WEBBPSF_PATH is not set!")
else:
path = path_from_config
# at minimum, the path must be a valid directory
if not os.path.isdir(path):
sys.stderr.write(MISSING_WEBBPSF_DATA_MESSAGE)
raise IOError("WEBBPSF_PATH ({}) is not a valid directory path!".format(path))
if data_version_min is not None:
# Check if the data in WEBBPSF_PATH meet the minimum data version
version_file_path = os.path.join(path, 'version.txt')
try:
with open(version_file_path) as f:
version_contents = f.read().strip()
# keep only first 3 elements for comparison (allows "0.3.4.dev" or similar)
parts = version_contents.split('.')[:3]
version_tuple = tuple(map(int, parts))
except (IOError, ValueError):
sys.stderr.write(MISSING_WEBBPSF_DATA_MESSAGE)
raise EnvironmentError(
"Couldn't read the version number from {}. (Do you need to update the WebbPSF "
"data? See https://webbpsf.readthedocs.io/en/stable/installation.html#data-install "
"for a link to the latest version.)".format(version_file_path)
)
if not version_tuple >= data_version_min:
sys.stderr.write(MISSING_WEBBPSF_DATA_MESSAGE)
raise EnvironmentError(
"WebbPSF data package has version {cur}, but {min} is needed. "
"See https://webbpsf.readthedocs.io/en/stable/installation.html#data-install "
"for a link to the latest version.".format(
cur=version_contents,
min='{}.{}.{}'.format(*data_version_min)
)
)
if return_version:
return (path, version_contents)
return path
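# Usage sketch (the minimum data version tuple below is an arbitrary example):
# path, data_version = get_webbpsf_data_path(data_version_min=(0, 9, 0),
# return_version=True)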
DIAGNOSTIC_REPORT = """
OS: {os}
CPU: {cpu}
Python version: {python}
numpy version: {numpy}
scipy version: {scipy}
astropy version: {astropy}
pysynphot version: {pysyn}
numexpr version: {numexpr}
pyFFTW version: {pyfftw}
Anaconda Accelerate version: {accelerate}
poppy version: {poppy}
webbpsf version: {webbpsf}
tkinter version: {tkinter}
wxpython version: {wxpython}
***************************************************************
Floating point type information for numpy.float:
{finfo_float}
Floating point type information for numpy.complex:
{finfo_complex}
***************************************************************
Numpy compilation and linking:
{numpyconfig}
"""
def system_diagnostic():
""" return various helpful/informative information about the
current system. For instance versions of python & available packages.
Mostly undocumented function...
"""
# There is probably a more clever way to do the following via introspection?
import platform
import os
import poppy
import numpy
import scipy
from .version import version
try:
import ttk
ttk_version = ttk.__version__
except ImportError:
ttk_version = 'not found'
try:
import wx
wx_version = wx.__version__
except ImportError:
wx_version = 'not found'
try:
import pyfftw
pyfftw_version = pyfftw.version
except ImportError:
pyfftw_version = 'not found'
try:
import pysynphot
pysynphot_version = pysynphot.__version__
except ImportError:
pysynphot_version = 'not found'
try:
import astropy
astropy_version = astropy.__version__
except ImportError:
astropy_version = 'not found'
try:
import numexpr
numexpr_version = numexpr.__version__
except ImportError:
numexpr_version = 'not found'
try:
import accelerate
accelerate_version = accelerate.__version__
except ImportError:
accelerate_version = 'not found'
try:
import psutil
cpu_info = """
Hardware cores: {hw}
Logical core: {logical}
Frequency: {freq} GHz
Currently {percent}% utilized.
""".format(hw=psutil.cpu_count(logical=False),
logical=psutil.cpu_count(logical=True),
freq=psutil.cpu_freq()[0] / 1000,
percent=psutil.cpu_percent())
except:
try:
import multiprocessing
cpu_info = " Cores: {}".format(multiprocessing.cpu_count())
except:
cpu_info = "No CPU info available"
# Get numpy config - the following is a modified version of
# numpy.__config__.show()
numpyconfig = ""
for name, info_dict in numpy.__config__.__dict__.items():
if name[0] == "_" or type(info_dict) is not type({}): continue
numpyconfig += name + ":\n"
if not info_dict:
numpyconfig += " NOT AVAILABLE\n"
for k, v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
numpyconfig += " %s = %s\n" % (k, v)
result = DIAGNOSTIC_REPORT.format(
os=platform.platform(),
numpy=numpy.__version__,
python=sys.version.replace("\n", " "),
poppy=poppy.__version__,
webbpsf=version,
tkinter=ttk_version,
wxpython=wx_version,
pyfftw=pyfftw_version,
pysyn=pysynphot_version,
astropy=astropy_version,
finfo_float=numpy.finfo(numpy.float),
finfo_complex=numpy.finfo(numpy.complex),
numexpr=numexpr_version,
scipy=scipy.__version__,
accelerate=accelerate_version,
numpyconfig=numpyconfig,
cpu=cpu_info
)
return result
### Helper routines for image manipulation: ###
def measure_strehl(HDUlist_or_filename=None, ext=0, slice=0, center=None, display=True, verbose=True, cache_perfect=False):
""" Estimate the Strehl ratio for a PSF.
This requires computing a simulated PSF with the same
properties as the one under analysis.
Note that this calculation will not be very accurate unless both PSFs are well sampled,
preferably several times better than Nyquist. See
`Roberts et al. 2004 SPIE 5490 <http://adsabs.harvard.edu/abs/2004SPIE.5490..504R>`_
for a discussion of the various possible pitfalls when calculating Strehl ratios.
WARNING: This routine attempts to infer how to calculate a perfect reference
PSF based on FITS header contents. It will likely work for simple direct imaging
cases with WebbPSF but will not work (yet) for more complicated cases such as
coronagraphy, anything with image or pupil masks, etc. Code contributions to add
such cases are welcomed.
Parameters
----------
HDUlist_or_filename : string
Either a fits.HDUList object or a filename of a FITS file on disk
ext : int
Extension in that FITS file
slice : int, optional
If that extension is a 3D datacube, which slice (plane) of that datacube to use
center : tuple
center to compute around. Default is image center. If the center is on the
crosshairs between four pixels, then the mean of those four pixels is used.
Otherwise, if the center is in a single pixel, then that pixel is used.
verbose, display : bool
control whether to print the results or display plots on screen.
cache_perfect : bool
use caching for perfect images? greatly speeds up multiple calcs w/ same config
Returns
---------
strehl : float
Strehl ratio as a floating point number between 0.0 - 1.0
"""
from .webbpsf_core import Instrument
from poppy import display_psf
if isinstance(HDUlist_or_filename, str):
HDUlist = fits.open(HDUlist_or_filename)
elif isinstance(HDUlist_or_filename, fits.HDUList):
HDUlist = HDUlist_or_filename
else:
raise ValueError("input must be a filename or HDUlist")
image = HDUlist[ext].data
header = HDUlist[ext].header
if image.ndim >= 3: # handle datacubes gracefully
image = image[slice, :, :]
if center is None:
# get exact center of image
# center = (image.shape[1]/2, image.shape[0]/2)
center = tuple((a - 1) / 2.0 for a in image.shape[::-1])
# Compute a comparison image
_log.info("Now computing image with zero OPD for comparison...")
inst = Instrument(header['INSTRUME'])
inst.filter = header['FILTER']
inst.pupilopd = None # perfect image
inst.include_si_wfe = False # perfect image
inst.pixelscale = header['PIXELSCL'] * header['OVERSAMP'] # same pixel scale pre-oversampling
cache_key = (header['INSTRUME'], header['FILTER'], header['PIXELSCL'], header['OVERSAMP'], header['FOV'], header['NWAVES'])
try:
comparison_psf = _Strehl_perfect_cache[cache_key]
except KeyError:
comparison_psf = inst.calc_psf(fov_arcsec=header['FOV'], oversample=header['OVERSAMP'], nlambda=header['NWAVES'])
if cache_perfect: _Strehl_perfect_cache[cache_key] = comparison_psf
comparison_image = comparison_psf[0].data
if (int(center[1]) == center[1]) and (int(center[0]) == center[0]):
# individual pixel
        meas_peak = image[int(center[1]), int(center[0])]
        ref_peak = comparison_image[int(center[1]), int(center[0])]
else:
# average across a group of 4
bot = [int(np.floor(f)) for f in center]
top = [int(np.ceil(f) + 1) for f in center]
meas_peak = image[bot[1]:top[1], bot[0]:top[0]].mean()
ref_peak = comparison_image[bot[1]:top[1], bot[0]:top[0]].mean()
strehl = (meas_peak / ref_peak)
if display:
plt.clf()
plt.subplot(121)
display_PSF(HDUlist, title="Observed PSF")
plt.subplot(122)
display_PSF(comparison_psf, title="Perfect PSF")
plt.gcf().suptitle("Strehl ratio = %.3f" % strehl)
if verbose:
print("Measured peak: {0:.3g}".format(meas_peak))
print("Reference peak: {0:.3g}".format(ref_peak))
print(" Strehl ratio: {0:.3f}".format(strehl))
return strehl
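# Hedged usage sketch, defined but never called at import time: the FITS filename
# below is a hypothetical placeholder, and a real input must carry the WebbPSF
# header keywords (INSTRUME, FILTER, PIXELSCL, OVERSAMP, FOV, NWAVES) so that the
# perfect reference PSF can be recomputed.
def _example_measure_strehl(filename="my_nircam_psf.fits"):
    """Illustrative only: compute and print the Strehl ratio of a saved PSF."""
    strehl = measure_strehl(filename, ext=0, display=False, verbose=False)
    print("Strehl ratio: {0:.3f}".format(strehl))
    return strehl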
### Helper routines for display customization: ###
# use via poppy's display_annotate feature by assigning these to
# the display_annotate attribute of an OpticalElement class
def annotate_ote_entrance_coords(self, ax):
""" Draw OTE V frame axes on first optical plane """
color = 'yellow'
loc = 3
ax.arrow(-loc, -loc, .2, 0, color=color, width=0.005)
ax.arrow(-loc, -loc, 0, .2, color=color, width=0.005)
ax.text(-loc, -loc + 0.4, '+V3', color=color, size='small',
horizontalalignment='center', verticalalignment='bottom')
ax.text(-loc + 0.4, -loc, '+V2', color=color, size='small',
horizontalalignment='left', verticalalignment='center')
def annotate_sky_pupil_coords(self, ax, show_NE=False, north_angle=45.):
""" Draw OTE V frame axes projected onto the sky
Optionally also draw a compass for north and east at some given
position angle
"""
color = 'yellow'
loc = 2.9
ax.arrow(-loc + 0.5, -loc, -.2, 0, color=color, width=0.005)
ax.arrow(-loc + 0.5, -loc, 0, .2, color=color, width=0.005)
ax.text(-loc + 0.5, -loc + 0.3, '+V3 on sky', color=color, size='small',
horizontalalignment='center', verticalalignment='bottom')
ax.text(-loc + 0.5 + 0.3, -loc, '+V2 on sky', color=color, size='small',
horizontalalignment='left', verticalalignment='center')
if show_NE:
color2 = 'cyan'
angle = np.deg2rad(north_angle) # arbitrary
dl = 0.3
dx = np.sin(angle) * dl
dy = np.cos(angle) * dl
ax.arrow(-loc + 0.5, -loc, -dx, dy, color=color2, width=0.005)
ax.arrow(-loc + 0.5, -loc, -dy, -dx, color=color2, width=0.005)
ax.text(-loc + 0.5 - 2.3 * dx, -loc + 2.3 * dy, 'N', color=color2, size='small',
horizontalalignment='center', verticalalignment='center')
ax.text(-loc + 0.5 - 1.3 * dy, -loc - 1.3 * dx, 'E', color=color2, size='small',
horizontalalignment='center', verticalalignment='center')
def _run_benchmark(timer, iterations=1):
""" Common benchmarking core. Called from benchmark_imaging and benchmark_coronagraphy
"""
import poppy
defaults = (poppy.conf.use_fftw, poppy.conf.use_numexpr, poppy.conf.use_cuda, poppy.conf.use_opencl)
# Time baseline performance in numpy
print("Timing performance in plain numpy:")
poppy.conf.use_fftw, poppy.conf.use_numexpr, poppy.conf.use_cuda, poppy.conf.use_opencl = (False, False, False, False)
time_numpy = timer.timeit(number=iterations) / iterations
print(" {:.2f} s".format(time_numpy))
if poppy.accel_math._FFTW_AVAILABLE:
print("Timing performance with FFTW:")
poppy.conf.use_fftw = True
time_fftw = timer.timeit(number=iterations) / iterations
print(" {:.2f} s".format(time_fftw))
else:
time_fftw = np.NaN
if poppy.accel_math._NUMEXPR_AVAILABLE:
print("Timing performance with Numexpr:")
poppy.conf.use_fftw = False
poppy.conf.use_numexpr = True
time_numexpr = timer.timeit(number=iterations) / iterations
print(" {:.2f} s".format(time_numexpr))
else:
time_numexpr = np.NaN
if poppy.accel_math._CUDA_AVAILABLE:
print("Timing performance with CUDA + Numexpr:")
poppy.conf.use_cuda = True
poppy.conf.use_opencl = False
time_cuda = timer.timeit(number=iterations) / iterations
print(" {:.2f} s".format(time_cuda))
else:
time_cuda = np.NaN
if poppy.accel_math._OPENCL_AVAILABLE:
print("Timing performance with OpenCL + Numexpr:")
poppy.conf.use_opencl = True
poppy.conf.use_cuda = False
time_opencl = timer.timeit(number=iterations) / iterations
print(" {:.2f} s".format(time_opencl))
else:
time_opencl = np.NaN
poppy.conf.use_fftw, poppy.conf.use_numexpr, poppy.conf.use_cuda, poppy.conf.use_opencl = defaults
return {'numpy': time_numpy,
'fftw': time_fftw,
'numexpr': time_numexpr,
'cuda': time_cuda,
'opencl': time_opencl}
def benchmark_imaging(iterations=1, nlambda=1, add_distortion=True):
""" Performance benchmark function for standard imaging """
import poppy
import timeit
timer = timeit.Timer("psf = nc.calc_psf(nlambda=nlambda, add_distortion={})".format(add_distortion),
setup="""
import webbpsf
nc = webbpsf.NIRCam()
nc.filter='F360M'
nlambda={nlambda:d}""".format(nlambda=nlambda))
print("Timing performance of NIRCam F360M with {} wavelengths, {} iterations".format(nlambda, iterations))
return _run_benchmark(timer, iterations=iterations)
def benchmark_nircam_coronagraphy(iterations=1, nlambda=1, add_distortion=True):
""" Performance benchmark function for standard imaging """
import poppy
import timeit
timer = timeit.Timer("psf = nc.calc_psf(nlambda=nlambda, add_distortion={})".format(add_distortion),
setup="""
import webbpsf
nc = webbpsf.NIRCam()
nc.filter='F335M'
nc.image_mask='MASK335R'
nc.pupil_mask='MASKRND'
nlambda={nlambda:d}""".format(nlambda=nlambda))
print("Timing performance of NIRCam MASK335R with {} wavelengths, {} iterations".format(nlambda, iterations))
return _run_benchmark(timer, iterations=iterations)
def benchmark_miri_coronagraphy(iterations=1, nlambda=1):
""" Performance benchmark function for standard imaging """
import poppy
import timeit
timer = timeit.Timer("psf = miri.calc_psf(nlambda=nlambda)",
setup="""
import webbpsf
miri = webbpsf.MIRI()
miri.filter='F1065C'
miri.image_mask='FQPM1065'
miri.pupil_mask='MASKFQPM'
nlambda={nlambda:d}""".format(nlambda=nlambda))
print("Timing performance of MIRI F1065C with {} wavelengths, {} iterations".format(nlambda, iterations))
return _run_benchmark(timer, iterations=iterations)
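# Hedged usage sketch for the benchmark helpers above, defined but never called
# here: running it performs real PSF calculations and may take minutes.
def _example_run_benchmarks():
    """Illustrative only: time a single-wavelength NIRCam imaging calculation."""
    times = benchmark_imaging(iterations=1, nlambda=1)
    for backend, seconds in times.items():
        print("{0:>8}: {1} s".format(backend, seconds))
    return times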
def combine_docstrings(cls):
""" Combine the docstrings of a method and earlier implementations of the same method in parent classes """
for name, func in cls.__dict__.items():
# Allow users to see the Poppy calc_psf docstring along with the JWInstrument version
if name == 'calc_psf':
jwinstrument_class = cls
spacetelescope_class = cls.__base__
ind0 = getattr(jwinstrument_class, 'calc_psf').__doc__.index("add_distortion") # pull the new parameters
ind1 = getattr(spacetelescope_class, 'calc_psf').__doc__.index("Returns") # end of parameters
func.__doc__ = getattr(spacetelescope_class, 'calc_psf').__doc__[0:ind1] + \
getattr(jwinstrument_class, 'calc_psf').__doc__[ind0:] + \
getattr(spacetelescope_class, 'calc_psf').__doc__[ind1:]
return cls
def to_griddedpsfmodel(HDUlist_or_filename=None, ext=0):
"""
Create a photutils GriddedPSFModel object from either a FITS file or
an HDUlist object. The input must have header keywords "DET_YX{}" and
"OVERSAMP" (will be present if psf_grid() is used to create the
file).
Parameters
----------
HDUlist_or_filename : string
Either a fits.HDUList object or a filename of a FITS file on disk
ext : int
Extension in that FITS file
Returns
-------
model : GriddedPSFModel
Photutils object with 3D data array and metadata with specified
grid_xypos and oversampling keys
"""
try:
from photutils import GriddedPSFModel
except ImportError:
raise ImportError("This method requires photutils >= 0.6")
if isinstance(HDUlist_or_filename, str):
HDUlist = fits.open(HDUlist_or_filename)
elif isinstance(HDUlist_or_filename, fits.HDUList):
HDUlist = HDUlist_or_filename
else:
raise ValueError('Input must be a filename or HDUlist')
data = HDUlist[ext].data
header = HDUlist[ext].header
# Check necessary keys are there
if not any("DET_YX" in key for key in header.keys()):
raise KeyError("You are missing 'DET_YX{}' keys: which are the detector locations of the PSFs")
if 'OVERSAMP' not in header.keys():
raise KeyError("You are missing 'OVERSAMP' key: which is the oversampling factor of the PSFs")
# Convert header to meta dict
header = header.copy(strip=True)
header.remove('COMMENT', remove_all=True)
header.remove('', remove_all=True)
meta = OrderedDict((a, (b, c)) for (a, b, c) in header.cards)
ndd = NDData(data, meta=meta, copy=True)
# Edit meta dictionary for GriddedPSFLibrary specifics
ndd.meta['grid_xypos'] = [((float(ndd.meta[key][0].split(',')[1].split(')')[0])),
(float(ndd.meta[key][0].split(',')[0].split('(')[1])))
for key in ndd.meta.keys() if "DET_YX" in key] # from (y,x) to (x,y)
if 'oversampling' not in ndd.meta:
ndd.meta['oversampling'] = ndd.meta['OVERSAMP'][0] # pull the value
# Remove keys with duplicate information
ndd.meta = {key.lower(): ndd.meta[key] for key in ndd.meta if 'DET_YX' not in key and 'OVERSAMP' not in key}
# Create model
model = GriddedPSFModel(ndd)
return model
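# Hedged usage sketch, defined but never executed on import: the filename is a
# hypothetical placeholder for a file written by psf_grid(), which guarantees the
# required DET_YX{} and OVERSAMP header keywords are present.
def _example_load_psf_grid(filename="nircam_psf_grid.fits"):
    """Illustrative only: load a PSF grid file into a photutils GriddedPSFModel."""
    model = to_griddedpsfmodel(filename, ext=0)
    print(model.grid_xypos)      # detector (x, y) positions of the grid PSFs
    print(model.oversampling)    # oversampling factor carried over from the header
    return model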
``` |
{
"source": "jhunkeler/wiimatch",
"score": 2
} |
#### File: wiimatch/tests/test_utils.py
```python
import pytest
import numpy as np
from wiimatch import utils
def test_utils_coordinates():
image_shape = (3, 5, 4)
center = (0, 0, 0)
c = utils.create_coordinate_arrays(image_shape, center=center)
    ind = np.indices(image_shape, dtype=float)[::-1]
assert np.allclose(c[0], ind, rtol=1.e-8, atol=1.e-12)
assert np.allclose(c[1], center, rtol=1.e-8, atol=1.e-12)
def test_utils_coordinates_no_center():
image_shape = (3, 5, 4)
c = utils.create_coordinate_arrays(image_shape, center=None)
    ind = np.indices(image_shape, dtype=float)[::-1]
center = tuple(i // 2 for i in image_shape)
for orig, cc, i in zip(center, c[0], ind):
        assert np.allclose(cc, i - orig, rtol=1.e-8, atol=1.e-12)
assert np.allclose(c[1], center, rtol=1.e-8, atol=1.e-12)
``` |
{
"source": "jhunken/auth0-python",
"score": 3
} |
#### File: v3/management/blacklists.py
```python
from .rest import RestClient
class Blacklists(object):
"""Auth0 blacklists endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
"""
def __init__(self, domain, token, telemetry=True):
self.url = 'https://{}/api/v2/blacklists/tokens'.format(domain)
self.client = RestClient(jwt=token, telemetry=telemetry)
def get(self, aud=None):
"""Retrieves the jti and aud of all tokens in the blacklist.
Args:
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
See: https://auth0.com/docs/api/management/v2#!/Blacklists/get_tokens
"""
params = {
'aud': aud
}
return self.client.get(self.url, params=params)
def create(self, jti, aud=''):
"""Adds a token to the blacklist.
Args:
jti (str): the jti of the JWT to blacklist.
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
See: https://auth0.com/docs/api/management/v2#!/Blacklists/post_tokens
"""
return self.client.post(self.url, data={'jti': jti, 'aud': aud})
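# Hedged usage sketch (illustrative only, never called): the domain and token are
# placeholders for a real Auth0 tenant and Management API v2 token.
def _example_blacklist_usage():
    """Blacklist a token by its jti, then list blacklisted tokens for that client."""
    blacklists = Blacklists('username.auth0.com', 'MGMT_API_V2_TOKEN')
    blacklists.create(jti='example-jti', aud='example-client-id')
    return blacklists.get(aud='example-client-id')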
``` |
{
"source": "jhuntwork/runsteps",
"score": 3
} |
#### File: tests/unit/test_Runner.py
```python
import glob
import io
import json
import os
import random
import shutil
import stat
import string
import tempfile
import unittest
import runsteps
def fill_directory(path, files):
"""Populate a directory with empty files
Args:
path: The name of a directory for storing the empty files
files: A dictionary where the keys are file names and the value is
a string of content to fill the file. None or False as values
will create the file without content and without the executable
bit.
Returns: A list of the full paths of added files
"""
paths = []
for file, executable in files.items():
fullpath = '/'.join([path, file])
open(fullpath, 'w').close()
if executable:
with open(fullpath, 'w') as newfile:
newfile.write(str(executable))
filestat = os.stat(fullpath)
os.chmod(fullpath, filestat.st_mode | stat.S_IXUSR)
paths.append(fullpath)
return paths
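# Hedged illustration of fill_directory (defined for documentation only, not used
# by the tests below): a truthy value becomes the file's content and sets the
# executable bit, while a falsy value yields an empty, non-executable file.
def _example_fill_directory(tmpdir):
    files = {'step1.sh': '#!/bin/sh\necho "hello"\n',  # executable, with content
             'notes.txt': False}                       # empty, not executable
    return fill_directory(tmpdir, files)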
class RunnerTest(unittest.TestCase):
"""Tests for the runsteps.Runner Class"""
def setUp(self):
"""Set some state for each tests in this class"""
self.steps = ['ls', 'pwd']
self.keys = ['step', 'start_time', 'end_time', 'duration',
'return_code']
self.runner = runsteps.Runner()
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
"""clean up the state after each test"""
shutil.rmtree(self.tempdir, ignore_errors=True)
shutil.rmtree(self.runner.datadir, ignore_errors=True)
def test_init(self):
"""New instances of Runner will have initialized properties"""
self.assertEqual([], self.runner.history)
self.assertEqual([], self.runner._steps)
def test_init_creates_datadir_and_subdirs(self):
"""Ensure that the datadir and subdirs are created"""
shutil.rmtree(self.runner.datadir, ignore_errors=True)
runsteps.Runner()
self.assertTrue(os.path.isdir(self.runner.datadir))
self.assertTrue(os.path.isdir(self.runner.envdir))
self.assertTrue(os.path.isdir(self.runner.logdir))
def test_load_only_allows_lists(self):
"""New instances will raise an Exception if given a non-list object"""
steps = 'This is not a list'
# noinspection PyTypeChecker
with self.assertRaises((runsteps.RunnerException, SystemExit)):
self.runner.load(steps)
def test_run_history(self):
"""At the end of a run, the history property should exist and contain a
list of dicts describing the steps that were executed
"""
self.runner.load(self.steps)
self.runner.run()
num = len(self.steps)
self.assertEqual(num, len(self.runner.history))
for i in range(0, num):
self.assertEqual(self.steps[i], self.runner.history[i]['step'])
for key in self.keys:
self.assertTrue(key in self.runner.history[i])
def test_run_from_a_file_path(self):
"""Call run with the path specified as a file"""
steps = ['ls', 'pwd', 'env']
tempfile_name = '/'.join([self.tempdir, 'steps.json'])
with open(tempfile_name, 'w') as tmpfile:
json.dump(steps, tmpfile)
self.runner.load_from_path(tempfile_name)
self.runner.run()
self.assertEqual(steps, self.runner._steps)
def test_load_from_path_with_a_bad_path(self):
"""Call run with a bad path"""
# noinspection PyTypeChecker
with self.assertRaises((runsteps.RunnerException, SystemExit)):
self.runner.load_from_path('this_is_not_a_file')
def test_run_creates_logs(self):
"""Test that logfiles are created for each step with appropriate
output
"""
files = {'test1.sh': '#!/bin/sh\n'
'echo "blah"\n'}
out = io.StringIO()
fill_directory(self.tempdir, files)
self.runner.load_from_path(self.tempdir)
self.runner.run(out=out)
logfiles = glob.glob('{}/*test1.sh.log'.format(self.runner.logdir))
self.assertTrue(logfiles)
with open(logfiles[0], 'r') as logfile:
output_fromfile = logfile.read()
self.assertEqual(output_fromfile, out.getvalue())
self.assertEqual(output_fromfile, 'blah\n')
def test_run_without_logs(self):
"""run should not create log files when specified"""
self.runner.load(self.steps)
self.runner.run(logs=False)
self.assertFalse(glob.glob('{}/*.log'.format(self.runner.logdir)))
def test_run_halts_execution_when_a_command_fails(self):
"""run should not execute the rest of the steps when one fails"""
steps = ['true', 'false', 'ls']
self.runner.load(steps)
# noinspection PyTypeChecker
with self.assertRaises((runsteps.RunnerException, SystemExit)):
self.runner.run()
executed_steps = [x['step'] for x in self.runner.history]
self.assertEqual(['true', 'false'], executed_steps)
def test_run_inherit_env(self):
"""Test that the step receives existing environment variables when
inherit_env is true.
"""
val = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))
os.putenv('RUNSTEPS_TESTENV_VAR', val)
self.runner.load(self.steps)
self.runner.run(inherit_env=True)
self.assertTrue('RUNSTEPS_TESTENV_VAR' in self.runner._env)
self.assertEqual(self.runner._env['RUNSTEPS_TESTENV_VAR'], val)
def test_run_saved_keypairs_are_preserved_as_environment_variables(self):
"""Test that environment variables stored as keypairs in the
RUNSTEPS_DATA directory are passed from step to step.
Newer steps can overwrite data from previous steps.
"""
files = {'test1': '#!/bin/sh\n'
'. "$RUNSTEPS_ENV/runsteps_helpers.sh"\n'
'runsteps_save_keypair RUNSTEPS_TESTING YES\n',
'test2': '#!/bin/sh\n'
'[ "$RUNSTEPS_TESTING" = "YES" ] || exit 1\n'
'. "$RUNSTEPS_ENV/runsteps_helpers.sh"\n'
'runsteps_save_keypair RUNSTEPS_TESTING NO\n'
'runsteps_save_keypair RUNSTEPS_TESTING MAYBE\n'}
fill_directory(self.tempdir, files)
self.runner.load_from_dir(self.tempdir)
self.runner.run()
self.assertTrue('RUNSTEPS_TESTING' in self.runner._env)
self.assertEqual(self.runner._env['RUNSTEPS_TESTING'], 'MAYBE')
def test_dryrun(self):
"""Test that a dryrun only prints output"""
self.runner.load(self.steps)
out = io.StringIO()
# noinspection PyTypeChecker
with self.assertRaises(SystemExit):
self.runner.dryrun(out=out)
self.assertFalse(self.runner.history)
output = json.loads(out.getvalue())
self.assertEqual(output, self.steps)
def test_dryrun_noout(self):
"""Test dryrun just exits if passed a bad out fd"""
self.runner.load(self.steps)
self.runner.dryrun(out=None)
def test_run_badfiles_are_handled_properly(self):
"""Test that encountering a bad executable raises RunnerException"""
steps = ['step1.sh', 'step2.sh', 'step3.sh']
tempfile_name = '/'.join([self.tempdir, 'steps.json'])
with open(tempfile_name, 'w') as tmpfile:
json.dump(steps, tmpfile)
self.runner.load_from_path(tempfile_name)
# noinspection PyTypeChecker
with self.assertRaises((runsteps.RunnerException, SystemExit)):
self.runner.run(logs=False)
self.assertTrue(len(self.runner.history) == 1)
self.assertTrue('step1.sh' == self.runner.history[0]['step'])
self.assertTrue(
self.runner.history[0]['return_code'] == 1)
def test_load_from_dir_fails_on_missing_directory(self):
"""load_from_dir should fail on missing directories"""
with self.assertRaises(IOError):
self.runner.load_from_dir('some fake directory')
def test_load_from_dir_populates_steps(self):
"""load_from_dir will fill self._steps with files found"""
files = {'one': True, 'two': True, 'three': True}
paths = fill_directory(self.tempdir, files)
self.runner.load_from_dir(self.tempdir)
for step in paths:
self.assertTrue(step in self.runner._steps)
def test_load_from_dir_excludes_non_executable_files(self):
"""load_from_dir should only find executable files"""
files = {'one': True, 'two': False, 'three': True}
badpath = '/'.join([self.tempdir, 'two'])
fill_directory(self.tempdir, files)
self.runner.load_from_dir(self.tempdir)
self.assertFalse(badpath in self.runner._steps)
self.assertEqual(len(self.runner._steps), 2)
def test_load_from_dir_sorts_order_alphabetically(self):
"""load_from_dir should choose order in a predictable way, alphabetically
ascending
"""
files = {'03_testing': True, 'aardvark': True, '01_ztesting': True}
expected = ['{}/01_ztesting'.format(self.tempdir),
'{}/03_testing'.format(self.tempdir),
'{}/aardvark'.format(self.tempdir)]
fill_directory(self.tempdir, files)
self.runner.load_from_dir(self.tempdir)
self.assertEqual(self.runner._steps, expected)
def test_load_from_file_populates_steps(self):
"""load_from_file will fill self._steps with data from a json file"""
steps = ['step1.sh', 'step2.sh', 'step3.sh']
tempfile_name = '/'.join([self.tempdir, 'steps.json'])
with open(tempfile_name, 'w') as tmpfile:
json.dump(steps, tmpfile)
self.runner.load_from_file(tempfile_name)
self.assertEqual(steps, self.runner._steps)
def test_print_history(self):
"""Test the print history method"""
self.runner.load(self.steps)
self.runner.run()
out = io.StringIO()
self.runner.print_history(out=out)
output = out.getvalue()
self.assertTrue(output is not None)
self.assertEqual(type(json.loads(output)), type([]))
def test_print_history_IOError(self):
"""Test that an exit occurs when the history file cannot be read"""
shutil.rmtree(self.runner.datadir)
# noinspection PyTypeChecker
with self.assertRaises((runsteps.RunnerException, SystemExit)):
self.runner.print_history()
def test_main_help(self):
"""Execute the main function with the help flag.
Currently this test is pretty meaningless.
"""
# noinspection PyTypeChecker
with self.assertRaises(SystemExit):
runsteps.main(args=['-h'])
def test_main_dryrun(self):
"""Execute the main function with the dryrun flag.
Currently this test is pretty meaningless.
"""
files = {'test1.sh': '#!/bin/sh\n'
'echo "blah"\n'}
fill_directory(self.tempdir, files)
# noinspection PyTypeChecker
with self.assertRaises(SystemExit):
runsteps.main(args=['-d', self.tempdir])
def test_main_verbose(self):
"""Execute the main function with the verbose flag.
Currently this test is pretty meaningless.
"""
files = {'test1.sh': '#!/bin/sh\n'
'echo "blah"\n'}
fill_directory(self.tempdir, files)
runsteps.main(args=['-v', self.tempdir])
def test_main_double_verbose(self):
"""Execute the main function with the verbose flag twice.
Currently this test is pretty meaningless.
"""
files = {'test1.sh': '#!/bin/sh\n'
'echo "blah"\n'}
fill_directory(self.tempdir, files)
runsteps.main(args=['-vv', self.tempdir])
``` |
{
"source": "jhunufernandes/iot",
"score": 3
} |
#### File: jhunufernandes/iot/teste.py
```python
import asyncio
import websockets
async def hello(uri):
async with websockets.connect(uri) as websocket:
await websocket.send("Hello world!")
asyncio.get_event_loop().run_until_complete(
hello('ws://ec2-18-225-10-77.us-east-2.compute.amazonaws.com:8765'))
``` |
{
"source": "jhunyee/misy350-s18-exercises",
"score": 2
} |
#### File: misy350-s18-exercises/songbase/manage.py
```python
from flask_script import Manager
from songbase import app, db, Artist
manager = Manager(app)
@manager.command
def deploy():
print "resetting database..."
db.drop_all()
db.create_all()
print "inserting initial data..."
coldplay = Artist(name="coldplay", about="this is coldplay")
maroon5 = Artist(name="maroon 5", about="this is maroon 5")
db.session.add(coldplay)
db.session.add(maroon5)
db.session.commit()
if __name__ == '__main__':
manager.run()
``` |
{
"source": "jhuo42/nand2tetris-solutions",
"score": 3
} |
#### File: nand2tetris-solutions/Scripts/assembler.py
```python
import re
import argparse
import os
class parseline(object):
""" Parses line to A- or C-type instruction.
Attributes:
line (str): line from asm, usually sanitized.
line_loc (int): optional line number specification.
symbolics (symboltable): optional reference to symbolic table
instance.
type (str): a_type or c_type instruction
binary (str(016b)): binary string of parsed instruction.
Todo:
"""
def __init__(self, line, line_loc=None, symbolics=None):
self.line = _sanitizeline(line)
self.line_loc = line_loc
self.type, self.binary = self.subclass(line, line_loc, symbolics)
def subclass(self, line, line_loc, symbolics):
""" Defines line type and parses to binary.
Code parsing can incorrectly succeed for instructions, e.g.:
'Memory=Address' will return line.binary=='1111110000000000'
Args:
line (str): line from asm, usually sanitized.
line_loc (int): optional line number specification.
symbolics (symboltable): optional reference to symbolic table
instance.
Returns:
type (str): instruction type identification as a_type or c_type.
binary (str(016b)): binary string of parsed instruction.
Raises:
ParseError of 'Unknown'-type when instruction fails all parsing.
"""
dest, comp, jmp = self.code_parse(line)
if line[0] == "@": # Address type instruction
return 'a_type', self.address_parse(line[1:], line_loc, symbolics)
elif comp: # Calculation type instruction
return 'c_type', comp + dest + jmp
# Following cases cover some poor entry line sanitization
elif line[0] == '(' and line[-1] == ')':
return None, ''
elif line == '':
return None, ''
else:
raise ParseError(line, line_loc)
def address_parse(self, line, line_loc, symbolics):
""" Parses address to binary value.
Resolves integer address to binary. If string and not in
symboltable class resolves to a new key, otherwise returns found
value.
Args:
line (str): line identified with @ as first character.
Can contain any characters, (usually) with whitespace and
comments removed.
line_loc (int): specifies line number. Accepts None.
symbolics (symboltable): reference to symbolic table instance.
Accepts None.
Returns:
binary (str(016b)): binary valued string of resolved address.
Raises:
ParseError when given a variable address with no symboltable.
"""
try:
binary = format(int(line), '016b')
except ValueError:
if symbolics is not None:
binary = symbolics.resolve(line)
else:
                raise ParseError(line, self.line_loc, itype="a-type")
return binary
def code_parse(self, line):
"""Translates C-command type to binary representation.
Does not raise errors with failed parse, comp is not None
identifies succesful parse.
comp + dest + jmp creates a valid hack (016b) instruction.
Args:
line (str): Any string with possible c-type parsing.
Can contain any characters, (usually) with whitespace and
comments removed. Prefers sanitized strings.
Returns:
dest (str(010b)): binary representation of parsed destination
comp (str(03b)): binary representation of parsed computation
jmp (str(03b)): binary representation of parsed jump
"""
# Regular expression parsing, with longer matches first
# Re always succeeds at finding the matching instruction
_re_comp = (r"(D\|A|D&A|A-D|D-A|D\+A|A-1|D-1|A\+1|D\+1|"
r"D\|M|D&M|M-D|D-M|D\+M|M-1|M\+1|-M|!M|M|"
r"-A|-D|!A|!D|A|D|-1|1|0?)")
_re_dest = r"((AMD|AD|AM|MD|A|M|D)=)?"
_re_jmp = r"(;(JGT|JEQ|JGE|JLT|JNE|JLE|JMP))?"
_re_line = re.compile('{0}{1}{2}'.format(_re_dest, _re_comp, _re_jmp))
grouped_c = re.match(_re_line, line)
# C-command parsing dictionaries:
comp_table = {
'0': '1110101010', '1': '1110111111', '-1': '1110111010',
'D': '1110001100', 'A': '1110110000', '!D': '1110001101',
'!A': '1110110001', '-D': '1110001111', '-A': '1110110011',
'D+1': '1110011111', 'A+1': '1110110111', 'D-1': '1110001110',
'A-1': '1110110010', 'D+A': '1110000010', 'D-A': '1110010011',
'A-D': '1110000111', 'D&A': '1110000000', 'D|A': '1110010101',
'M': '1111110000', '!M': '1111110001', '-M': '1111110011',
'M+1': '1111110111', 'M-1': '1111110010', 'D+M': '1111000010',
'D-M': '1111010011', 'M-D': '1111000111', 'D&M': '1111000000',
'D|M': '1111010101'
}
dest_table = {
'None': '000', 'M': '001', 'D': '010', 'MD': '011',
'A': '100', 'AM': '101', 'AD': '110', 'AMD': '111'
}
jmp_table = {
'None': '000', 'JGT': '001', 'JEQ': '010', 'JGE': '011',
'JLT': '100', 'JNE': '101', 'JLE': '110', 'JMP': '111'
}
# Parse according to dictionaries
dest = dest_table[str(grouped_c.group(2))]
if grouped_c.group(3):
comp = comp_table[str(grouped_c.group(3))]
else:
comp = None
jmp = jmp_table[str(grouped_c.group(5))]
return dest, comp, jmp
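# Hedged worked example (illustrative only, not used by the assembler): parsing the
# C-instruction "D=M+1;JGT" yields comp='1111110111' (M+1), dest='010' (D) and
# jmp='001' (JGT), giving the 16-bit hack word '1111110111010001'.
def _example_code_parse():
    parsed = parseline('D=M+1;JGT')
    assert parsed.type == 'c_type'
    assert parsed.binary == '1111110111010001'
    return parsed.binary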
class symboltable(object):
""" Maintains symbolic table of true label locations.
Attributes:
lines (list of str): List of (sanitized) commands with comments, empty
lines and spaces removed. Can be directly parsed.
table (dict of str: str(016b)): Dictionary of pre-initialized symbolic
label values.
        used (int): Number of registers used, including register 0.
    Todo:
        * Raise an exception when variable addresses exceed the available memory space.
"""
def __init__(self, lines):
self.lines = self._sanitizeasm(lines)
self.table = self.inittable(self.lines)
def __getitem__(self, i):
""" Defines instance[symbolic key] syntax for class """
return self.table[i]
def resolve(self, label):
""" Resolves label by returning dictionary value or creating new key.
New keys are assigned binary valued addresses after pre-assigned
register values (starting from register 16).
Args:
label (str): Symbolic label or variable to resolve.
Returns:
binary (str(016b)): Resolved binary valued address.
"""
try:
binary = self.table[label]
except KeyError:
self.used += 1
self.table[label] = format(int(self.used), '016b')
binary = self.table[label]
return binary
@staticmethod
def _sanitizeasm(lines):
""" Fully sanitizes all lines, removing leftover empty lines. """
sane_lines = []
for line in lines:
sane_line = _sanitizeline(line)
if sane_line:
sane_lines.append(sane_line)
return sane_lines
def inittable(self, lines):
""" Initializes symbolic table with pre-set values and asm labels.
Args:
lines (list of str): list of fully sanitized asm instructions.
Returns:
table (dict): Dictionary with label|variable: address pairs.
"""
table = {'R' + str(i): format(i, '016b') for i in range(16)}
table['SP'] = format(0, '016b')
table['LCL'] = format(1, '016b')
table['ARG'] = format(2, '016b')
table['THIS'] = format(3, '016b')
table['THAT'] = format(4, '016b')
table['SCREEN'] = format(16384, '016b')
table['KBD'] = format(24576, '016b')
self.used = 15 # pre-used registers 0-15
c_idx = 0
c_lines = []
for i, line in enumerate(lines):
if line[0] == '(' and line[-1] == ')':
table[line[1:-1]] = format(c_idx, "016b")
else:
c_idx += 1
c_lines.append(i)
c_list = []
for c_line in c_lines:
c_list.append(lines[c_line])
self.lines = c_list
return table
class ParseError(Exception):
""" Exception raised for failed parse.
Outputs to log-file in working directory
"""
def __init__(self, line, line_loc, path='./log.txt', itype='Unknown'):
self.line = line
self.line_loc = line_loc
        self.type = itype
self.printlog(line, line_loc, path, itype)
self.path = path
def printlog(self, line, line_loc, path, itype):
""" Prints log-file """
with open(path, 'w') as log:
print("{2} Error parsing {0}: {1}".format(line_loc, line, itype),
file=log)
class InputError(Exception):
""" Exception raised for improper file input """
def __init__(self, file, message):
self.file = file
self.message = message
def _sanitizeline(line):
""" Sanitizes input asm lines by removing all whitespace and comments """
line = re.sub(r'\s', '', re.split(r'//', line)[0])
return line
def main(asmfile, outputdir=None):
""" Creates a symbolictable isntance and parseline instances.
Holds asm in memory while reading and hack while writing.
All parseline objects along with symbolic table are retained and
returned for ease of error handling. To avoid (possibly) large memory
footprint function can be written inside loop to facilitate garbage
collection and parseline instances discarded, but symboltable needs
to be adjusted to disgard sanitized lines.
parseline instances do not require strong sanitization.
(Assumes relatively small file sizes for input and output).
Args:
asmfile (str): Filepath to input asm.
outputdir (str): Optional filepath to output hack. If empty output is
placed in input directory.
Returns:
parsed (list of parseline): Instances of parsed instruction lines
symbolics (symbolictable): Filled instance of the instruction symbolics
"""
lines = [line.strip() for line in open(asmfile)]
symbolics = symboltable(lines)
line_loc = 0
parsed = []
for line in symbolics.lines:
line_loc += 1
try:
parsed.append(parseline(line, line_loc, symbolics))
except ParseError as err:
print("{2} Error parsing {0}: {1}".format(err.line_loc, err.line,
err.type))
if outputdir is None: # Use asm filepath to create hackfile
hackfile = re.sub(r'(asm)$', 'hack', asmfile)
else:
hackfile = outputdir + os.path.splitext(os.path.split(asmfile)[1])[0]\
+ '.hack'
with open(hackfile, 'w') as destfile:
for line in parsed:
print(line.binary, file=destfile)
# Returns for error handling
return parsed, symbolics
if __name__ == "__main__":
# Utility config
# Called from commandline with optional destination path:
# 'python assembler.py "path-to.asm" -d "path-to.hack"'
parser = argparse.ArgumentParser(description='Parse a hack assembly file '
'to machine code.')
parser.add_argument('filepath', type=str, help='path to source asm')
parser.add_argument('--destination', '-d', type=str, default=None,
metavar='OUTPUTDIR',
help='output directory, by default uses asm path')
args = parser.parse_args()
if os.path.splitext(args.filepath)[1] != '.asm':
raise InputError(args.filepath, "Not an asm-file")
if args.destination:
if args.destination[-1:] != '\\':
destdir = args.destination + '\\'
else:
destdir = args.destination
else:
destdir = args.destination
# Main function calls
main(args.filepath, destdir)
print('Assembly complete!')
``` |
{
"source": "JHU-PL-Lab/representation-types",
"score": 3
} |
#### File: representation-types/tests/matrix.py
```python
def mat4x4_col(m, i):
return tuple(v[i] for v in m)
def vec4_dot(v1, v2):
return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2] + v1[3]*v2[3]
def mat4x4_mul(m1, m2):
def mat4x4_mul_row(v1, m2):
return tuple(vec4_dot(v1, mat4x4_col(m2, i)) for i in range(4))
return tuple(
mat4x4_mul_row(v, m2) for v in m1
)
def mat4x4_det(m):
return (
m[0][3] * m[1][2] * m[2][1] * m[3][0] - m[0][2] * m[1][3] * m[2][1] * m[3][0] - m[0][3] * m[1][1] * m[2][2] * m[3][0] +
m[0][1] * m[1][3] * m[2][2] * m[3][0] + m[0][2] * m[1][1] * m[2][3] * m[3][0] - m[0][1] * m[1][2] * m[2][3] * m[3][0] -
m[0][3] * m[1][2] * m[2][0] * m[3][1] + m[0][2] * m[1][3] * m[2][0] * m[3][1] + m[0][3] * m[1][0] * m[2][2] * m[3][1] -
m[0][0] * m[1][3] * m[2][2] * m[3][1] - m[0][2] * m[1][0] * m[2][3] * m[3][1] + m[0][0] * m[1][2] * m[2][3] * m[3][1] +
m[0][3] * m[1][1] * m[2][0] * m[3][2] - m[0][1] * m[1][3] * m[2][0] * m[3][2] - m[0][3] * m[1][0] * m[2][1] * m[3][2] +
m[0][0] * m[1][3] * m[2][1] * m[3][2] + m[0][1] * m[1][0] * m[2][3] * m[3][2] - m[0][0] * m[1][1] * m[2][3] * m[3][2] -
m[0][2] * m[1][1] * m[2][0] * m[3][3] + m[0][1] * m[1][2] * m[2][0] * m[3][3] + m[0][2] * m[1][0] * m[2][1] * m[3][3] -
m[0][0] * m[1][2] * m[2][1] * m[3][3] - m[0][1] * m[1][0] * m[2][2] * m[3][3] + m[0][0] * m[1][1] * m[2][2] * m[3][3]
)
def mat4x4_input():
return tuple(
tuple(map(int, input().split()))
for _ in range(4)
)
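# Hedged sanity check (illustrative only, not called by main): the determinant of
# the 4x4 identity matrix is 1, and multiplying identities leaves it unchanged.
def _example_identity_det():
    identity = tuple(tuple(1 if i == j else 0 for j in range(4)) for i in range(4))
    assert mat4x4_det(identity) == 1
    assert mat4x4_det(mat4x4_mul(identity, identity)) == 1
    return mat4x4_det(identity)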
def main():
num_reps = int(input())
m1 = mat4x4_input()
m2 = mat4x4_input()
m3 = mat4x4_input()
for _ in range(num_reps):
det = mat4x4_det(mat4x4_mul(m1, mat4x4_mul(m2, m3)))
print(det)
if __name__ == "__main__":
main()
``` |
{
"source": "J-Hurd/instaparser",
"score": 3
} |
#### File: J-Hurd/instaparser/example.py
```python
import sys
import argparse
import configparser
import logging
from instaparser import Instaparser
def init_parser():
"""."""
parser = argparse.ArgumentParser(description='Get instaparser api key')
parser.add_argument('api_key', metavar='a', type=str, nargs='?', help='Your instaparser API key')
return parser.parse_args()
def main():
"""."""
logging.debug("Starting main function")
args = init_parser()
# if there was an api key passed in as an argument, use that
if (args.api_key):
i = Instaparser(args.api_key)
# if there was no api_key parameter initialize a class
else:
i = Instaparser()
# use the text parser
i.parse("https://www.biblegateway.com/passage/?search=John+1&version=NIV", "text")
print(i.response)
print(i.response_text['text'] + "\n\n")
# use the article parser
i.parse("http://genius.com/Propaganda-be-present-live-from-catalyst-atlanta-lyrics")
print(i.response)
print(i.response_text['description'] + "\n\n")
# use the document parser
i.parse("http://test.com", "document", """<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<title>test</title>
</head>
<body class="basic">
<div>test-html</div>
</body>
</html>
""")
print(i.response)
print(i.response_text['description'])
if __name__ == '__main__':
main()
``` |
{
"source": "jhurd-tc/tcex",
"score": 3
} |
#### File: tcex/tcex/tcex_logger.py
```python
import time
from logging import Formatter, Handler
class TcExLogFormatter(Formatter):
"""Logger formatter for ThreatConnect Exchange API logging."""
def __init__(self, task_name=None):
"""Initialize Class properties."""
self.task_name = task_name
super(TcExLogFormatter, self).__init__()
def format(self, record):
"""Format log record for ThreatConnect API.
Example log event::
[{
"timestamp": 1478907537000,
"message": "Test Message",
"level": "DEBUG"
}]
"""
return {
'timestamp': int(float(record.created or time.time()) * 1000),
'message': record.msg or '',
'level': record.levelname or 'DEBUG'
}
class TcExLogHandler(Handler):
"""Logger handler for ThreatConnect Exchange API logging."""
def __init__(self, session, flush_limit=100):
"""Initialize Class properties.
Args:
session (Request.Session): The preconfigured instance of Session for ThreatConnect API.
flush_limit (int): The limit to flush batch logs to the API.
"""
super(TcExLogHandler, self).__init__()
self.session = session
self.flush_limit = flush_limit
self.entries = []
def close(self):
"""Close the logger and flush entries."""
self.log_to_api()
self.entries = []
def emit(self, record):
"""Emit the log record."""
self.entries.append(self.format(record))
if len(self.entries) > self.flush_limit and not self.session.auth.renewing:
self.log_to_api()
self.entries = []
def log_to_api(self):
"""Best effort API logger.
Send logs to the ThreatConnect API and do nothing if the attempt fails.
"""
if self.entries:
try:
headers = {'Content-Type': 'application/json'}
self.session.post('/v2/logs/app', headers=headers, json=self.entries)
# self.entries = [] # clear entries
except Exception:
# best effort on api logging
pass
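# Hedged usage sketch (illustrative only, never executed on import): "session" is
# assumed to be a pre-authenticated requests.Session pointed at the ThreatConnect
# API, as described in the TcExLogHandler docstring above.
def _example_attach_handler(session):
    """Attach the API handler and formatter to a named logger."""
    import logging
    logger = logging.getLogger('tcex_example')
    handler = TcExLogHandler(session, flush_limit=100)
    handler.setFormatter(TcExLogFormatter())
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger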
``` |
{
"source": "jhurd-tc/threatconnect-python",
"score": 2
} |
#### File: examples/retrieve/owners_retrieve.py
```python
import ConfigParser
from random import randint
import sys
""" custom """
from threatconnect import ThreatConnect
from threatconnect.Config.FilterOperator import FilterSetOperator
# configuration file
config_file = "tc.conf"
# retrieve configuration file
config = ConfigParser.RawConfigParser()
config.read(config_file)
try:
api_access_id = config.get('threatconnect', 'api_access_id')
api_secret_key = config.get('threatconnect', 'api_secret_key')
api_default_org = config.get('threatconnect', 'api_default_org')
api_base_url = config.get('threatconnect', 'api_base_url')
api_result_limit = int(config.get('threatconnect', 'api_result_limit'))
except ConfigParser.NoOptionError:
print('Could not retrieve configuration file.')
sys.exit(1)
tc = ThreatConnect(api_access_id, api_secret_key, api_default_org, api_base_url)
tc.set_api_result_limit(api_result_limit)
tc.report_enable()
""" Get Owners """
enable_example1 = False
enable_example2 = False
enable_example3 = False
# shared method to display results from examples below
def show_data(result_obj):
""" """
for obj in result_obj:
print('\n{0!s:_^80}'.format(obj.name))
print('{0!s:<20}{1!s:<50}'.format('ID', obj.id))
print('{0!s:<20}{1!s:<50}'.format('Type', obj.type))
#
# api_uris
#
if len(obj.request_uris) > 0:
print('\n{0!s:-^40}'.format(' Request URIs '))
for request_uri in obj.request_uris:
print('{0!s:<20}{1!s:<50}'.format('URI', request_uri))
#
# matched filters
#
if len(obj.matched_filters) > 0:
print('\n{0!s:-^40}'.format(' API Matched Filters '))
for api_filter in obj.matched_filters:
print('{0!s:<20}{1!s:<50}'.format('Filter', api_filter))
#
# print report
#
print(tc.report.stats)
def main():
""" """
# set threat connect log (tcl) level
tc.set_tcl_file('log/tc.log', 'debug')
tc.set_tcl_console_level('critical')
if enable_example1:
""" This is a basic example that pull all owners. """
# optionally set the max results the api should return in one request
tc.set_api_result_limit(500)
# get owner object
owners = tc.owners()
# retrieve owners
try:
owners.retrieve()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
# show owner data
show_data(owners)
if enable_example2:
""" This example retrieves all owners that a particular indicator appears. """
# get owner object
owners = tc.owners()
# filter results
try:
filter1 = owners.add_filter()
filter1.add_indicator('10.20.30.40')
filter1.add_pf_name('Example Community')
filter1.add_pf_type('Community') # Organization, Community, Source
except AttributeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
# retrieve owners
try:
owners.retrieve()
except RuntimeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
# show owner data
show_data(owners)
"""
Method:
        get_owners() -> This method can be used to get an object containing owners filtered by indicator.
"""
if enable_example3:
# get owner object
owners = tc.owners()
# filter results
try:
filter1 = owners.add_filter()
filter1.add_indicator('10.20.30.40')
except AttributeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
try:
filter2 = owners.add_filter()
filter2.add_filter_operator(FilterSetOperator.AND)
filter2.add_indicator('<EMAIL>')
except AttributeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
# retrieve owners
try:
owners.retrieve()
except AttributeError as e:
print('Error: {0!s}'.format(e))
sys.exit(1)
# show owner data
show_data(owners)
if __name__ == "__main__":
main()
```
#### File: threatconnect-python/tests/test_owner.py
```python
import pytest
import threatconnect
import utility
# initialize mock
utility.start()
@pytest.fixture
def tc():
"""Initialize instance of TC SDK."""
tc = threatconnect.ThreatConnect('accessId', 'secretKey', 'System', '//')
return tc
def test_owner_retrieval(tc):
"""Retrieve all owners."""
owners = tc.owners()
owners.retrieve()
assert len(owners) == 4
def test_general_owner_metrics_retrieval(tc):
"""Retrieve metrics for all owners."""
owners = tc.owners()
owners.retrieve_metrics()
# TODO: Is it possible to get metrics for a specific owner? (3)
# def test_specific_owner_metrics_retrieval(tc):
# """."""
# owners = tc.owners()
# filter1 = owners.add_filter()
# filter1.add_id(0)
# owners.retrieve()
# owners[0].retrieve_metrics()
```
#### File: threatconnect-python/threatconnect/ApiProperties.py
```python
def g_properties(group_uri):
""" """
properties = {
'add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri,
},
# 'association_custom_indicator_add': {
# 'http_method': 'POST',
# 'owner_allowed': True,
# 'pagination': False,
# 'uri': '/v2/indicators/{0}/{1}/groups/' + group_uri + '/{2}', # indicator type, indicator id, group id
# },
# 'association_custom_indicators': {
# 'http_method': 'GET',
# 'owner_allowed': True,
# 'pagination': True,
# 'uri': '/v2/indicators/{0}/{1}/groups/' + group_uri, # indicator type, indicator id
# },
'association_groups': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/groups', # group id
},
'association_group_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/groups/{1}/{2}', # group id, group type, group id
},
'association_group_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/groups/{1}/{2}', # group id, group type, group id
},
'association_indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/indicators', # group id
},
'association_indicator_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
            'uri': '/v2/indicators/{0}/{1}/groups/' + group_uri + '/{2}', # indicator type, indicator value, group id
},
'association_indicator_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
            'uri': '/v2/indicators/{0}/{1}/groups/' + group_uri + '/{2}', # indicator type, indicator value, group id
},
'association_tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/tasks', # group id
},
'association_task_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/tasks/{1}', # group id, task id
},
'association_task_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/tasks/{1}', # group id, task id
},
'association_victims': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/victims', # group id
},
'association_victim_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/victims/{1}', # group id, victim id
},
'association_victim_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/victims/{1}', # group id, victim id
},
'attributes': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/attributes', # group id
},
'attribute_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/attributes', # group id
},
'attribute_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/attributes/{1}', # group id, attribute id
},
'attribute_update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/attributes/{1}', # group id, attribute id
},
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '',
},
'delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}', # group id
},
'document_download': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/documents/{0}/download', # document id
},
'document_upload': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/documents/{0}/upload', # document id
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_email_id',
'add_document_id',
'add_id',
'add_incident_id',
'add_indicator',
'add_security_label',
'add_signature_id',
'add_tag',
'add_task_id',
'add_threat_id',
'add_victim_id',
# post filters
'add_pf_name',
'add_pf_date_added',
'add_pf_file_type',
],
'groups': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/{0}/{1}/groups/' + group_uri # group type, group id
},
'id': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}', # group id
},
'indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/{0}/{1}/groups/' + group_uri, # group id
},
'signature_download': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/signatures/{0}/download', # signature id
},
'signature_upload': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/signatures/{0}/upload', # signature id
},
'security_label_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/securityLabels/{1}', # group id, security label
},
'security_label_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/securityLabels/{1}', # group id, security label
},
'security_label_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/securityLabels', # group id
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/groups/' + group_uri # security labels
},
'tag_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/tags/{1}', # group id, security label
},
'tag_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}/tags/{1}', # group id, security label
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/groups/' + group_uri, # tag name
},
'tags_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/' + group_uri + '/{0}/tags', # group id
},
'tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/groups/' + group_uri, # task id
},
'update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/groups/' + group_uri + '/{0}', # group id
},
'victims': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/victims/{0}/groups/' + group_uri # victim id
},
}
return properties
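# Hedged illustration (not used by the SDK itself): g_properties expands one group
# type's URI fragment into its endpoint table, e.g. for the 'adversaries' type.
def _example_g_properties():
    props = g_properties('adversaries')
    assert props['add']['uri'] == '/v2/groups/adversaries'
    assert props['id']['http_method'] == 'GET'
    return props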
#
# i_properties
#
def i_properties(indicator_uri):
""" """
properties = {
'add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri,
},
'association_groups': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/groups', # indicator value
},
'association_group_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/groups/{1}/{2}', # indicator value, group type, group id
},
'association_group_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/groups/{1}/{2}', # indicator value, group type, group id
},
'association_indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/indicators', # indicator value
},
'association_tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/tasks', # indicator value
},
'association_task_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/tasks/{1}', # indicator value, task id
},
'association_task_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/tasks/{1}/{2}', # indicator value, tasks id
},
'association_victims': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/victims', # indicator value
},
'attributes': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/attributes', # indicator value
},
'attribute_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/attributes', # indicator value
},
'attribute_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/attributes/{1}', # indicator value, attribute id
},
'attribute_update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/attributes/{1}', # indicator value, attribute id
},
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '',
},
'delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}', # indicator value
},
'false_positive_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/falsePositive'
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_document_id',
'add_email_id',
'add_incident_id',
'add_indicator',
'add_security_label',
# 'add_signature_id',
'add_tag',
'add_task_id',
'add_threat_id',
'add_victim_id',
# post filters
'add_pf_attribute',
'add_pf_confidence',
'add_pf_date_added',
'add_pf_last_modified',
'add_pf_rating',
'add_pf_tag',
'add_pf_threat_assess_confidence',
'add_pf_threat_assess_rating',
'add_pf_type'],
'groups': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/{0}/{1}/indicators/' + indicator_uri # group type, group value
},
'indicator': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}', # indicator value
},
'id': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}', # indicator value
},
'observation_count_get': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/observationCount' # indicator value
},
'observations_get': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/observations' # indicator value
},
'observations_add': {
'http_method': 'POST',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/observations' # indicator value
},
'security_label_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/securityLabels/{1}', # indicator value, security label
},
'security_label_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/securityLabels/{1}', # indicator value, security label
},
'security_label_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/securityLabels', # indicator value
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/indicators/' + indicator_uri # security labels
},
'tag_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/tags/{1}', # indicator value, security label
},
'tag_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/tags/{1}', # indicator value, security label
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/indicators/' + indicator_uri, # tag name
},
'tags_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/indicators/' + indicator_uri + '/{0}/tags', # indicator value
},
'tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/indicators/' + indicator_uri, # task id
},
'update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + indicator_uri + '/{0}', # indicator value
},
'victims': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/victims/{0}/indicators/' + indicator_uri # victim id
},
}
if indicator_uri == 'files':
properties['file_occurrence'] = {
'http_method': 'GET',
'uri': '/v2/indicators/files/{0}/fileOccurrences/{1}', # hash, occurrence id
'owner_allowed': True,
'pagination': False
}
properties['file_occurrence_add'] = {
'http_method': 'POST',
'uri': '/v2/indicators/files/{0}/fileOccurrences', # hash
'owner_allowed': True,
'pagination': False,
}
properties['file_occurrence_delete'] = {
'http_method': 'DELETE',
'uri': '/v2/indicators/files/{0}/fileOccurrences/{1}', # hash, occurrence id
'owner_allowed': True,
'pagination': False,
}
properties['file_occurrence_update'] = {
'http_method': 'PUT',
'uri': '/v2/indicators/files/{0}/fileOccurrences/{1}', # hash, occurrence id
'owner_allowed': True,
'pagination': False,
}
properties['file_occurrences'] = {
'http_method': 'GET',
'uri': '/v2/indicators/files/{0}/fileOccurrences', # hash
'owner_allowed': True,
'pagination': False,
}
if indicator_uri == 'hosts':
properties['dns_resolution'] = {
'http_method': 'GET',
'uri': '/v2/indicators/hosts/{0}/dnsResolutions', # indicator value
'owner_allowed': True,
'pagination': True,
}
return properties
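# Illustrative sketch (not part of the library): the branch-specific additions above
# mean that, for example,
#   i_properties('files')['file_occurrences']['uri']   # '/v2/indicators/files/{0}/fileOccurrences'
#   i_properties('hosts')['dns_resolution']['uri']     # '/v2/indicators/hosts/{0}/dnsResolutions'
# while the other indicator branches return only the common endpoint set.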
#
# groups
#
groups_properties = {
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups',
},
'groups': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/{0}/{1}/groups', # group type, group value
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_document_id',
'add_email_id',
'add_incident_id',
'add_indicator',
'add_security_label',
'add_signature_id',
'add_tag',
'add_task_id',
'add_threat_id',
'add_victim_id',
# post filters
'add_pf_name',
'add_pf_date_added',
'add_pf_type'
],
'indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/{0}/{1}/groups', # indicator type, indicator value
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/groups', # tag name
},
'tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/groups', # task id
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/groups', # security labels
},
'victims': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/victims/{0}/groups', # victim id
},
}
#
# indicators
#
indicators_properties = {
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators',
},
'bulk': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/bulk/json',
},
'groups': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/{0}/{1}/indicators', # group type, group value
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_email_id',
'add_incident_id',
'add_indicator',
'add_security_label',
'add_signature_id',
'add_tag',
'add_task_id',
'add_threat_id',
'add_victim_id',
'add_pf_attribute',
'add_pf_confidence',
'add_pf_date_added',
'add_pf_last_modified',
'add_pf_rating',
'add_pf_tag',
'add_pf_threat_assess_confidence',
'add_pf_threat_assess_rating',
'add_pf_type'],
'indicator': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/{0}/{1}', # indicator type, indicator value
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/indicators', # tag name
},
'tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/indicators', # task id
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/indicators', # security labels
},
'victims': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/victims/{0}/indicators', # victim id
},
}
#
# owners
#
owners_properties = {
'base': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/owners',
},
'filters': [
'add_id',
'add_indicator',
'add_pf_name',
'add_pf_type',
],
'id': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/owners/{0}', # owner id
},
'indicators': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/indicators/{0}/{1}/owners', # indicator type, indicator value
},
'individual_metrics': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/owners/{0}/metrics',
},
'members': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/owners/mine/members',
},
'metrics': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/owners/metrics',
},
'mine': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/owners/mine',
},
}
#
# tasks
#
tasks_properties = {
'add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks',
},
'assignee_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/assignees/{1}', # task id, assignee account
},
'assignee_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/assignees/{1}', # task id, assignee account
},
'association_groups': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/groups', # task id
},
'association_group_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/groups/{1}/{2}', # task id, group type, group id
},
'association_group_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/groups/{1}/{2}', # task id, group type, group id
},
'association_indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/indicators', # task id
},
'association_indicator_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
# 'uri': '/v2/indicators/{0}/{1}/tasks/{2}', # indicator type, indicator_value, task_id
'uri': '/v2/tasks/{0}/indicators/{1}/{2}', # task id, indicator type, indicator_value
},
'association_indicator_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
# 'uri': '/v2/indicators/{0}/{1}/tasks/{2}', # indicator type, indicator_value, task_id
'uri': '/v2/tasks/{0}/indicators/{1}/{2}', # task id, indicator type, indicator_value
},
'association_victims': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/victims', # task id
},
'association_victim_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/victims/{1}', # task id, victim id
},
'association_victim_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/victims/{1}', # task id, victim id
},
'attributes': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/attributes', # task id
},
'attribute_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/attributes', # task id
},
'attribute_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/attributes/{1}', # task id, attribute id
},
'attribute_update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/attributes/{1}', # task id, attribute id
},
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks',
},
'escalatee_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/escalatees/{1}', # task id, escalatee account
},
'escalatee_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/escalatees/{1}', # task id, escalatee account
},
'delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}',
},
'groups': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/groups/{0}/{1}/tasks', # group type, group value
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_document_id',
'add_email_id',
'add_id',
'add_incident_id',
'add_indicator',
'add_security_label',
'add_signature_id',
'add_threat_id',
'add_tag',
'add_victim_id',
# post filters
'add_pf_attribute',
'add_pf_name',
'add_pf_date_added',
],
'id': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}', # task id
},
'indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/{0}/{1}/tasks', # indicator type, indicator value
},
'security_label_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/securityLabels/{1}', # task id, security label
},
'security_label_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/securityLabels/{1}', # task id, security label
},
'security_label_load': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/securityLabels', # task id
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/tasks', # security labels
},
'tag_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/tags/{1}', # task id, tag name
},
'tag_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}/tags/{1}', # task id, tag name
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/tasks', # tag name
},
'tags_load': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/tags', # task id
},
'update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/tasks/{0}', # task id
},
'victims': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims/{0}/tasks', # victim id
},
}
#
# victims
#
victims_properties = {
'add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims',
},
'assets': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/victimAssets', # victim id
},
'asset_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/victimAssets/{1}', # victim id, asset type
},
'asset_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/victimAssets/{1}/{2}', # victim id, asset type, asset id
},
'asset_update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/victimAssets/{1}/{2}', # victim id, asset type, asset id
},
'association_groups': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims/{0}/groups', # victim id
},
'association_group_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/groups/{1}/{2}', # victim id, group type, group id
},
'association_group_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/groups/{1}/{2}', # victim id, group type, group id
},
'association_indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims/{0}/indicators', # victim id
},
'association_tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims/{0}/tasks', # victim id
},
'association_task_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/tasks/{1}', # victim id, task id
},
'association_task_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/tasks/{1}', # victim id, task id
},
'attributes': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/attributes', # victim id
},
'attribute_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/attributes', # victim id
},
'attribute_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/attributes/{1}', # victim id, attribute id
},
'attribute_update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/attributes/{1}', # victim id, attribute id
},
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims',
},
'delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}',
},
'groups': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/groups/{0}/{1}/victims', # group type, group id
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_document_id',
'add_email_id',
'add_id',
'add_incident_id',
'add_indicator',
'add_signature_id',
'add_security_label',
'add_tag',
'add_task_id',
'add_threat_id',
# post filters
'add_pf_attribute',
'add_pf_date_added',
'add_pf_name',
'add_pf_type',
],
'id': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}', # victim id
},
'indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/{0}/{1}/victims', # indicator type, indicator value
},
'security_label_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims/{0}/securityLabels/{1}', # victim id, security label
},
'security_label_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/victims/{0}/securityLabels/{1}', # victim id, security label
},
'security_label_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/victims/{0}/securityLabels', # victim id
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/victims', # security labels
},
'tag_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/tags/{1}', # victim id, tag name
},
'tag_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}/tags/{1}', # victim id, tag name
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/victims', # tag name
},
'tags_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/victims/{0}/tags', # victim id
},
'tasks': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tasks/{0}/victims', # task id
},
'update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/victims/{0}',
},
}
#
# batch jobs
#
batch_job_properties = {
'add': {
'http_method': 'POST',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/batch',
},
'id': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/batch/{0}', # batch id
},
'batch_error_download': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/batch/{0}/errors', # batch id
},
'batch_job_upload': {
'http_method': 'POST',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/batch/{0}', # batch id
},
'filters': [
'add_id'
]
}
#
# attributes
#
attribute_properties = {
'load_security_labels': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '{0}/attributes/{1}/securityLabels'
},
'delete_security_label': {
'http_method': 'DELETE',
'owner_allowed': False,
'pagination': False,
'uri': '{0}/attributes/{1}/securityLabels/{2}'
},
'add_security_label': {
'http_method': 'POST',
'owner_allowed': False,
'pagination': False,
'uri': '{0}/attributes/{1}/securityLabels/{2}'
},
}
api_properties = {
'ADDRESSES': {
'properties': i_properties('addresses'),
'resource_key': 'address',
'uri_attribute': 'addresses',
},
'ADVERSARIES': {
'properties': g_properties('adversaries'),
'resource_key': 'adversary',
'uri_attribute': 'adversaries',
},
'CAMPAIGNS': {
'properties': g_properties('campaigns'),
'resource_key': 'campaign',
'uri_attribute': 'campaigns',
},
'DOCUMENTS': {
'properties': g_properties('documents'),
'resource_key': 'document',
'uri_attribute': 'documents',
},
'EMAIL_ADDRESSES': {
'properties': i_properties('emailAddresses'),
'resource_key': 'emailAddress',
'uri_attribute': 'emailAddresses',
},
'EMAILS': {
'properties': g_properties('emails'),
'resource_key': 'email',
'uri_attribute': 'emails',
},
'FILES': {
'properties': i_properties('files'),
'resource_key': 'file',
'uri_attribute': 'files',
},
'GROUPS': {
'properties': groups_properties,
'resource_key': 'group',
'uri_attribute': 'groups',
},
'HOSTS': {
'properties': i_properties('hosts'),
'resource_key': 'host',
'uri_attribute': 'hosts',
},
'INCIDENTS': {
'properties': g_properties('incidents'),
'resource_key': 'incident',
'uri_attribute': 'incidents',
},
'INDICATORS': {
'properties': indicators_properties,
'resource_key': 'indicator',
'uri_attribute': 'indicators',
},
'OWNERS': {
'properties': owners_properties,
'resource_key': 'owner',
'uri_attribute': 'owners',
},
# 'SECURITY_LABELS': {
# 'properties': 'security_labels_properties',
# 'resource_key': 'securityLabel',
# 'uri_attribute': 'securityLabels',
# },
# 'TAGS': {
# 'properties': 'tags_properties',
# 'resource_key': 'tag',
# 'uri_attribute': 'tags',
# },
'SIGNATURES': {
'properties': g_properties('signatures'),
'resource_key': 'signature',
'uri_attribute': 'signatures',
},
'TASKS': {
'properties': tasks_properties,
'resource_key': 'task',
'uri_attribute': 'tasks',
},
'THREATS': {
'properties': g_properties('threats'),
'resource_key': 'threat',
'uri_attribute': 'threats',
},
'URLS': {
'properties': i_properties('urls'),
'resource_key': 'url',
'uri_attribute': 'urls',
},
'VICTIMS': {
'properties': victims_properties,
'resource_key': 'victim',
'uri_attribute': 'victims',
},
'BATCH_JOBS': {
'properties': batch_job_properties,
'resource_key': 'batchJob',
'uri_attribute': 'batchJobs'
},
'ATTRIBUTES': {
'properties': attribute_properties,
'resource_key': 'attribute',
'uri_attribute': 'attributes'
}
}
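# Illustrative usage sketch (an assumption about the caller, mirroring the resource
# classes later in this file set, e.g. Signatures below):
#   props = api_properties['TASKS']['properties']
#   props['base']['uri']          # '/v2/tasks'
#   props['base']['http_method']  # 'GET'
#   props['base']['pagination']   # True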
# ===========
# Custom
# ===========
""" ApiProperties """
#
# i_properties
#
def custom_i_properties(api_branch):
""" """
properties = {
'add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch,
},
# bcs - check with Mohammad
'association_groups': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/groups', # indicator value
},
'association_indicators': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/indicators', # indicator value
},
'attributes': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/attributes', # indicator value
},
'attribute_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/attributes', # indicator value
},
'attribute_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/attributes/{1}', # indicator value, attribute id
},
'attribute_update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/attributes/{1}', # indicator value, attribute id
},
'base': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '',
},
'delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}', # indicator value
},
'false_positive_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/falsePositive'
},
'filters': [
'add_adversary_id',
'add_campaign_id',
'add_document_id',
'add_email_id',
'add_incident_id',
'add_indicator',
'add_security_label',
# 'add_signature_id',
'add_tag',
'add_task_id',
'add_threat_id',
'add_victim_id',
# post filters
'add_pf_attribute',
'add_pf_confidence',
'add_pf_date_added',
'add_pf_last_modified',
'add_pf_rating',
'add_pf_tag',
'add_pf_threat_assess_confidence',
'add_pf_threat_assess_rating',
'add_pf_type'],
# 'groups': {
# 'http_method': 'GET',
# 'owner_allowed': False,
# 'pagination': True,
# 'uri': '/v2/groups/{0}/{1}/indicators/' + api_branch # group type, group value
# },
'indicator': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}', # indicator value
},
'id': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}', # indicator value
},
'observation_count_get': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/observationCount' # indicator value
},
'observations_get': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/observations' # indicator value
},
'observations_add': {
'http_method': 'POST',
'owner_allowed': False,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/observations' # indicator value
},
'security_label_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/securityLabels/{1}', # indicator value, security label
},
'security_label_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/securityLabels/{1}', # indicator value, security label
},
'security_label_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/securityLabels', # indicator value
},
'security_labels': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/securityLabels/{0}/indicators/' + api_branch # security labels
},
'tag_add': {
'http_method': 'POST',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/tags/{1}', # indicator value, tag name
},
'tag_delete': {
'http_method': 'DELETE',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}/tags/{1}', # indicator value, tag name
},
'tags': {
'http_method': 'GET',
'owner_allowed': True,
'pagination': True,
'uri': '/v2/tags/{0}/indicators/' + api_branch, # tag name
},
'tags_load': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/indicators/' + api_branch + '/{0}/tags', # indicator value
},
# 'type_add': {
# 'http_method': 'POST',
# 'owner_allowed': False,
# 'pagination': True,
# 'uri': '??'
# },
'type_get': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/types/indicatorTypes/{0}' # indicator type (api_entity for custom)
},
'types_get': {
'http_method': 'GET',
'owner_allowed': False,
'pagination': True,
'uri': '/v2/types/indicatorTypes'
},
'update': {
'http_method': 'PUT',
'owner_allowed': True,
'pagination': False,
'uri': '/v2/indicators/' + api_branch + '/{0}', # indicator value
},
}
# properties['add_custom_type'] = {
# 'http_method': 'GET',
# 'uri': '/v2/indicators/{0}',
# 'owner_allowed': True,
# 'pagination': False
# }
return properties
def get_custom_indicator_properties(api_entity, api_branch):
return {
'properties': custom_i_properties(api_branch),
'resource_key': api_entity,
'uri_attribute': api_branch
}
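# Illustrative sketch with a hypothetical custom indicator type (the 'mutex'/'mutexes'
# names below are examples, not part of the library):
#   props = get_custom_indicator_properties('mutex', 'mutexes')
#   props['resource_key']                # 'mutex'
#   props['uri_attribute']               # 'mutexes'
#   props['properties']['base']['uri']   # '/v2/indicators/mutexes'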
```
#### File: threatconnect-python/threatconnect/OwnerMetricsObject.py
```python
from Config.ResourceType import ResourceType
def parse_metrics(metric):
""" """
#
# standard values
#
omo = OwnerMetricsObject()
omo.set_average_indicator_confidence(metric['averageIndicatorConfidence'])
omo.set_average_indicator_rating(metric['averageIndicatorRating'])
omo.set_metric_date(metric['metricDate'])
omo.set_total_address(metric['totalAddress'])
omo.set_total_adversary(metric['totalAdversary'])
omo.set_total_campaign(metric['totalCampaign'])
omo.set_total_document(metric['totalDocument'])
omo.set_total_email(metric['totalEmail'])
omo.set_total_emailaddress(metric['totalEmailAddress'])
omo.set_total_enriched_indicator(metric['totalEnrichedIndicator'])
omo.set_total_false_positive(metric['totalFalsePositive'])
omo.set_total_false_positive_daily(metric['totalFalsePositiveDaily'])
omo.set_total_file(metric['totalFile'])
omo.set_total_group(metric['totalGroup'])
omo.set_total_groupAttribute(metric['totalGroupAttribute'])
omo.set_total_group_indicator(metric['totalGroupIndicator'])
omo.set_total_host(metric['totalHost'])
omo.set_total_incident(metric['totalIncident'])
omo.set_total_indicator(metric['totalIndicator'])
omo.set_total_indicatorAttribute(metric['totalIndicatorAttribute'])
omo.set_total_observation_address(metric['totalObservationAddress'])
omo.set_total_observation_daily(metric['totalObservationDaily'])
omo.set_total_observation_emailaddress(metric['totalObservationEmailAddress'])
omo.set_total_observation_file(metric['totalObservationFile'])
omo.set_total_observation_host(metric['totalObservationHost'])
omo.set_total_observation_indicator(metric['totalObservationIndicator'])
omo.set_total_observation_url(metric['totalObservationUrl'])
omo.set_total_result(metric['totalResult'])
omo.set_total_signature(metric['totalSignature'])
omo.set_total_tag(metric['totalTag'])
omo.set_total_task(metric['totalTask'])
omo.set_total_threat(metric['totalThreat'])
omo.set_total_track(metric['totalTrack'])
omo.set_total_url(metric['totalUrl'])
return omo
class OwnerMetricsObject(object):
__slots__ = (
'_average_indicator_confidence',
'_average_indicator_rating',
'_metric_date',
'_total_address',
'_total_adversary',
'_total_campaign',
'_total_document',
'_total_email',
'_total_emailaddress',
'_total_enriched_indicator',
'_total_false_positive',
'_total_false_positive_daily',
'_total_file',
'_total_group',
'_total_groupAttribute',
'_total_group_indicator',
'_total_host',
'_total_incident',
'_total_indicator',
'_total_indicatorAttribute',
'_total_observation_address',
'_total_observation_daily',
'_total_observation_emailaddress',
'_total_observation_file',
'_total_observation_host',
'_total_observation_indicator',
'_total_observation_url',
'_total_result',
'_total_signature',
'_total_tag',
'_total_task',
'_total_threat',
'_total_track',
'_total_url'
)
def __init__(self):
""" """
self._average_indicator_confidence = None
self._average_indicator_rating = None
self._metric_date = None
self._total_address = None
self._total_adversary = None
self._total_campaign = None
self._total_document = None
self._total_email = None
self._total_emailaddress = None
self._total_enriched_indicator = None
self._total_false_positive = None
self._total_false_positive_daily = None
self._total_file = None
self._total_group = None
self._total_groupAttribute = None
self._total_group_indicator = None
self._total_host = None
self._total_incident = None
self._total_indicator = None
self._total_indicatorAttribute = None
self._total_observation_address = None
self._total_observation_daily = None
self._total_observation_emailaddress = None
self._total_observation_file = None
self._total_observation_host = None
self._total_observation_indicator = None
self._total_observation_url = None
self._total_result = None
self._total_signature = None
self._total_tag = None
self._total_task = None
self._total_threat = None
self._total_track = None
self._total_url = None
#
# unicode
#
@staticmethod
def _uni(data):
""" """
if data is None or isinstance(data, (int, list, dict)):
return data
elif isinstance(data, unicode):
return unicode(data.encode('utf-8').strip(), errors='ignore') # re-encode poorly encoded unicode
elif not isinstance(data, unicode):
return unicode(data, 'utf-8', errors='ignore')
else:
return data
""" shared metric resolution methods """
#
# average_indicator_confidence
#
@property
def average_indicator_confidence(self):
""" """
return self._average_indicator_confidence
def set_average_indicator_confidence(self, data):
""" """
self._average_indicator_confidence = data
#
# average_indicator_rating
#
@property
def average_indicator_rating(self):
""" """
return self._average_indicator_rating
def set_average_indicator_rating(self, data):
""" """
self._average_indicator_rating = data
#
# metric_date
#
@property
def metric_date(self):
""" """
return self._metric_date
def set_metric_date(self, data):
""" """
self._metric_date = data
#
# total_address
#
@property
def total_address(self):
""" """
return self._total_address
def set_total_address(self, data):
""" """
self._total_address = data
#
# total_adversary
#
@property
def total_adversary(self):
""" """
return self._total_adversary
def set_total_adversary(self, data):
""" """
self._total_adversary = data
#
# total_campaign
#
@property
def total_campaign(self):
""" """
return self._total_campaign
def set_total_campaign(self, data):
""" """
self._total_campaign = data
#
# total_document
#
@property
def total_document(self):
""" """
return self._total_document
def set_total_document(self, data):
""" """
self._total_document = data
#
# total_email
#
@property
def total_email(self):
""" """
return self._total_email
def set_total_email(self, data):
""" """
self._total_email = data
#
# total_emailaddress
#
@property
def total_emailaddress(self):
""" """
return self._total_emailaddress
def set_total_emailaddress(self, data):
""" """
self._total_emailaddress = data
#
# total_enriched_indicator
#
@property
def total_enriched_indicator(self):
""" """
return self._total_enriched_indicator
def set_total_enriched_indicator(self, data):
""" """
self._total_enriched_indicator = data
#
# total_false_positive
#
@property
def total_false_positive(self):
""" """
return self._total_false_positive
def set_total_false_positive(self, data):
""" """
self._total_false_positive = data
#
# total_false_positive_daily
#
@property
def total_false_positive_daily(self):
""" """
return self._total_false_positive_daily
def set_total_false_positive_daily(self, data):
""" """
self._total_false_positive_daily = data
#
# total_file
#
@property
def total_file(self):
""" """
return self._total_file
def set_total_file(self, data):
""" """
self._total_file = data
#
# total_group
#
@property
def total_group(self):
""" """
return self._total_group
def set_total_group(self, data):
""" """
self._total_group = data
#
# total_groupAttribute
#
@property
def total_groupAttribute(self):
""" """
return self._total_groupAttribute
def set_total_groupAttribute(self, data):
""" """
self._total_groupAttribute = data
#
# total_group_indicator
#
@property
def total_group_indicator(self):
""" """
return self._total_group_indicator
def set_total_group_indicator(self, data):
""" """
self._total_group_indicator = data
#
# total_host
#
@property
def total_host(self):
""" """
return self._total_host
def set_total_host(self, data):
""" """
self._total_host = data
#
# total_incident
#
@property
def total_incident(self):
""" """
return self._total_incident
def set_total_incident(self, data):
""" """
self._total_incident = data
#
# total_indicator
#
@property
def total_indicator(self):
""" """
return self._total_indicator
def set_total_indicator(self, data):
""" """
self._total_indicator = data
#
# total_indicatorAttribute
#
@property
def total_indicatorAttribute(self):
""" """
return self._total_indicatorAttribute
def set_total_indicatorAttribute(self, data):
""" """
self._total_indicatorAttribute = data
#
# total_observation_address
#
@property
def total_observation_address(self):
""" """
return self._total_observation_address
def set_total_observation_address(self, data):
""" """
self._total_observation_address = data
#
# total_observation_daily
#
@property
def total_observation_daily(self):
""" """
return self._total_observation_daily
def set_total_observation_daily(self, data):
""" """
self._total_observation_daily = data
#
# total_observation_emailaddress
#
@property
def total_observation_emailaddress(self):
""" """
return self._total_observation_emailaddress
def set_total_observation_emailaddress(self, data):
""" """
self._total_observation_emailaddress = data
#
# total_observation_file
#
@property
def total_observation_file(self):
""" """
return self._total_observation_file
def set_total_observation_file(self, data):
""" """
self._total_observation_file = data
#
# total_observation_host
#
@property
def total_observation_host(self):
""" """
return self._total_observation_host
def set_total_observation_host(self, data):
""" """
self._total_observation_host = data
#
# total_observation_indicator
#
@property
def total_observation_indicator(self):
""" """
return self._total_observation_indicator
def set_total_observation_indicator(self, data):
""" """
self._total_observation_indicator = data
#
# total_observation_url
#
@property
def total_observation_url(self):
""" """
return self._total_observation_url
def set_total_observation_url(self, data):
""" """
self._total_observation_url = data
#
# total_result
#
@property
def total_result(self):
""" """
return self._total_result
def set_total_result(self, data):
""" """
self._total_result = data
#
# total_signature
#
@property
def total_signature(self):
""" """
return self._total_signature
def set_total_signature(self, data):
""" """
self._total_signature = data
#
# total_tag
#
@property
def total_tag(self):
""" """
return self._total_tag
def set_total_tag(self, data):
""" """
self._total_tag = data
#
# total_task
#
@property
def total_task(self):
""" """
return self._total_task
def set_total_task(self, data):
""" """
self._total_task = data
#
# total_threat
#
@property
def total_threat(self):
""" """
return self._total_threat
def set_total_threat(self, data):
""" """
self._total_threat = data
#
# total_track
#
@property
def total_track(self):
""" """
return self._total_track
def set_total_track(self, data):
""" """
self._total_track = data
#
# total_url
#
@property
def total_url(self):
""" """
return self._total_url
def set_total_url(self, data):
""" """
self._total_url = data
#
# add print method
#
def __str__(self):
"""allow object to be displayed with print"""
printable_string = '\n{0!s:_^80}\n'.format('Metric')
#
# retrievable methods
#
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('average_indicator_confidence', self.average_indicator_confidence))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('average_indicator_rating', self.average_indicator_rating))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('metric_date', self.metric_date))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_address', self.total_address))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_adversary', self.total_adversary))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_campaign', self.total_campaign))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_document', self.total_document))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_email', self.total_email))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_emailaddress', self.total_emailaddress))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_enriched_indicator', self.total_enriched_indicator))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_false_positive', self.total_false_positive))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_false_positive_daily', self.total_false_positive_daily))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_file', self.total_file))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_group', self.total_group))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_groupAttribute', self.total_groupAttribute))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_group_indicator', self.total_group_indicator))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_host', self.total_host))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_incident', self.total_incident))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_indicator', self.total_indicator))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_indicatorAttribute', self.total_indicatorAttribute))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_address', self.total_observation_address))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_daily', self.total_observation_daily))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_emailaddress', self.total_observation_emailaddress))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_file', self.total_observation_file))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_host', self.total_observation_host))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_indicator', self.total_observation_indicator))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_observation_url', self.total_observation_url))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_result', self.total_result))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_signature', self.total_signature))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_tag', self.total_tag))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_task', self.total_task))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_threat', self.total_threat))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_track', self.total_track))
printable_string += (' {0!s:<32}: {1!s:<50}\n'.format('total_url', self.total_url))
return printable_string
"""
"data": {
"ownerMetric": [
{
"metricDate": "2016-03-14",
"totalIndicator": 191362,
"totalHost": 12373,
"totalAddress": 145772,
"totalEmailAddress": 1190,
"totalFile": 4095,
"totalUrl": 27932,
"totalGroup": 110,
"totalThreat": 1,
"totalIncident": 107,
"totalEmail": 0,
"totalAdversary": 1,
"totalSignature": 0,
"totalTask": 4,
"totalDocument": 1,
"totalTag": 73,
"totalTrack": 0,
"totalResult": 0,
"totalIndicatorAttribute": 508609,
"totalGroupAttribute": 551,
"averageIndicatorRating": 2.876296,
"averageIndicatorConfidence": 47.8519,
"totalEnrichedIndicator": 190996,
"totalGroupIndicator": 107,
"totalObservationDaily": 8,
"totalObservationIndicator": 10,
"totalObservationAddress": 16,
"totalObservationEmailAddress": 0,
"totalObservationFile": 0,
"totalObservationHost": 0,
"totalObservationUrl": 0,
"totalFalsePositiveDaily": 2,
"totalFalsePositive": 2
},
snipped
"""
```
#### File: threatconnect/Resources/Signatures.py
```python
import re
import types
""" custom """
from threatconnect import ApiProperties
from threatconnect import GroupFilterMethods
from threatconnect.Config.ResourceType import ResourceType
from threatconnect.FilterObject import FilterObject
from threatconnect.GroupObject import GroupObjectAdvanced
from threatconnect.RequestObject import RequestObject
from threatconnect.Resource import Resource
class Signatures(Resource):
""" """
def __init__(self, tc_obj):
""" """
super(Signatures, self).__init__(tc_obj)
self._filter_class = SignatureFilterObject
self._resource_type = ResourceType.SIGNATURES
def _method_wrapper(self, resource_object):
""" """
return GroupObjectAdvanced(self.tc, self, resource_object)
@property
def default_request_object(self):
""" default request when no filters are provided """
resource_properties = ApiProperties.api_properties[self._resource_type.name]['properties']
# create default request object for non-filtered requests
request_object = RequestObject()
request_object.set_http_method(resource_properties['base']['http_method'])
request_object.set_owner_allowed(resource_properties['base']['owner_allowed'])
request_object.set_request_uri(resource_properties['base']['uri'])
request_object.set_resource_pagination(resource_properties['base']['pagination'])
request_object.set_resource_type(self._resource_type)
return request_object
class SignatureFilterObject(FilterObject):
""" """
def __init__(self, tc_obj):
""" """
super(SignatureFilterObject, self).__init__(tc_obj)
self._owners = []
# define properties for resource type
self._resource_type = ResourceType.SIGNATURES
self._resource_properties = ApiProperties.api_properties[self._resource_type.name]['properties']
#
# add_obj filter methods
#
for method_name in self._resource_properties['filters']:
if re.findall('add_pf_', method_name):
self.add_post_filter_names(method_name)
else:
self.add_api_filter_name(method_name)
method = getattr(GroupFilterMethods, method_name)
setattr(self, method_name, types.MethodType(method, self))
@property
def default_request_object(self):
""" default request when only a owner filter is provided """
request_object = RequestObject()
request_object.set_description('filter by owner')
request_object.set_http_method(self._resource_properties['base']['http_method'])
request_object.set_owner_allowed(self._resource_properties['base']['owner_allowed'])
request_object.set_request_uri(self._resource_properties['base']['uri'])
request_object.set_resource_pagination(self._resource_properties['base']['pagination'])
request_object.set_resource_type(self._resource_type)
return request_object
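# Illustrative sketch (availability of each name depends on the 'filters' list that
# ApiProperties defines for SIGNATURES): the loop in __init__ binds every filter name
# as a method on the filter object, so after construction one can call, e.g.
#   signature_filter.add_tag(...)            # API filter
#   signature_filter.add_pf_date_added(...)  # post filter (names with the add_pf_ prefix)
# where the method bodies come from GroupFilterMethods.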
```
#### File: threatconnect-python/threatconnect/SecurityLabelObject.py
```python
def parse_security_label(sl_dict):
""" """
sl = SecurityLabelObject()
#
# standard values
#
sl.set_date_added(sl_dict['dateAdded'])
sl.set_description(sl_dict['description'])
sl.set_name(sl_dict['name'])
return sl
class SecurityLabelObject(object):
__slots__ = (
'_date_added',
'_description',
'_name',
)
def __init__(self):
self._date_added = None
self._description = None
self._name = None
#
# unicode
#
@staticmethod
def _uni(data):
""" """
if data is None or isinstance(data, (int, list, dict)):
return data
elif isinstance(data, unicode):
return unicode(data.encode('utf-8').strip(), errors='ignore') # re-encode poorly encoded unicode
elif not isinstance(data, unicode):
return unicode(data, 'utf-8', errors='ignore')
else:
return data
""" shared dns resolution methods """
#
# date_added
#
@property
def date_added(self):
""" """
return self._date_added
def set_date_added(self, data):
"""Read-Only dns resolution metadata"""
self._date_added = self._uni(data)
#
# description
#
@property
def description(self):
""" """
return self._description
def set_description(self, data):
"""Read-Only dns resolution metadata"""
self._description = self._uni(data)
#
# name
#
@property
def name(self):
""" """
return self._name
def set_name(self, data):
"""Read-Only dns name metadata"""
self._name = self._uni(data)
#
# add print method
#
def __str__(self):
"""allow object to be displayed with print"""
printable_string = '\n{0!s:_^80}\n'.format('Security Label Properties')
#
# retrievable methods
#
printable_string += '{0!s:40}\n'.format('Retrievable Methods')
printable_string += (' {0!s:<28}: {1!s:<50}\n'.format('name', self.name))
printable_string += (' {0!s:<28}: {1!s:<50}\n'.format('description', self.description))
printable_string += (' {0!s:<28}: {1!s:<50}\n'.format('date_added', self.date_added))
return printable_string
``` |
{
"source": "JhurgenMrz/python-course",
"score": 4
} |
#### File: python-course/python-basic/average_temps.py
```python
def average_temps(temps):
sum_of_temps = 0
for temp in temps:
sum_of_temps += temp
return sum_of_temps / len(temps)
if __name__ == '__main__':
temps = [12,32,23,24,28,24]
result = average_temps(temps)
print(result)
```
#### File: python-course/python-basic/factorial.py
```python
def factorial(number):
if number == 0:
return 1
return number * factorial(number-1)
if __name__ == '__main__':
number = int(input('Enter a number: '))
print(factorial(number))
```
#### File: python-course/python-basic/random_number.py
```python
import random
def run():
number_found = False
random_number = random.randint(0,20)
while not number_found:
number = int(input('Try a number: '))
if number == random_number:
print('Congratulations, you found the number!')
print(random_number)
number_found = True
elif number > random_number:
print('The number is lower')
else:
print('The number is higher')
if __name__ == '__main__':
run()
``` |
{
"source": "jhurley13/automating-cbc",
"score": 2
} |
#### File: automating-cbc/common/common_paths.py
```python
from pathlib import Path
# Base Path
base_path = Path.cwd()
# Data paths
data_path = base_path / 'data'
cache_path = base_path / 'cache'
# Can Override
local_parameters_path = base_path / 'parameters' / 'Local'
system_parameters_path = base_path / 'parameters' / 'System'
kml_path = local_parameters_path / 'KML'
system_translations_path = system_parameters_path
system_translations_name_base = 'SystemTranslations'
# Input paths
inputs_path = base_path / 'Inputs'
inputs_parse_path = inputs_path / 'Parse'
inputs_email_path = inputs_path / 'EmailToContacts'
inputs_merge_path = inputs_path / 'Merge'
inputs_count_path = inputs_path / 'Count'
# - Service-Parse
# - Service-EmailToContacts
# - Service-ProcessEBird
# - Service-Weather
# - Service-RareBird
# - Service-Merge
raw_data_path = data_path / 'raw'
interim_data_path = data_path / 'interim'
processed_data_path = data_path / 'processed'
external_data_path = data_path / 'external'
reference_path = base_path / 'reference'
# Reports paths
reports_path = base_path / 'reports'
figures_path = reports_path / 'figures'
reports_setup_path = reports_path / 'setup'
reports_debug_path = reports_path / 'debug'
rare_bird_output_path = reports_path / 'rare-bird-forms'
#################
translations_path = reports_path / 'PossibleLocalTranslations.csv'
translations_xl_path = reports_path / 'PossibleLocalTranslations.xlsx'
#################
# HTML construction paths
html_base_path = base_path / 'xexperiments' / 'html_templates'
html_path_pre_table_1 = html_base_path / 'pre-table1-p1.html'
html_path_between_tables = html_base_path / 'between-tables.html'
html_path_post_table_2 = html_base_path / 'post-table2-p1.html'
html_path_participants_pre_table = html_base_path / 'participants_pre_table.html'
html_path_participants_post_table = html_base_path / 'participants_post_table.html'
# Outputs paths
outputs_path = base_path / 'Outputs'
daily_counts_path = reports_path / 'daily_counts.csv'
daily_checklists_path = reports_path / 'daily_checklists.csv'
html_path_name_fmt = 'tally_sheet_p{0}.html'
html_path_output_dir = reports_path
checklist_filers_path = reports_path / 'all_checklist_filers.csv'
updated_checklist_path = reports_path / 'Summary.xlsx'
debug_path = base_path / 'debug'
html_path_participants_list = reports_path / 'local_participants_list.html'
excel_path_participants_list = reports_path / 'local_participants_list.xlsx'
pdf_conversion_details_path = reports_debug_path / 'pdf-conversion-details'
final_checklist_path = reports_path / 'FinalChecklist-printable.xlsx'
local_checklist_generated_path = reports_path / 'LocalChecklist-generated.xlsx'
# Cache/Interim paths
taxonomy_cache_path = cache_path / 'taxonomy.csv'
reverse_geolocation_cache_path = cache_path / 'ReverseGeolocationCache.csv'
checklist_ratings_cache_path = cache_path / 'checklist_ratings.csv'
ebird_visits_path = cache_path / 'visits'
ebird_historic_path = cache_path / 'historic'
ebird_details_path = cache_path / 'details'
# Credentials
eBirdCredential_path = Path.home() / 'eBirdCredentials.yml'
credentials_openweather_path = Path.home() / 'credentials-openweather.yml'
# -------- Above here copied from AutomatingCBC ---------
taxonomy_path = reference_path / 'Taxonomy'
ebird_taxonomy_v2019_path = taxonomy_path / 'eBird_Taxonomy_v2019.xlsx'
ml_checklists_path = reference_path / 'MLChecklists'
test_data_path = base_path / 'tests'
samples_path = base_path / 'samples'
def create_project_paths():
default_mode = 0o755
# data_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# raw_data_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# interim_data_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# processed_data_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# external_data_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
reports_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# figures_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
debug_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# Inputs paths
inputs_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
inputs_parse_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
inputs_email_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
inputs_merge_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
inputs_count_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# Output path
outputs_path.mkdir(mode=default_mode, parents=False, exist_ok=True)
# Cache paths
for fpath in [cache_path, ebird_visits_path, ebird_historic_path, ebird_details_path]:
fpath.mkdir(mode=default_mode, parents=False, exist_ok=True)
```
#### File: automating-cbc/common/ebird_summary.py
```python
import re
import sys
import traceback
from typing import List, Tuple, Any, Optional, Dict
import pandas as pd
from utilities_excel import excel_columns
from parameters import Parameters
from taxonomy import Taxonomy
from write_final_checklist import write_final_checklist_spreadsheet
def extract_locid_from_colname(colname):
mm = re.match(r'^(L[0-9]+)', colname)
if mm:
return mm.group(1)
return None
def extract_obs_time_from_obsdt(obsdt):
mm = re.match(r'^[0-9]{4}-[0-9]{2}-[0-9]{2} ([0-9]{2}:[0-9]{2})', obsdt)
if mm:
return mm.group(1)
return None
def extract_obstime_from_colname(colname):
mm = re.search(r'([0-9]{2}:[0-9]{2})', colname)
if mm:
return mm.group(1)
return None
def index_of_first_subtotal_column(summary: pd.DataFrame):
for ix, col in enumerate(summary.columns):
mm = re.match(r'^(L[0-9]+)', col)
if mm:
return ix
return None
def filter_additional_rare(taxonomy: Taxonomy, additional_rare: List[str]) -> List[str]:
rare_species = []
for cn in additional_rare:
row = taxonomy.find_local_name_row(cn)
if row is not None and row.Category == 'species':
rare_species.append(cn)
return rare_species
def reorder_team_columns(team_cols: List[str], std_columns: List[str]) -> List[str]:
reordered_columns = std_columns.copy()
# Each team_col is of the form
# f'{row0.locId}-{row0.Name}-{obstime}-{row0.SectorName}-{subid}'
team_col_re = re.compile(
r'^(?P<locId>L[0-9]+)-(?P<name>.*)-(?P<obstime>[0-9]{2}:[0-9]{2})-'
r'(?P<sector>.*)-(?P<subId>S[0-9]+)$'
)
team_headers = ['locId', 'name', 'obstime', 'sector', 'subId']
try:
xteam_cols = []
for team_col in team_cols:
mm = team_col_re.search(team_col)
dd = dict(zip(team_headers, [mm.group(gid) for gid in team_headers]))
xteam_cols.append(dd)
except Exception as ee:
print(team_col, team_cols, ee)
traceback.print_exc(file=sys.stdout)
# print(team_cols)
team_cols_df = pd.DataFrame(xteam_cols).sort_values(by=['locId', 'obstime', 'name'])
new_team_column_order = []
for ix, row in team_cols_df.iterrows():
col = f'{row.locId}-{row["name"]}-{row.obstime}-{row.sector}-{row.subId}'
new_team_column_order.append(col)
reordered_columns.extend(new_team_column_order)
return reordered_columns
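# Illustrative sketch: a team column such as
# 'L13065376-Jeff Manker-09:24-East Sector-S78154180' (sector name hypothetical) is
# parsed by team_col_re into locId/name/obstime/sector/subId, and the team columns are
# re-appended after std_columns sorted by locId, then obstime, then name.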
def create_row_for_missing_species(common_name: str,
summary: pd.DataFrame,
taxonomy: Taxonomy) -> Optional[Tuple[pd.Series, bool]]:
# can also be SPUH, ISSF etc., just something that wasn't on official list
# The number of columns may vary based on the checklist, but we fill
# in the ones that we know must be there
taxonomy_row = taxonomy.find_local_name_row(common_name)
if taxonomy_row is None: # i.e. not found, drop it
return None
new_row = pd.Series([''] * len(summary.columns), index=summary.columns)
new_row['Group'] = taxonomy_row.SPECIES_GROUP
new_row['CommonName'] = common_name
new_row['TaxonOrder'] = taxonomy_row.TAXON_ORDER
new_row['NACC_SORT_ORDER'] = taxonomy_row.NACC_SORT_ORDER
new_row['ABA_SORT_ORDER'] = taxonomy_row.ABA_SORT_ORDER
new_row['Category'] = taxonomy_row.Category
# Filled in later. This is the "Grand Total", not the total from an individual checklist
new_row['Total'] = 0
# Not on official list, so mark it Rare if it's a species (not SPUH etc.)
rarity = taxonomy_row.Category == 'species'
if rarity:
new_row['Rare'] = 'X'
return new_row, rarity
def create_category_column(summary: pd.DataFrame, taxonomy: Taxonomy) -> list:
categories = []
for common_name in summary.CommonName.values:
taxonomy_row = taxonomy.find_local_name_row(common_name)
category = '' if taxonomy_row is None else taxonomy_row.Category
categories.append(category)
return categories
def create_personal_checklist_columns(sector_checklist_meta: pd.DataFrame) -> Dict[str, str]:
# Instead of just subId, make a more descriptive column header name
# df.rename(columns={"A": "a", "B": "c"})
# Format is {locid}-{subid}-{obsdt}-{name}
column_renames = {}
for ix, row in sector_checklist_meta.iterrows():
obstime = extract_obs_time_from_obsdt(row.obsDt)
if obstime is None:
obstime = '12:01'
colname = f'{row.locId}-{row.subId}-{obstime}-{row.Name}'
column_renames[row.subId] = colname
return column_renames
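# Illustrative sketch: a checklist_meta row with locId='L13065376', subId='S78154180',
# Name='Jeff Manker' and an obsDt of the form 'YYYY-MM-DD 09:24' (see the
# duplicate-location example further below) renames the 'S78154180' column to
# 'L13065376-S78154180-09:24-Jeff Manker'; when obsDt carries no time, '12:01' is used.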
def create_ebird_summary(summary_base: pd.DataFrame,
personal_checklists: pd.DataFrame,
checklist_meta: pd.DataFrame,
circle_abbrev,
parameters: Parameters,
sector_name: str,
taxonomy: Taxonomy,
output_path) -> Tuple[Any, List[str]]:
# Each checklist becomes a column in the summary sheet
# Start of big processing loop
summary = summary_base.copy()
# team_cols = set()
summary_common_names = list(summary.CommonName.values)
checklist_meta = checklist_meta.copy()[checklist_meta.sharing != 'secondary']
checklist_meta.sort_values(by=['location_group', 'locId', 'obsDt', 'groupId', 'Name'],
na_position='first', inplace=True)
sector_subids = list(personal_checklists.subId.values)
sector_checklist_meta = checklist_meta[checklist_meta.subId.isin(sector_subids)]
# Group CommonName Rare Total TaxonOrder NACC_SORT_ORDER
summary['FrozenTotal'] = 0 # placeholder
if 'Category' not in summary.columns:
summary['Category'] = create_category_column(summary, taxonomy)
std_columns = ['Group', 'CommonName', 'Rare', 'Total', 'FrozenTotal',
'Category', 'TaxonOrder', 'NACC_SORT_ORDER', 'ABA_SORT_ORDER']
summary = summary[std_columns]
# Sector checklists may have added species not on the template
# Add on rows for these new species
additional_rare = []
names_to_add = set(personal_checklists.CommonName.values) - set(summary_common_names)
if not names_to_add == set():
# print(f'Need to add: {names_to_add}')
# blank_row = pd.Series([''] * len(summary.columns), index=summary.columns)
rows_to_add = []
for common_name in names_to_add:
row_rarity = create_row_for_missing_species(common_name, summary, taxonomy)
if row_rarity is None:
continue
row, rarity = row_rarity
rows_to_add.append(row)
if rarity:
additional_rare.append(common_name)
summary = summary.append(rows_to_add, ignore_index=True)
summary_common_names.extend(list(names_to_add))
# Re-sort by TaxonOrder
# Sorting has to be done before we create any formulae for Totals
summary = summary.sort_values(by=['NACC_SORT_ORDER']).reset_index(drop=True)
# Use the order from checklist_meta and add a column to summary for each checklist
# personal_columns = []
for subid in sector_checklist_meta.subId.values:
pcsub = personal_checklists[personal_checklists.subId == subid]
species_totals = []
for common_name in summary.CommonName.values:
species_row = pcsub[pcsub.CommonName == common_name]
species_total = 0 if species_row.empty else species_row.Total.values[0]
species_totals.append(species_total)
# Add the column to summary
summary[subid] = species_totals
# Don't think we need the filter any more, since that was done above
rare_species = filter_additional_rare(taxonomy, additional_rare)
if len(rare_species) > 0:
print(f' Requires rare bird form: {", ".join(rare_species)} [not on master list]')
# Re-sort by TaxonOrder
# Sorting has to be done before we create any formulae for Totals
summary = summary.sort_values(by=['NACC_SORT_ORDER']).reset_index(drop=True)
# We don't rename columns until right before we create Excel file
team_cols = sector_checklist_meta.subId.values
# The complexity here is because we can have cases where a single birder birded
# near-duplicate locations. This means location_group is e.g. L13065376+L13065792
# but each of these checklist should be considered separate (use SUM not MAX)
# Example in CAMP 2020/Rancho San Carlos:
# L13065376-S78154180-09:24-Jeff Manker | L13065792-S78156572-10:10-Jeff Manker |
# L13065792-S78184574-10:44-Jeff Manker
mask = sector_checklist_meta.location_group.isnull()
usemaxtmp = sector_checklist_meta[~mask]
single_birder_locids = set()
for locgrp, grp in usemaxtmp.groupby(['location_group']):
# print(locgrp)
if len(set(grp.Name)) == 1: # Same birder but possible location dups
single_birder_locids |= set(grp.locId.values)
mask_single = checklist_meta.locId.isin(single_birder_locids)
mask |= mask_single
use_sum_locids = sector_checklist_meta[mask].locId.values
# Remove duplicates but keep in order
use_max_locids = list(dict.fromkeys(sector_checklist_meta[~mask].locId.values))
# These are the columns we can just total up
use_sum_subids = sector_checklist_meta[sector_checklist_meta.locId.isin(use_sum_locids)].subId.values
use_sum_total = summary[use_sum_subids].apply(pd.to_numeric).fillna(0).sum(axis=1).astype(int)
use_max_total = 0
# ToDo: logic is duplicated below
for locid in use_max_locids:
# subIds are the column names right now
# subids = sector_checklist_meta[sector_checklist_meta.locId == locid].subId.values
mask = [(lg.startswith(locid) if lg is not None else False) for lg in
checklist_meta.location_group.values]
subids = checklist_meta[mask].subId.values
# This can be empty if it is not the first in a set of duplicate locations
if len(subids) == 0:
continue
# print(locid, subids)
max_vals = summary[subids].apply(pd.to_numeric).fillna(0).max(axis=1).astype(int)
use_max_total += max_vals
summary_total = use_sum_total + use_max_total
# print(sum(summary_total))
# Values computed by formulae are only evaluated after a workbook has been opened and
# saved by Excel. This means if we create these files but never open them, the Total
# field will show up as 0 (a string formula converted to numeric)
# Add this so that service_merge/merge_checklists has an actual value to use
# ToDo: fix summary_total to use SUM/MAX
summary['FrozenTotal'] = summary_total
# Actually, make it a formula
# Has to be after sorting
# base_columns = ['Group', 'CommonName', 'Rare', 'Total', 'TaxonOrder']
# Group CommonName Rare Total Ad Im CountSpecial
# =SUM($F5:$Q5)
col_letters = excel_columns()
# std_columns = ['Group', 'CommonName', 'Rare', 'Total', 'Category', 'TaxonOrder',
# 'NACC_SORT_ORDER']
sum_start_index = len(std_columns)
sum_end_index = len(std_columns) + len(use_sum_locids) - 1
sum_start_col = col_letters[sum_start_index]
sum_end_col = col_letters[sum_end_index]
# Start template for total with non-duplicate columns
sum_formula_template = f'=SUM(${sum_start_col}INDEX:${sum_end_col}INDEX)'
header_cell_groups = []
max_formula_totals = []
max_formula = None
for locid in use_max_locids:
# subIds are the column names right now
# subids = sector_checklist_meta[sector_checklist_meta.locId == locid].subId.values
mask = [(lg.startswith(locid) if lg is not None else False) for lg in
checklist_meta.location_group.values]
subids = checklist_meta[mask].subId.values
# This can be empty if it is not the first in a set of duplicate locations
if len(subids) == 0:
continue
max_start_index = list(summary.columns).index(subids[0])
max_end_index = list(summary.columns).index(subids[-1])
max_start_col = col_letters[max_start_index]
max_end_col = col_letters[max_end_index]
max_formula_template = f'MAX(${max_start_col}INDEX:${max_end_col}INDEX)'
max_formula_totals.append(max_formula_template)
# Collect up the header cells so we can color different groups
header_cell_group = f'${max_start_col}1:${max_end_col}1'
header_cell_groups.append(header_cell_group)
if len(max_formula_totals):
max_formula = '+'.join(max_formula_totals)
total_formula = []
for ix in range(2, summary.shape[0] + 2):
sft = sum_formula_template.replace('INDEX', str(ix))
tf_sum = f'{sft}'
if max_formula is None:
total_formula.append(tf_sum)
else:
mft = max_formula.replace('INDEX', str(ix))
tf_max = f'{mft}'
total_formula.append(tf_sum + '+' + tf_max)
# print(f' {total_formula[0]}')
summary['Total'] = total_formula
# Add last row for Total and each Sector total
totals_row = pd.Series([''] * len(summary.columns), index=summary.columns)
totals_row['Group'] = 'Totals'
totals_row['TaxonOrder'] = 99999
totals_row['NACC_SORT_ORDER'] = taxonomy.INVALID_NACC_SORT_ORDER
totals_row['ABA_SORT_ORDER'] = taxonomy.INVALID_NACC_SORT_ORDER
# Formula for Grand Total, e.g. =SUM($D$2:$D$245)
total_col_letter = col_letters[std_columns.index('Total')]
total_formula = f'=SUM(${total_col_letter}2:${total_col_letter}{summary.shape[0] + 1})'
totals_row.Total = total_formula
# sector_cols = [xs for xs in summary.columns if xs.startswith('Sector')]
sector_totals = summary[team_cols].apply(pd.to_numeric).fillna(0).sum(axis=0).astype(int)
for col, st in sector_totals.items():
totals_row[col] = st
summary = summary.append(totals_row, ignore_index=True)
# Rename columns to more human readable form
newcols = create_personal_checklist_columns(sector_checklist_meta)
summary.rename(columns=newcols, inplace=True)
# Don't hide 'Rare' since this will be frequently used in a filter
cols_to_hide = ['D', 'Difficulty', 'Adult', 'Immature',
'W-morph', 'B-Morph', 'Ad', 'Im', 'CountSpecial', 'FrozenTotal']
cols_to_highlight = list(set(summary.columns) & {'Total', 'Adult/White', 'Immature/Blue'})
outname = output_path / f'{circle_abbrev}-EBird-Summary-{sector_name}.xlsx'
write_final_checklist_spreadsheet(summary, outname,
parameters.parameters,
additional_sheets=None,
cols_to_hide=cols_to_hide,
cols_to_highlight=cols_to_highlight,
header_cell_groups=header_cell_groups
)
return summary, rare_species
```
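The `Total` column above is assembled from Excel formula fragments in which the row number is left as the literal placeholder `INDEX` and substituted once per data row. A minimal standalone sketch of that substitution, with illustrative column letters rather than ones taken from a real summary:

```python
# Minimal sketch of the INDEX-placeholder substitution used for the Total column.
# Column letters are illustrative only.
sum_formula_template = '=SUM($FINDEX:$HINDEX)'   # non-duplicate checklist columns
max_formula = 'MAX($IINDEX:$JINDEX)'             # near-duplicate locations use MAX

total_formula = []
for ix in range(2, 5):  # Excel data rows start at 2 (row 1 holds the headers)
    sft = sum_formula_template.replace('INDEX', str(ix))
    mft = max_formula.replace('INDEX', str(ix))
    total_formula.append(sft + '+' + mft)

print(total_formula[0])  # =SUM($F2:$H2)+MAX($I2:$J2)
```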
#### File: automating-cbc/common/filers_matrix.py
```python
import pandas as pd
from common_paths import reports_path
from typing import Tuple, List
def create_filers_matrix(circle_prefix: str, visits_of_interest: pd.DataFrame,
location_data: pd.DataFrame,
xoutputs_path=reports_path) -> Tuple[pd.DataFrame, List[str]]:
voi = visits_of_interest.copy()
voi = voi.merge(location_data, how='left', on='locId').drop_duplicates(['Name']).sort_values(
by=['Name']).reset_index(drop=True).fillna('')
cols_to_keep = ['locId', 'Name', 'LocationName']
filers_matrix = voi[cols_to_keep]
unique_circle_filers = list(set(filers_matrix.Name))
# filers_matrix.to_csv(xoutputs_path / f'{circle_prefix}filers_matrix.csv', index=False)
return filers_matrix, unique_circle_filers
```
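A minimal usage sketch for `create_filers_matrix`; the two input frames are made up and carry only the columns the function actually touches (assumes the automating-cbc `common/` modules are importable):

```python
import pandas as pd
from filers_matrix import create_filers_matrix  # assumes common/ is on the path

visits = pd.DataFrame({
    'locId': ['L100', 'L101'],
    'Name': ['Ann Birder', 'Bob Counter'],       # hypothetical filers
})
locations = pd.DataFrame({
    'locId': ['L100', 'L101'],
    'LocationName': ['North Pond', 'South Ridge'],
})

matrix, filers = create_filers_matrix('XYZ-', visits, locations)
print(filers)  # e.g. ['Ann Birder', 'Bob Counter'] (set order is not guaranteed)
```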
#### File: automating-cbc/common/nlp_context.py
```python
from typing import Tuple, Set
import inflect
from more_itertools import flatten
from singleton_decorator import singleton
from spacy.lang.en import English
# Local imports
# import acbc_utilities as autil
@singleton
class NLPContext(object):
"""Combined Taxonomy
Attributes:
"""
# Call taxonomy_tokens and taxonomy_family_tokens, usually like:
# tokens_all, _, _ = nlp_context.taxonomy_tokens()
# cn_tokens => _tokens_common_name
#
def __init__(self, taxonomy=None, reports_path=None):
self.reports_path = reports_path
self._taxonomy = taxonomy
self.nlp = English()
self.bird_stop_words = {'bird', 'birds', 'summer'} # , 'species', 'and', 'may', 'allies'
# For singular/plural determinations
self._inflect_engine = inflect.engine()
self._create_all_taxonomy_tokens()
def _create_all_taxonomy_tokens(self):
# tk_all, tk_common, tk_scientific = self._create_tokens_for_taxonomy(self._taxonomy.taxonomy_restricted)
# self._tokens_restricted_common_scientific = tk_all
# self._tokens_restricted_common_name = tk_common
# self._tokens_restricted_scientific_name = tk_scientific
tk_all, tk_common, tk_scientific = self._create_tokens_for_taxonomy(self._taxonomy.taxonomy)
self._tokens_common_scientific = tk_all
self._tokens_common_name = tk_common
self._tokens_scientific_name = tk_scientific
tk_all, tk_common, tk_scientific = self._create_tokens_for_family(self._taxonomy.taxonomy)
self._tokens_family_all = tk_all
self._tokens_family_common_name = tk_common
self._tokens_family_scientific_name = tk_scientific
def _create_tokens_for_taxonomy(self, xtaxonomy) -> Tuple[Set, Set, Set]:
# All, Common, Scientific
common_names = set(xtaxonomy.comNameLower)
scientific_names = set(xtaxonomy.sciNameLower)
common_scientific = (common_names | scientific_names)
tokens_common_scientific = self.filter_tokens(set(flatten([self.nlp.tokenizer(wd) for wd in common_scientific])))
tokens_common_name = self.filter_tokens(set(flatten([self.nlp.tokenizer(wd) for wd in common_names])))
        tokens_scientific_name = self.filter_tokens(
            set(flatten([self.nlp.tokenizer(wd) for wd in scientific_names])))
return tokens_common_scientific, tokens_common_name, tokens_scientific_name
def _create_tokens_for_family(self, xtaxonomy) -> Tuple[Set, Set, Set]:
# All, Common, Scientific
common_names = set([cn.lower() for cn in xtaxonomy.familyComName])
scientific_names = set([sn.lower() for sn in xtaxonomy.familySciName])
common_scientific = (common_names | scientific_names)
        tokens_common_scientific = self.filter_tokens(
            set(flatten([self.nlp.tokenizer(wd) for wd in common_scientific])))
        tokens_common_name = self.filter_tokens(
            set(flatten([self.nlp.tokenizer(wd) for wd in common_names])))
        tokens_scientific_name = self.filter_tokens(
            set(flatten([self.nlp.tokenizer(wd) for wd in scientific_names])))
return tokens_common_scientific, tokens_common_name, tokens_scientific_name
def taxonomy_tokens(self, range_restricted=False):
        # All, Common, Scientific
        # Note: the range-restricted token sets are commented out in
        # _create_all_taxonomy_tokens, so both branches below currently return
        # the full-taxonomy tokens.
if range_restricted:
return self._tokens_common_scientific, self._tokens_common_name, self._tokens_scientific_name
else:
return self._tokens_common_scientific, self._tokens_common_name, self._tokens_scientific_name
def taxonomy_family_tokens(self):
# All, Common Name, Scientific Name
return self._tokens_family_all, self._tokens_family_common_name, self._tokens_family_scientific_name
def filter_tokens(self, tokens: set) -> Set:
len1_tokens = set([tok for tok in tokens if len(tok) == 1])
len2_tokens = set([tok for tok in tokens if len(tok) == 2])
# This is not all of len2_tokens
tokens_to_drop = {"'s", '10', '11', 'al', 'f1', 'f2', 'is', 'la', 'mt', 'of', 'oo', 'or', 'ou', 'sp', 'ua'}
tokens = tokens - (set(len1_tokens) | tokens_to_drop)
return tokens
# Write out tokens - DEBUG
def write_out_tokens(self):
        lines = sorted([tok.text for tok in self._tokens_common_name])
        out_path = self.reports_path / 'nlp_tokens_common_name_lower.txt'
with open(out_path, 'a', encoding="utf-8") as fp: # , encoding="utf-8"
for line in lines:
line = line.strip() # encode('utf-8').
if len(line) > 0:
_ = fp.write(line + '\n')
def effectively_plural(self, word):
# Either an actual plural or the plural is the same as the singular
# i.e. the singular form of word, or False if already singular
        singular = self._inflect_engine.singular_noun(word)
        plural = self._inflect_engine.plural_noun(word)
        # True if the word is an actual plural, or if its plural form is
        # identical to the word itself, e.g. 'Killdeer', 'grouse', 'quail'
        return bool(singular) or (plural == word)
```
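`effectively_plural` leans on inflect's `singular_noun`, which returns the singular form when the word is plural and `False` when it is already singular. A tiny standalone illustration of that behaviour:

```python
import inflect

eng = inflect.engine()
print(eng.singular_noun('birds'))  # 'bird'  -> the word was plural
print(eng.singular_noun('bird'))   # False   -> the word is already singular
```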
#### File: automating-cbc/common/process_csv.py
```python
from datetime import datetime
from pathlib import Path
from typing import List, Optional
import pandas as pd
from datetime_manipulation import normalize_date_for_visits
# Local imports
from ebird_extras import EBirdExtra
from ebird_visits import transform_checklist_details
from local_translation_context import LocalTranslationContext
from taxonomy import Taxonomy
from text_transform import clean_common_names
from utilities_cbc import read_excel_or_csv_path
# Now for Bob Hirt
def raw_csv_to_checklist(fpath: Path,
taxonomy: Taxonomy,
local_translation_context: LocalTranslationContext,
observer_name: str,
xdates: List[str]
) -> pd.DataFrame:
csvdf = read_excel_or_csv_path(fpath)
df = csv_dataframe_to_checklist(csvdf, taxonomy, local_translation_context,
observer_name,
xdates
)
if df is None:
print(f'File {fpath} is not a valid species data file')
return df
def csv_dataframe_to_checklist(checklist: pd.DataFrame,
taxonomy: Taxonomy,
local_translation_context: LocalTranslationContext,
observer_name: str,
xdates: List[str]
) -> Optional[pd.DataFrame]:
# Use column names from eBird and let them be fixed by transform_checklist_details
# These all have to be present for transform_checklist_details
    if not {'CommonName', 'Total'}.issubset(checklist.columns):
return None
cleaned_common_names = clean_common_names(checklist.CommonName,
taxonomy, local_translation_context)
checklist.CommonName = cleaned_common_names
# This will get switched back by transform_checklist_details
checklist.rename(columns={'Total': 'howManyStr'}, inplace=True)
xdtypes = {'CommonName': str, 'howManyStr': int}
checklist = checklist.astype(dtype=xdtypes)
checklist['speciesCode'] = [taxonomy.find_species6_ebird(cn) for cn in checklist.CommonName]
checklist['locId'] = 'L5551212'
checklist['subId'] = 'S5551212'
checklist['groupId'] = ''
checklist['durationHrs'] = 0.5
checklist['effortDistanceKm'] = 0.1
checklist['effortDistanceEnteredUnit'] = 'mi'
# 'obsDt' needs dates in this form '26 Dec 2020'
obsdt = normalize_date_for_visits(xdates[0])
checklist['obsDt'] = f'{obsdt} 12:01'
checklist['userDisplayName'] = observer_name
checklist['numObservers'] = 1
checklist['comments'] = 'Generated'
# Clean up
checklist = transform_checklist_details(checklist, taxonomy)
return checklist
def subids_to_checklist(subids: List[str],
ebird_extra: EBirdExtra,
taxonomy: Taxonomy,
observer_name: str,
xdates: List[str]
) -> pd.DataFrame:
"""
Only handles one date right now
:param subids:
:param observation_date: e.g. '2020-12-26'
:return:
"""
# We jump through this hoop to take advantage of caching of eBird data
# The only fields of visits that get_details_for_dates uses are subId and obsDt
xvisits = pd.DataFrame()
xvisits['subId'] = subids
# 'obsDt' needs dates in this form '26 Dec 2020'
obsdt = datetime.strptime(xdates[0], '%Y-%m-%d').strftime('%d %b %Y')
xvisits['obsDt'] = obsdt # e.g. '26 Dec 2020'
subids_by_date = {xdates[0]: subids}
details = ebird_extra.get_details_for_dates(subids_by_date, xdates)
checklist = transform_checklist_details(details, taxonomy)
checklist['Name'] = observer_name
return checklist
```
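Both entry points above want `obsDt` in eBird's `'26 Dec 2020'` style while the count dates arrive as ISO strings; this is the whole conversion, shown standalone:

```python
from datetime import datetime

iso_date = '2020-12-26'
obsdt = datetime.strptime(iso_date, '%Y-%m-%d').strftime('%d %b %Y')
print(obsdt)  # '26 Dec 2020'
```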
#### File: automating-cbc/common/spacy_extra.py
```python
from spacy.attrs import ORTH
from spacy import displacy
import webcolors
from pathlib import Path
from utilities_cbc import circle_abbrev_from_path
from spacy.matcher import PhraseMatcher
from spacy.lang.en import English
from spacy.tokens import Span
from spacy.util import filter_spans
def build_phrase_matcher(nlp, taxonomy):
# species 10721
# issf 3800
# slash 709
# spuh 653
# hybrid 462
# form 122
# intergrade 31
# domestic 15
# nlp = English()
categories = ['hybrid', 'slash', 'form', 'intergrade', 'domestic', 'spuh', 'issf', 'species']
cols = ['comName', 'sciName', 'familyComName', 'familySciName', 'order']
name_prefixes = ['COM', 'SCI', 'FAMCOM', 'FAMSCI', 'ORD']
accumulation = set()
matcher_patterns = {}
pattern_names = []
for cat in categories:
subdf = taxonomy.taxonomy[taxonomy.taxonomy.Category == cat]
for ix, col in enumerate(cols):
names = set([xs.lower() for xs in subdf[col]]) - accumulation
patterns = [nlp(text) for text in names]
accumulation |= names
pattern_name = f'{name_prefixes[ix]}{cat}'.upper()
pattern_names.append(pattern_name)
matcher_patterns[pattern_name] = patterns
# print(pattern_names)
for pn in pattern_names:
nlp.vocab.strings.add(pn)
matcher = PhraseMatcher(nlp.vocab)
for key, val in matcher_patterns.items():
matcher.add(key, None, *val)
return matcher, nlp
def filter_to_possibles(tti, lines):
filter_tokens, _, _ = tti.nlp_context.taxonomy_tokens()
docSW = set(w.orth for w in tti.nlp(' '.join(tti.stop_words)))
docTax = set(w.orth for w in filter_tokens) - docSW
possibles = set()
for line in lines:
# print(line)
line_doc = set(tti.nlp(line))
docA = set(w.orth for w in line_doc)
if len(docA) == 0:
continue
intersections = docTax & docA
pctage = len(intersections) / len(docA)
# if (pctage > 0.0) and (pctage < 0.3):
# print(f'{pctage} {[tti.nlp.vocab.strings[ii] for ii in intersections]} {line}')
if pctage > 0.14:
# print(f'{pctage} {[tti.nlp.vocab.strings[ii] for ii in intersections]}')
possibles.add(line)
return possibles
def create_visualization2(docx, show_in_jupyter=True):
# Create visualization
# https://developer.mozilla.org/en-US/docs/Web/CSS/linear-gradient
# https://cssgradient.io
# https://htmlcolorcodes.com
ent_names = [
'COMHYBRID', 'SCIHYBRID', 'FAMCOMHYBRID', 'FAMSCIHYBRID', 'ORDHYBRID',
'COMSLASH', 'SCISLASH', 'FAMCOMSLASH', 'FAMSCISLASH', 'ORDSLASH', 'COMFORM',
'SCIFORM', 'FAMCOMFORM', 'FAMSCIFORM', 'ORDFORM', 'COMINTERGRADE',
'SCIINTERGRADE', 'FAMCOMINTERGRADE', 'FAMSCIINTERGRADE', 'ORDINTERGRADE',
'COMDOMESTIC', 'SCIDOMESTIC', 'FAMCOMDOMESTIC', 'FAMSCIDOMESTIC', 'ORDDOMESTIC',
'COMSPUH', 'SCISPUH', 'FAMCOMSPUH', 'FAMSCISPUH', 'ORDSPUH', 'COMISSF',
'SCIISSF', 'FAMCOMISSF', 'FAMSCIISSF', 'ORDISSF', 'COMSPECIES', 'SCISPECIES',
'FAMCOMSPECIES', 'FAMSCISPECIES', 'ORDSPECIES'
]
def ent_name_to_color(ent_name):
if ent_name.startswith('COM'):
return purplish
if ent_name.startswith('SCI'):
return aquaish
if ent_name.startswith('ORD'):
return greenish
if ent_name.startswith('FAMCOM'):
return yellowish
if ent_name.startswith('FAMSCI'):
return fuchsiaish
return webcolors.name_to_hex('HotPink'.lower())
print('Creating visualization')
# "R" suffix is for reverse
purplish = 'linear-gradient(90deg, #aa9cfc, #fc9ce7)' # original
purplishR = 'linear-gradient(45deg, #fc9ce7, #aa9cfc)'
yellowish = 'linear-gradient(90deg, #f9fc9c, #fac945)'
greenish = 'linear-gradient(90deg, #cdfc9c, #5cfa45)'
aquaish = 'linear-gradient(90deg, #9cfcea, #3cd3e7)'
aquaishR = 'linear-gradient(45deg, #3cd3e7, #9cfcea)'
fuchsiaish = 'linear-gradient(90deg, #fc9cde, #ff5aa4)'
colors = {}
for ent_name in ent_names:
colors[ent_name] = ent_name_to_color(ent_name)
options = {"ents": ent_names,
"colors": colors}
# displacy.serve(doc, style="ent", options=options)
html = displacy.render([docx], style="ent", page=True,
jupyter=show_in_jupyter, options=options)
return html
def write_visualization(names: list, fpath: Path, out_path: Path, taxonomy, tti):
# Now look for named entities
nlp = English()
docx = nlp('\n'.join(names))
matcher, nlp = build_phrase_matcher(nlp, taxonomy)
matches = matcher(docx)
match_spans = []
for match_id, start, end in matches:
rule_id = nlp.vocab.strings[match_id] # get the unicode ID, i.e. 'COLOR'
span = docx[start: end] # get the matched slice of the doc
# print(rule_id, span.text)
# create a new Span for each match and use the match_id (ANIMAL) as the label
span = Span(docx, start, end, label=match_id)
match_spans.append(span)
docx.ents = list(docx.ents) + filter_spans(match_spans)
# doc11.ents = list(doc11.ents) + [span] # add span to doc.ents
html = create_visualization2(docx, False)
# print(len(html))
# fname = f'{datetime.now().strftime("%m%d%y_%H%M%S")}.html'
abbrev = circle_abbrev_from_path(fpath)
out_path = out_path / f'{abbrev}-{fpath.suffix[1:]}-spacy.html'
# print(out_path)
tti.save_visualization(out_path, html)
def debug_print_nlp_string_hashes():
nlp = English()
# This should really integrate with build_phrase_matcher to get the names
# Avoid "[E084] Error assigning label ID 18363349229763587234 to span: not in StringStore."
cols = ['CommonName', 'ScientificName', 'Order', 'FamilyCommon', 'FamilyScientific',
'ZCOMMONNAME', 'ZSCIENTIFICNAME',
'COMHYBRID', 'SCIHYBRID', 'FAMCOMHYBRID', 'FAMSCIHYBRID', 'ORDHYBRID', 'COMSLASH',
'SCISLASH', 'FAMCOMSLASH', 'FAMSCISLASH', 'ORDSLASH', 'COMFORM', 'SCIFORM',
'FAMCOMFORM', 'FAMSCIFORM', 'ORDFORM', 'COMINTERGRADE', 'SCIINTERGRADE',
'FAMCOMINTERGRADE', 'FAMSCIINTERGRADE', 'ORDINTERGRADE', 'COMDOMESTIC',
'SCIDOMESTIC', 'FAMCOMDOMESTIC', 'FAMSCIDOMESTIC', 'ORDDOMESTIC', 'COMSPUH',
'SCISPUH', 'FAMCOMSPUH', 'FAMSCISPUH', 'ORDSPUH', 'COMISSF', 'SCIISSF',
'FAMCOMISSF', 'FAMSCIISSF', 'ORDISSF', 'COMSPECIES', 'SCISPECIES', 'FAMCOMSPECIES',
'FAMSCISPECIES', 'ORDSPECIES'
]
hashes = []
for col in cols:
hashes.append(nlp.vocab.strings.add(col))
for hh in sorted(hashes):
print(f'{hh:24} {nlp.vocab.strings[hh]}')
def show_species_and_families(docx):
families = set()
species = set()
for ent in docx.ents:
if ent.label_ == 'FamilyCommon':
families.add(ent.text)
elif ent.label_ == 'CommonName':
species.add(ent.text)
# print(ent.text, ent.start_char, ent.end_char, ent.label_)
xspecies = ', '.join(sorted(list(species)))
xfamilies = ', '.join(sorted(list(families)))
print(f'Species: {xspecies}')
print(f'Families: {xfamilies}')
```
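`build_phrase_matcher` drives spaCy's `PhraseMatcher` from the full taxonomy; the same mechanics on a toy vocabulary look roughly like this. The sketch assumes spaCy 3's `matcher.add(key, patterns)` signature (the file above uses the older positional form) and case-insensitive matching via `attr='LOWER'`:

```python
from spacy.lang.en import English
from spacy.matcher import PhraseMatcher

nlp = English()
matcher = PhraseMatcher(nlp.vocab, attr='LOWER')

# Toy pattern set; the real code builds one pattern list per taxonomy category.
names = ['american robin', 'killdeer', 'turdus migratorius']
matcher.add('COMSPECIES', [nlp.make_doc(name) for name in names])

doc = nlp('We saw a Killdeer and an American Robin near the pond.')
for match_id, start, end in matcher(doc):
    print(nlp.vocab.strings[match_id], doc[start:end].text)
# COMSPECIES Killdeer
# COMSPECIES American Robin
```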
#### File: automating-cbc/common/utilities_clustering.py
```python
import sys
import traceback
import pandas as pd
import seaborn as sns
from sklearn.cluster import KMeans
from geopy.distance import distance
from parameters import Parameters
from typing import Tuple, List, Optional
import matplotlib.pyplot as plt
# from IPython.display import display
from utilities_kml import update_geo_data_with_clustering
sns.set()
def generate_cluster_table(visits_of_interest: pd.DataFrame,
geo_data: pd.DataFrame,
parameters: Parameters,
quiet: bool = True) -> Tuple[pd.DataFrame,
Optional[pd.DataFrame],
Optional[pd.DataFrame]]:
# https://levelup.gitconnected.com/clustering-gps-co-ordinates-forming-regions-4f50caa7e4a1
# Keep in mind this is non-deterministic, so clusters and cluster labels can change
# If an ndarray is passed, it should be of shape (n_clusters, n_features)
# and gives the initial centers.
reference_sector_names = parameters.parameters.get('ReferenceSectorNames', None)
if reference_sector_names == '':
reference_sector_names = None
reference_sector_centers = parameters.parameters.get('ReferenceSectorCenters', None)
if reference_sector_centers == '':
reference_sector_centers = None
have_real_sectors = 'sector' in geo_data.type.values
    # 18 is an arbitrary threshold below which we don't construct pseudo-sectors
if have_real_sectors or visits_of_interest.shape[0] < 18:
return geo_data, None, None
voi = visits_of_interest.copy()
# Remove rows where the Longitude and/or Latitude are null values
voi.dropna(axis=0, how='any', subset=['latitude', 'longitude'], inplace=True)
xdata = voi.loc[:, ['locId', 'latitude', 'longitude']]
cluster_size = 6 if reference_sector_names is None else len(reference_sector_names.split(','))
if not quiet:
print(f'cluster_size: {cluster_size}')
plot_elbow_curve(visits_of_interest)
kmeans = KMeans(n_clusters=cluster_size, init='k-means++')
kmeans.fit(xdata[xdata.columns[1:3]]) # Compute k-means clustering.
xdata['cluster_label'] = kmeans.fit_predict(xdata[xdata.columns[1:3]])
# labels = kmeans.predict(xdata[xdata.columns[1:3]]) # Labels of each point
centers = kmeans.cluster_centers_ # Coordinates of cluster centers.
if reference_sector_names is None or reference_sector_centers is None:
obscounts = xdata.cluster_label.value_counts().to_dict()
print(f'Counts for each observation cluster: {obscounts}')
if reference_sector_names is None:
reference_sector_names = [f'Sector{ix}' for ix in range(cluster_size)]
else:
reference_sector_names = reference_sector_names.split(',')
if reference_sector_centers is not None:
reference_sector_centers = unpack_reference_sector_centers(reference_sector_centers)
if reference_sector_centers is None:
reference_sector_centers = [(x, y) for x, y in centers]
sc_str = ','.join([str(z) for z in reference_sector_centers])
print(f'\nPossible generated sector centers: {sc_str}')
sector_info = {k: v for k, v in zip(reference_sector_names, reference_sector_centers)}
# print(sector_info)
cluster_table = deduce_sector_names(xdata, centers, sector_info, quiet)
centers_df = convert_centers_to_dataframe(centers, cluster_table)
if not cluster_table.empty:
geo_data = update_geo_data_with_clustering(geo_data, cluster_table, centers_df)
return geo_data, cluster_table, centers_df
def convert_centers_to_dataframe(centers, cluster_table) -> pd.DataFrame:
xcenters_df = cluster_table.copy().drop_duplicates(['cluster_label']).sort_values(
by=['cluster_label']).reset_index(drop=True)
# display(xcenters_df)
ccdf = pd.DataFrame(centers, columns=['latitude', 'longitude'])
xcenters_df.latitude = ccdf.latitude
xcenters_df.longitude = ccdf.longitude
xcenters_df.drop(['locId'], axis=1, inplace=True)
return xcenters_df
def deduce_sector_names(cluster_table: pd.DataFrame,
cluster_centers: pd.DataFrame,
sector_info,
quiet: bool = True
) -> pd.DataFrame:
# Deduce Sector Names
# Do this if no 'sector' in types column of geo_data
# make type 'generated-sector'
# ref build_location_meta
# https://towardsdatascience.com/finding-distant-pairs-in-python-with-pandas-fa02df50d14b
fixed_columns = ['Name', 'latitude', 'longitude', 'cluster_label', 'coordinates']
# print('deduce_sector_names cluster_centers')
# display(cluster_centers)
if not quiet:
print(f'sector_info: {sector_info}')
centers = []
for ix, cluster in enumerate(cluster_centers):
lat, lng = cluster
row = {'Name': f'C{ix}', 'latitude': lat, 'longitude': lng, 'cluster_label': ix}
centers.append(row)
centers_df = pd.DataFrame(centers)
centers_df['coordinates'] = [(x, y) for x, y in zip(centers_df.latitude, centers_df.longitude)]
for sname, rcoords in sector_info.items():
centers_df[sname] = centers_df.coordinates.apply(lambda cc: round(distance(cc, rcoords).m))
# Find index of row with the minimum distance
zmins = centers_df.iloc[:, len(fixed_columns):].idxmin(axis=0)
if not quiet:
print(f'deduce_sector_names centers_df zmins: {zmins.to_dict()}')
centers_df['TrueName'] = list(zmins.sort_values().index)
# display(centers_df)
sname = cluster_table.cluster_label.apply(lambda cl:
centers_df[
centers_df.cluster_label == cl].TrueName.values[
0])
cluster_table['GeoName'] = sname
return cluster_table
def unpack_reference_sector_centers(reference_sector_centers: str) -> \
Optional[List[Tuple[float, float]]]:
ursc = None
try:
rsc = reference_sector_centers.split('),')
xtuples = [xs + ')' for xs in rsc[:-1]]
xtuples.append(rsc[-1])
# SECURITY WARNING: eval
ursc = [eval(xs) for xs in xtuples]
except Exception as ee:
print(ee)
print(reference_sector_centers)
traceback.print_exc(file=sys.stdout)
return ursc
def plot_elbow_curve(visits_of_interest):
df = visits_of_interest
k_clusters = range(1, 10)
kmeans = [KMeans(n_clusters=i) for i in k_clusters]
y_axis = df[['latitude']]
# X_axis = df[['longitude']]
score = [kmeans[i].fit(y_axis).score(y_axis) for i in range(len(kmeans))]
# Visualize
plt.plot(k_clusters, score)
plt.xlabel('Number of Clusters')
plt.ylabel('Score')
plt.title('Elbow Curve')
plt.show()
```
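The pseudo-sector generation above reduces to k-means over latitude/longitude pairs. A minimal sketch of that core step on made-up coordinates (as noted in `generate_cluster_table`, the cluster labels are not stable between runs):

```python
import pandas as pd
from sklearn.cluster import KMeans

points = pd.DataFrame({
    'latitude':  [36.50, 36.51, 36.52, 36.80, 36.81, 36.82],
    'longitude': [-121.90, -121.91, -121.89, -121.60, -121.61, -121.59],
})
kmeans = KMeans(n_clusters=2, init='k-means++', n_init=10)
points['cluster_label'] = kmeans.fit_predict(points[['latitude', 'longitude']])
print(points)
print(kmeans.cluster_centers_)  # one (latitude, longitude) center per cluster
```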
#### File: automating-cbc/common/write_basic_spreadsheet.py
```python
from pathlib import Path
from typing import List
import pandas as pd
from utilities_excel import excel_columns, make_sheet_banded, add_workbook_formats, Xlformat
def write_basic_spreadsheet(df: pd.DataFrame,
fpath: Path,
column_widths: dict,
columns_to_center: List[str]):
if df.empty:
return None
df = df.fillna('')
default_column_width = 14 # if not specified in column_widths
xsheet_name = 'Basic'
with pd.ExcelWriter(fpath.as_posix(), engine='xlsxwriter') as writer:
df.to_excel(writer, index=False, sheet_name=xsheet_name)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets[xsheet_name]
xlfmts = add_workbook_formats(workbook)
# https://stackoverflow.com/questions/43991505/xlsxwriter-set-global-font-size
workbook.formats[0].set_font_size(14) # to make it readable when printed
# ----------------------------------------------
# Populate col_infos with formatting information
col_infos = {}
excel_letters = excel_columns()
for ix, col in enumerate(df.columns):
col_letter = excel_letters[ix]
col_info = {}
fmt = xlfmts[Xlformat.CENTER] if (
columns_to_center is not None and col in columns_to_center) else None
col_info['format'] = fmt
col_info['width'] = column_widths.get(col, default_column_width)
col_info['xl_col_letter'] = f'{col_letter}'
col_infos[col] = col_info
colspec = pd.DataFrame(col_infos).T
# Set the column widths and format.
# Set formats with e.g. 'C:C'
for col_num, col_info in colspec.iterrows():
xl_col_letter = col_info['xl_col_letter']
wid = col_info['width']
fmt = col_info['format']
worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', wid, fmt)
# Make the sheet banded
make_sheet_banded(worksheet, df)
worksheet.freeze_panes(1, 0) # Freeze the first row.
# Write the column headers with the defined format.
for col, col_info in colspec.iterrows():
col_num = list(colspec.index).index(col)
worksheet.write(0, col_num, col, xlfmts[Xlformat.HEADER])
```
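A minimal usage sketch for `write_basic_spreadsheet`; the frame, output path, and widths are hypothetical:

```python
from pathlib import Path
import pandas as pd
from write_basic_spreadsheet import write_basic_spreadsheet  # assumes common/ is on the path

df = pd.DataFrame({
    'Name': ['Ann Birder', 'Bob Counter'],
    'Sector': ['North', 'South'],
    'Species': [63, 71],
})
write_basic_spreadsheet(
    df,
    Path('reports/party-summary.xlsx'),         # hypothetical output path
    column_widths={'Name': 25, 'Sector': 12},   # unlisted columns fall back to 14
    columns_to_center=['Sector', 'Species'],
)
```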
#### File: automating-cbc/common/write_categorized_lines.py
```python
import string
from pathlib import Path
from typing import List
import pandas as pd
import webcolors
from utilities_excel import make_sheet_banded, add_workbook_formats, Xlformat
"""
Write out the categorized_lines Excel file. This is used to debug the translations.
Note: this code is written so that it can also serve as a generic debug Excel writer.
Columns: Line, Translation, Translated, Category
write_categorized_lines_spreadsheet(categorized_lines,
debug_path / 'categorized_lines.xlsx',
col_widths = [40, 40, 11, 16],
col_align = ['left', 'left', 'center', 'center'],
sheet_name = 'Categorized Lines',
)
"""
def write_categorized_lines_spreadsheet(df: pd.DataFrame, output_path: Path,
col_widths: List[int],
col_align: List[str],
sheet_name: str
):
if df.empty:
return None
sheet_names = banded_sheets = [sheet_name]
with pd.ExcelWriter(output_path.as_posix(), engine='xlsxwriter') as writer:
df.to_excel(writer, index=False, sheet_name=sheet_name)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
xlfmts = add_workbook_formats(workbook)
for sheet_num, sheet_name in enumerate(sheet_names):
worksheet = writer.sheets[sheet_name]
# Set the column width and format.
col_vals = df.columns.values
row_count = df.shape[0]
for ix, wid in enumerate(col_widths):
col_letter = string.ascii_uppercase[ix]
fmt = xlfmts[Xlformat.CENTER] if col_align[ix] == 'center' else None
worksheet.set_column(f'{col_letter}:{col_letter}', wid, fmt)
if sheet_name in banded_sheets:
make_sheet_banded(worksheet, df)
# ---------- Specific to categorized_lines ----------
colorize_category_column(df, workbook, worksheet)
# Write the column headers with the defined format.
for col_num, value in enumerate(col_vals):
worksheet.write(0, col_num, value, xlfmts[Xlformat.HEADER])
def ent_name_to_color(ent_name):
# Excel doesn't process linear-gradient colors
# "R" suffix is for reverse
# purplish = 'linear-gradient(90deg, #aa9cfc, #fc9ce7)' # original
# purplishR = 'linear-gradient(45deg, #fc9ce7, #aa9cfc)'
# yellowish = 'linear-gradient(90deg, #f9fc9c, #fac945)'
# greenish = 'linear-gradient(90deg, #cdfc9c, #5cfa45)'
# aquaish = 'linear-gradient(90deg, #9cfcea, #3cd3e7)'
# aquaishR = 'linear-gradient(45deg, #3cd3e7, #9cfcea)'
# fuchsiaish = 'linear-gradient(90deg, #fc9cde, #ff5aa4)'
purplish = '#aa9cfc' # original
yellowish = '#f9fc9c'
greenish = '#cdfc9c'
aquaish = '#9cfcea'
fuchsiaish = '#fc9cde'
if ent_name.startswith('COM'):
return purplish
if ent_name.startswith('SCI'):
return aquaish
if ent_name.startswith('ORD'):
return greenish
if ent_name.startswith('FAMCOM'):
return yellowish
if ent_name.startswith('FAMSCI'):
return fuchsiaish
return webcolors.name_to_hex('HotPink'.lower())
def colorize_category_column(df, workbook, worksheet):
ent_names = [
'COMDOMESTIC', 'COMFORM', 'COMHYBRID', 'COMINTERGRADE', 'COMISSF', 'COMSLASH',
'COMSPECIES', 'COMSPUH', 'FAMCOMDOMESTIC', 'FAMCOMFORM', 'FAMCOMHYBRID',
'FAMCOMINTERGRADE', 'FAMCOMISSF', 'FAMCOMSLASH', 'FAMCOMSPECIES', 'FAMCOMSPUH',
'FAMSCIDOMESTIC', 'FAMSCIFORM', 'FAMSCIHYBRID', 'FAMSCIINTERGRADE',
'FAMSCIISSF', 'FAMSCISLASH', 'FAMSCISPECIES', 'FAMSCISPUH', 'ORDDOMESTIC',
        'ORDFORM', 'ORDHYBRID', 'ORDINTERGRADE', 'ORDISSF', 'ORDSLASH', 'ORDSPECIES',
        'ORDSPUH', 'SCIDOMESTIC',
'SCIFORM', 'SCIHYBRID', 'SCIINTERGRADE', 'SCIISSF',
'SCISLASH', 'SCISPECIES', 'SCISPUH',
]
xl_last_data_row = df.shape[0] + 1 # plus 1 is because data starts at row 2
category_idx = list(df.columns).index('Category')
letter = string.ascii_uppercase[category_idx]
category_cells = f'{letter}2:{letter}{xl_last_data_row}'
for ent_name in ent_names:
# Could update to a "startswith" criteria; see ent_name_to_color
category_criteria = f'=EXACT({category_cells},"{ent_name}")'
# print(category_criteria)
category_color = ent_name_to_color(ent_name)
category_format = workbook.add_format(
{'bg_color': category_color}) # , 'font_color': '#006100'
worksheet.conditional_format(category_cells,
{'type': 'formula',
'criteria': category_criteria,
'format': category_format,
'stop_if_true': True})
```
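`colorize_category_column` hangs an `EXACT()` formula condition on the Category column for every entity name. Stripped of the dataframe plumbing, the underlying xlsxwriter pattern is roughly this (file name, range, and colour are illustrative):

```python
import xlsxwriter

workbook = xlsxwriter.Workbook('conditional_demo.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write_column('D2', ['COMSPECIES', 'SCISPUH', 'COMSPECIES'])

highlight = workbook.add_format({'bg_color': '#aa9cfc'})
# Colour every cell in D2:D4 whose text is exactly 'COMSPECIES'
worksheet.conditional_format('D2:D4', {
    'type': 'formula',
    'criteria': '=EXACT(D2,"COMSPECIES")',
    'format': highlight,
    'stop_if_true': True,
})
workbook.close()
```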
#### File: automating-cbc/common/write_final_checklist.py
```python
import sys
import traceback
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
from IPython.display import display
from utilities_excel import add_workbook_formats, choose_format_accent, excel_columns, \
Xlformat, make_sheet_banded
# TWO_COL_SPLIT_ROWS = 80 # 59 # About 59 rows fit on normal Excel page for printing
MAX_SPECIES_ALTERNATIVES = 6
# This number seems to vary a lot; not sure why. It has been 59 and 80 previously.
EXCEL_ROWS_PER_PRINTED_PAGE = 95
def unfill_species_group(local_checklist):
# Keep for eventual printing
# https://stackoverflow.com/questions/46479437/perform-operation-opposite-to-pandas-ffill
zlocal_checklist = local_checklist.copy()
ffinv = lambda s: s.mask(s == s.shift())
zlocal_checklist = zlocal_checklist.assign(Group=ffinv(zlocal_checklist.Group)).fillna('')
return zlocal_checklist
# Additional args for write_final_checklist_spreadsheet:
# - cols_to_hide: default = ['Group', 'R', 'TaxonOrder']
# - cols_to_highlight: ['Total']
# - cols_to_drop:
# This will also apply to the '_' versions of these cols (i.e. the second column)
def transform_checklist_into_two_columns(checklist,
excel_rows_per_printed_page=EXCEL_ROWS_PER_PRINTED_PAGE):
page_breaks = None
# # Do some transformations on the incoming dataframe
# col_subset = [cc for cc in ['Group', 'CommonName', 'Rare', 'Total',
# 'Ad', 'Im', 'TaxonOrder', 'D',
# 'Adult', 'Immature', 'W-morph', 'B-Morph', 'Difficulty'] if
# cc in checklist.columns]
preferred_order = ['Group', 'CommonName', 'Rare', 'D', 'Total', 'Ad', 'Im',
'TaxonOrder', 'Category', 'Difficulty',
'Adult', 'Immature', 'W-morph', 'B-Morph', 'CountSpecial']
col_subset = [col for col in preferred_order if col in checklist.columns]
checklist = unfill_species_group(checklist.copy()[col_subset])
# Rename columns
# checklist.columns = ['Group', 'CommonName', 'R', 'Total', 'TaxonOrder']
checklist.Group = checklist.Group.apply(lambda xs: xs.upper())
# We can fit 59 species per column, roundup(176/59) gives us 4
checklist_rows = checklist.shape[0]
# There is a closed-form solution to this, but this works
rpp = excel_rows_per_printed_page # Might vary, but this is what Excel splits it at
rpp_bin_tuples = [(1, rpp), (rpp, 2 * rpp)]
top = int(np.round(1 + (checklist_rows / rpp)))
for lhs in list(range(2, top, 2)):
rpp_bin_tuples.append((lhs * rpp, (lhs + 2) * rpp))
rpp_bins = pd.IntervalIndex.from_tuples(rpp_bin_tuples)
num_splits = 2 * rpp_bins.get_loc(checklist_rows)
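    # Worked example: with excel_rows_per_printed_page = 95 and 176 checklist rows,
    # rpp_bins is (1, 95], (95, 190], (190, 380]; 176 lands in bin index 1, so
    # num_splits = 2 and the checklist is laid out as two side-by-side columns.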
# print(f'Rows per page: {rpp}')
# print(f'RPP Bins: {rpp_bins}')
# print(TWO_COL_SPLIT_ROWS, rpp, top, rpp_bin_tuples)
if num_splits == 0:
# Nothing to do
checklist2col = checklist.copy()
else:
# Unfortunately, array_split makes equal chunks, which are too small
# dfs = np.array_split(checklist, num_splits, axis=0)
dfs = []
for start in range(0, checklist.shape[0], excel_rows_per_printed_page):
df = checklist.iloc[start:start + excel_rows_per_printed_page].copy()
dfs.append(df)
# Get the last non-empty group to supply to next df
previous_group = None
for ix, _ in enumerate(dfs):
# print(dfs[ix].shape)
if previous_group:
dfs[ix].reset_index(drop=True, inplace=True)
if dfs[ix].loc[0, 'Group'] == '':
dfs[ix].loc[0, 'Group'] = previous_group + ' (cont.)'
last_taxon_order = dfs[ix].iloc[-1]['TaxonOrder']
blank_row = pd.Series([''] * dfs[ix].shape[1], index=dfs[ix].columns)
blank_row['TaxonOrder'] = last_taxon_order + 0.1
dfs[ix] = dfs[ix].append(blank_row, ignore_index=True)
# Get the last non-empty group to supply to next df
groups = dfs[ix]['Group']
previous_group = [x for x in groups if x != ''][-1]
# print(previous_group)
df_pages = []
for ix in range(0, len(dfs), 2):
# reset_index(drop=True) not needed for ix==0, but easier to just do it
if ix + 1 < len(dfs):
df_page = pd.concat(
[dfs[ix].reset_index(drop=True), dfs[ix + 1].reset_index(drop=True)], axis=1,
ignore_index=True)
else:
df_empty = pd.DataFrame(columns=dfs[ix].columns,
index=range(dfs[ix].shape[0])).fillna('')
df_empty.Total = ''
df_empty.TaxonOrder = 0
df_page = pd.concat([dfs[ix].reset_index(drop=True), df_empty], axis=1,
ignore_index=True)
df_pages.append(df_page)
checklist2col = pd.concat(df_pages, axis=0, ignore_index=True)
zcol = pd.Series(checklist.columns)
zcols = list(zcol) + list(zcol.apply(lambda xs: xs + ' '))
checklist2col.columns = zcols
# page_breaks = [43, 86, 129]
return checklist2col, None
def recombine_transformed_checklist(checklist, taxonomy):
# Undo transform_checklist_into_two_columns
columns = checklist.columns
if (len(columns) % 2) != 0:
return checklist
# Double check that it was made by our function transform_checklist_into_two_columns
hwp = int(len(columns) / 2)
first_half_cols = list(columns[0:hwp])
second_half_cols = list(columns[hwp:])
if not (first_half_cols == [xs.strip() for xs in second_half_cols]):
return checklist
top_half = checklist[first_half_cols]
bottom_half = checklist[second_half_cols]
bottom_half.columns = top_half.columns
# print(top_half.shape, bottom_half.shape)
combined_checklist = top_half.append(bottom_half).reset_index(drop=True)
    # Rows with a blank CommonName are an artifact of making two columns
mask_blank = (combined_checklist.CommonName.astype(str) == '')
# display(combined_checklist[mask_blank])
combined_checklist.drop(combined_checklist[mask_blank].index, inplace=True) # mask_99999 |
combined_checklist = combined_checklist.sort_values(by=['TaxonOrder']).reset_index(drop=True)
species_groups = []
for cn in combined_checklist.CommonName:
common_name, taxon_order, species_group, nacc_sort_order = taxonomy.find_local_name(cn)
species_groups.append(species_group)
combined_checklist.Group = species_groups
# Fix total column, may be blanks instead of zeros
totals = [(0 if xx == '' else xx) for xx in combined_checklist.Total]
combined_checklist.Total = totals
return combined_checklist
# Additional args for write_final_checklist_spreadsheet:
# - cols_to_hide: default = ['Group', 'R', 'TaxonOrder']
# - cols_to_highlight: ['Total']
# - cols_to_drop:
# This will also apply to the '_' versions of these cols (i.e. the second column)
# format_rare = workbook.add_format({'bold': True}) # 'bg_color': '#FFC7CE',
def format_col_if_other_col(checklist, worksheet,
cols_to_format: List[str],
condition_cols: List[str],
xformat,
to_match: str = 'X'):
# Make CommonName background yellow if CountSpecial column is set
# Assumes format has been added to workbook already
# condition_col is the "other col"
xl_last_data_row = checklist.shape[0] + 1 # plus 1 is because data starts at row 2
# Add variants with blanks to handle two column layouts (Double)
# For example, both 'Adult' and 'Adult ' could be in columns
cols_to_format.extend([xs + ' ' for xs in cols_to_format])
condition_cols.extend([xs + ' ' for xs in condition_cols])
cols_to_format_idxs = [idx for idx, xs in enumerate(checklist.columns) if xs in cols_to_format]
# What column do we look for an 'X' in? These are the condition columns
x_cols_idxs = [idx for idx, xs in enumerate(checklist.columns) if xs in condition_cols]
col_letters = excel_columns()
for cn_idx, cond_idx in zip(cols_to_format_idxs, x_cols_idxs):
col2fmt_letter = col_letters[cn_idx]
cond_col_letter = col_letters[cond_idx]
to_format_cells = f'{col2fmt_letter}2:{col2fmt_letter}{xl_last_data_row}'
criteria_cells = f'{cond_col_letter}2'
        # Quote the match value so Excel compares against a string literal
        criteria = f'=EXACT({criteria_cells},"{to_match}")'
# print(f'rarity_criteria: {rarity_criteria}')
worksheet.conditional_format(to_format_cells,
{'type': 'formula', 'criteria': criteria,
'format': xformat})
def write_final_checklist_spreadsheet(checklist, checklist_path: Path,
parameters: dict,
additional_sheets: Optional[List[dict]],
cols_to_hide: list = None,
cols_to_highlight: list = None,
header_cell_groups: List[str] = None
):
# updated_checklist is the filled-in local_checklist
# It may be wrapped to a two column (printing) format
if cols_to_highlight is None:
cols_to_highlight = ['Total']
if cols_to_hide is None:
cols_to_hide = ['Group', 'R', 'TaxonOrder']
if checklist.empty:
return None
checklist = checklist.copy()
xsheet_name = 'Final Checklist'
# Columns
# Group, CommonName, R, Total, TaxonOrder, Group_, CommonName_, R_, Total_, TaxonOrder_
# A B C D E F G H I J
real_cols_to_hide = [x for x in checklist.columns if
x.rstrip() in cols_to_hide] if cols_to_hide else []
real_cols_to_highlight = [x for x in checklist.columns if x.rstrip() in cols_to_highlight] \
if cols_to_highlight else []
cols_to_center = ['R', 'Total', 'TaxonOrder', 'Rare', 'Category', 'NACC_SORT_ORDER',
'ABA_SORT_ORDER']
stripped_widths = {'Group': 20, 'CommonName': 40, 'R': 5, 'Total': 7, 'TaxonOrder': 8,
'LocalName': 35, 'may_need_writeup': 35, 'Rare': 10,
'D': 3, 'Adult': 6, 'Immature': 6, 'W-morph': 6, 'B-Morph': 6,
'Difficulty': 6, 'Adult/White': 11, 'Immature/Blue': 11,
'Ad': 3, 'Im': 3, 'CountSpecial': 3,
'Category': 10, 'NACC_SORT_ORDER': 8, 'ABA_SORT_ORDER': 8}
xl_last_data_row = checklist.shape[0] + 1 # plus 1 is because data starts at row 2
fill_values = {'Group': '', 'CommonName': '', 'Rare': '', 'TaxonOrder': 99999,
'Group ': '', 'CommonName ': '', 'Rare ': '', 'TaxonOrder ': 99999}
checklist = checklist.fillna(value=fill_values)
# Probably sector names
standard_cols_base = ['CommonName', 'LocalName', 'Group',
'Category', 'TaxonOrder', 'NACC_SORT_ORDER', 'ABA_SORT_ORDER', 'Total']
standard_cols = standard_cols_base.copy()
for col in standard_cols_base:
standard_cols.append(col + ' ')
non_standard_cols = [col for col in checklist.columns if col not in standard_cols]
for col in non_standard_cols:
cols_to_center.append(col)
if not stripped_widths.get(col, None):
stripped_widths[col] = 14
try:
intcols = [col for col in checklist.columns if col.startswith('Taxon')]
for col in intcols:
checklist[col] = pd.to_numeric(checklist[col], errors='coerce')
# checklist = checklist.astype({col: 'int32'}, errors='ignore')
except Exception as ee:
print(f'Failed to set type of column "{col}" to numeric', ee)
checklist.to_csv(checklist_path.parent / f'failure-checklist.csv', index=False)
unknown_idxs = checklist.index[checklist[col] == 'UNKNOWN']
display(checklist.loc[unknown_idxs])
traceback.print_exc(file=sys.stdout)
# pass
    checklist = checklist.astype({'Total': str})
with pd.ExcelWriter(checklist_path.as_posix(), engine='xlsxwriter') as writer:
checklist.to_excel(writer, index=False, sheet_name=xsheet_name)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
xlfmts = add_workbook_formats(workbook)
# https://stackoverflow.com/questions/43991505/xlsxwriter-set-global-font-size
workbook.formats[0].set_font_size(14) # to make it readable when printed
# ----------------------------------------------
# Populate col_infos with formatting information
col_infos = {}
col_letters = excel_columns()
assert (len(checklist.columns) <= len(col_letters)) # 702
for ix, col in enumerate(checklist.columns):
stripped_col = col.rstrip()
col_letter = col_letters[ix]
col_info = {
'hide': col in real_cols_to_hide,
'highlight': col in real_cols_to_highlight,
'format': xlfmts[Xlformat.CENTER] if col in cols_to_center else None,
'width': stripped_widths.get(stripped_col, 10),
'xl_col_letter': f'{col_letter}'
}
col_infos[col] = col_info
colspec = pd.DataFrame(col_infos).T
# ----------------------------------------------
worksheet = writer.sheets[xsheet_name]
date_of_count = parameters['CountDate']
dtcount = datetime.strptime(date_of_count, '%Y-%m-%d')
dtcstr = dtcount.strftime("%d %b %Y")
yr = dtcount.strftime("%Y")
title = parameters.get('FinalChecklistTitle', '')
worksheet.set_header(f'&C&16&"Times New Roman,Regular"{title} {yr}')
region = parameters['CircleAbbrev']
party = parameters['CircleID']
footer_fmt = f'&C&12&"Times New Roman,Regular"'
worksheet.set_footer(
f'{footer_fmt}Region: {region} Party: {party} Date: {dtcstr}')
# print(f'parameters: {parameters}')
page_breaks = parameters.get('page_breaks', None)
if page_breaks:
# When splitting into 2 columns, the page breaks are known
print(f'page_breaks: {page_breaks}')
worksheet.set_h_pagebreaks(page_breaks)
else:
# https://xlsxwriter.readthedocs.io/page_setup.html
# A common requirement is to fit the printed output to n pages wide but
# have the height be as long as necessary
# print('fitting to 1 page wide')
worksheet.fit_to_pages(1, 0)
# Highlight numbers > 0 for species count
for ix, col_info in colspec[colspec.highlight].iterrows():
xl_col_letter = col_info['xl_col_letter']
v_total_cell_range = f'{xl_col_letter}2:{xl_col_letter}{xl_last_data_row}'
worksheet.conditional_format(v_total_cell_range,
{'type': 'cell',
'criteria': '>',
'value': 0,
'format': xlfmts[Xlformat.GREEN]})
excel_letters = excel_columns()
# Make CommonName bold if Rare column is set
cols_to_bold_idxs = [idx for idx, xs in enumerate(checklist.columns) if
xs.startswith('CommonName')]
rare_cols_idxs = [idx for idx, xs in enumerate(checklist.columns) if xs.startswith('Rare')]
for cn_idx, ra_idx in zip(cols_to_bold_idxs, rare_cols_idxs):
letter = excel_letters[cn_idx]
letter_rare = excel_letters[ra_idx]
format_rare = workbook.add_format({'bold': True}) # 'bg_color': '#FFC7CE',
rare_name_cells = f'{letter}2:{letter}{xl_last_data_row}'
rarity_criteria_cells = f'{letter_rare}2'
rarity_criteria = f'=EXACT({rarity_criteria_cells},"X")'
# print(f'rarity_criteria: {rarity_criteria}')
worksheet.conditional_format(rare_name_cells,
{'type': 'formula', 'criteria': rarity_criteria,
'format': format_rare})
# Make CommonName background yellow if CountSpecial column is set
format_col_if_other_col(checklist, worksheet, ['CommonName'], ['CountSpecial'],
xlfmts[Xlformat.COUNTSPECIAL])
# format_col_if_other_col(checklist, worksheet, col_to_format, condition_cols, xformat)
# Color the 'D' (Difficulty) column based on value in 'Difficulty' column
xformats = [xlfmts[idx] for idx in [Xlformat.EASY, Xlformat.MARGINAL, Xlformat.DIFFICULT]]
for to_match, xformat in zip(['E', 'M', 'D'], xformats):
format_col_if_other_col(checklist, worksheet, ['D'], ['Difficulty'], xformat, to_match)
# Color the 'Ad' (Adult) column based on value in 'Adult' column
format_col_if_other_col(checklist, worksheet, ['Ad', 'Im'], ['Adult', 'Immature'],
xlfmts[Xlformat.AGE], 'X')
# Highlight the 'Ad', 'Im' if non-zero values in 'W-morph', 'B-Morph'
# The 'Ad', 'Im' columns are overloaded here since there is no species overlap
format_col_if_other_col(checklist, worksheet, ['Ad', 'Im'], ['W-morph', 'B-Morph'],
xlfmts[Xlformat.MORPH], 'X')
# Italicize non-species
try:
if 'Category' in checklist.columns:
cols_to_italicize_idxs = [idx for idx, xs in enumerate(checklist.columns) if
xs.startswith('CommonName')]
category_cols_idxs = [idx for idx, xs in enumerate(checklist.columns) if
xs.startswith('Category')]
for cn_idx, ca_idx in zip(cols_to_italicize_idxs, category_cols_idxs):
letter = excel_letters[cn_idx]
letter_category = excel_letters[ca_idx]
common_name_cells = f'{letter}2:{letter}{xl_last_data_row}'
category_criteria_cells = f'{letter_category}2'
# category_criteria = f'=EXACT({category_criteria_cells},"slash")'
category_criteria = f'={category_criteria_cells}<>"species"'
# print(f'category_criteria: {category_criteria}')
worksheet.conditional_format(common_name_cells,
{'type': 'formula', 'criteria': category_criteria,
'format': xlfmts[Xlformat.ITALIC]})
except Exception as ee:
print(ee)
print(checklist.columns)
print(category_cols_idxs)
traceback.print_exc(file=sys.stdout)
raise
# rare_name_cells = f'G2:G{xl_last_data_row}'
# rarity_criteria = '=EXACT(H2,"X")'
# worksheet.conditional_format(rare_name_cells,
# {'type': 'formula', 'criteria': rarity_criteria, 'format': format_rare})
# Set the column width and format.
# Set formats with e.g. 'C:C'
for col_num, col_info in colspec.iterrows():
xl_col_letter = col_info['xl_col_letter']
wid = col_info['width']
fmt = col_info['format']
worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', wid, fmt)
# https://xlsxwriter.readthedocs.io/worksheet.html#set_column
for ix, col_info in colspec[colspec.hide].iterrows():
xl_col_letter = col_info['xl_col_letter']
worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', None, None, {'hidden': 1})
# Make the sheet banded
make_sheet_banded(worksheet, checklist)
# Set the width, and other properties of a row
# row (int) – The worksheet row (zero indexed).
# height (float) – The row height.
worksheet.set_row(0, 70, None, None)
worksheet.freeze_panes(1, 0) # Freeze the first row.
# header_cell_groups
if header_cell_groups is not None:
for ix, header_cell_group in enumerate(header_cell_groups):
category_criteria = f'=True'
# print(header_cell_group, ix, fmt)
worksheet.conditional_format(header_cell_group,
{'type': 'formula',
'criteria': category_criteria,
'format': choose_format_accent(xlfmts, ix)})
# Write the column headers with the defined format.
for col, col_info in colspec.iterrows():
# fmt = col_info['format']
col_num = list(colspec.index).index(col)
worksheet.write(0, col_num, col, xlfmts[Xlformat.HEADER])
# ***
if additional_sheets is not None:
for sheet_info in additional_sheets:
# print(sheet_info['sheet_name'])
df = sheet_info['data']
df.to_excel(writer, index=False, sheet_name=sheet_info['sheet_name'])
worksheet = writer.sheets[sheet_info['sheet_name']]
make_sheet_banded(worksheet, df)
center_cols = sheet_info['to_center']
for col, wid in sheet_info['widths'].items():
col_index = list(df.columns).index(col)
col_letter = excel_letters[col_index]
fmt = xlfmts[Xlformat.CENTER] if col in center_cols else None
worksheet.set_column(f'{col_letter}:{col_letter}', wid, fmt)
# Set the width, and other properties of a row
# row (int) – The worksheet row (zero indexed).
# height (float) – The row height.
# worksheet.set_row(0, 70, None, None)
worksheet.freeze_panes(1, 0) # Freeze the first row.
# Set the header format
worksheet.write_row(0, 0, list(df.columns), xlfmts[Xlformat.HEADER])
# Write out cells
def expand_group_rows(checklist: pd.DataFrame) -> pd.DataFrame:
# Move group to its own row before the species in its group
temp_checklist = checklist.copy() # local1
fill_values = {'Group': '', 'CommonName': '', 'Rare': '', 'TaxonOrder': 99999}
temp_checklist = temp_checklist.fillna(value=fill_values).sort_values(by=['TaxonOrder'])
# unfill_species_group utterly fails if not sorted by TaxonOrder
temp_checklist = unfill_species_group(temp_checklist).reset_index(drop=True)
group_indices = temp_checklist[temp_checklist.Group != ''].index
# ---------------------------------
expanded_rows = []
group_keep_cols = ['Group', 'TaxonOrder']
for ix, row in temp_checklist.iterrows():
if ix in group_indices:
# insert group row before current row
expanded_rows.append(row.copy()[group_keep_cols])
row['Group'] = ''
# Now add the current row with group blanked out
expanded_rows.append(row)
expanded_checklist = pd.DataFrame(expanded_rows).reset_index(drop=True).fillna('')
return expanded_checklist
def write_local_checklist_with_group(updated_checklist, output_file_path, parameters: dict):
# Together
local3_df = pd.DataFrame()
output_directory_path = output_file_path.parent
excel_rows_per_printed_page = parameters.get('ExcelRowsPerPrintedPage',
EXCEL_ROWS_PER_PRINTED_PAGE)
try:
local2_df = expand_group_rows(updated_checklist)
preferred_order = ['Group', 'CommonName', 'Rare', 'D', 'Total', 'Ad', 'Im',
'Category', 'TaxonOrder', 'NACC_SORT_ORDER', 'ABA_SORT_ORDER',
'Difficulty',
'Adult', 'Immature', 'W-morph', 'B-Morph', 'CountSpecial']
# ToDo: use filter()
newcols = [col for col in preferred_order if col in local2_df.columns]
local2_df['Total'] = '' # Since this is for printing
local3_df, page_breaks = transform_checklist_into_two_columns(local2_df[newcols],
excel_rows_per_printed_page)
fill_values = {'Group': '', 'CommonName': '', 'Rare': '', 'TaxonOrder': 99999,
'Group ': '', 'CommonName ': '', 'Rare ': '', 'TaxonOrder ': 99999}
local3_df = local3_df.fillna(value=fill_values)
# if page_breaks:
# parameters['page_breaks'] = page_breaks
cols_to_hide = ['Category', 'TaxonOrder', 'NACC_SORT_ORDER', 'ABA_SORT_ORDER', 'Rare',
'Adult', 'Immature', 'W-morph', 'B-Morph',
'Difficulty', 'CountSpecial']
write_final_checklist_spreadsheet(local3_df, output_file_path,
parameters,
additional_sheets=None,
cols_to_hide=cols_to_hide,
cols_to_highlight=None) # ['TOTAL']
except Exception as ee:
updated_checklist.to_csv(output_directory_path / f'failure-local1_df.csv', index=False)
if not local3_df.empty:
local3_df.to_csv(output_directory_path / f'failure-local3_df.csv', index=False)
print(f'Failed to write {output_file_path.as_posix()}, {ee}')
traceback.print_exc(file=sys.stdout)
def write_possible_translations_spreadsheet(translations_df, translations_xl_path):
if translations_df.empty:
return None
# LocalSpeciesName eBirdSpeciesName levd match_whole_line regex circle AltName1 Lev2 AltName2
col_widths = [50, 50, 12, 16, 12, 12, 30]
for ix in range(MAX_SPECIES_ALTERNATIVES - 1):
col_widths.append(30)
col_widths.append(10)
xsheet_name = 'Possible Translations'
sheet_names = banded_sheets = [xsheet_name]
center_cols = [col for col in translations_df.columns if col.startswith('lev')]
for col in ['match_whole_line', 'regex', 'circle']:
center_cols.append(col)
with pd.ExcelWriter(translations_xl_path, engine='xlsxwriter') as writer:
translations_df.to_excel(writer, index=False, sheet_name=xsheet_name)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
xlfmts = add_workbook_formats(workbook)
excel_letters = excel_columns()
for sheet_num, sheet_name in enumerate(sheet_names):
worksheet = writer.sheets[xsheet_name]
# Set the column width and format.
widths = col_widths
col_vals = translations_df.columns.values # df_columns[sheet_num].values
for ix, wid in enumerate(widths):
col_letter = excel_letters[ix]
fmt = xlfmts[Xlformat.CENTER] if col_vals[ix] in center_cols else None
worksheet.set_column(f'{col_letter}:{col_letter}', wid, fmt)
if sheet_name in banded_sheets:
make_sheet_banded(worksheet, translations_df)
# Write the column headers with the defined format.
for col_num, value in enumerate(col_vals):
worksheet.write(0, col_num, value, xlfmts[Xlformat.HEADER])
# -----------
def write_nlp_statistics(nlp_statistics, stats_path: Path):
if nlp_statistics.empty:
return None
nlp_statistics = nlp_statistics.copy()
xsheet_name = 'ParsePDF Statistics'
# Columns
# ['family', 'unknown', 'intersections', 'line_token_count', 'line', 'original_line', 'guess',
# 'levd', 'line_len', 'lev_len_pct', 'species_inferred', 'is_species_line',
# 'guess_correct', 'source']
cols_to_center = ['is_group', 'non_avian', 'intersections', 'line_token_count', 'levd',
'tx_line_len',
'species_inferred', 'is_species_line', 'guess_correct', 'source']
column_widths = {
'classification': 20, 'original_line': 45, 'transformed_line': 45, 'species': 45,
'closest_match': 45,
'is_group': 12, 'non_avian': 12, 'intersections': 12, 'line_token_count': 12,
'levd': 11, 'tx_line_len': 11, 'lev_len_pct': 11,
'species_inferred': 14, 'exact_match': 14, 'verified': 14, 'source': 14
}
numeric_cols = ['intersections', 'line_token_count', 'levd', 'line_len']
text_cols = ['classification', 'transformed_line', 'original_line', 'species',
'closest_match'] # force otherwise may interpret original_line as a formula
with pd.ExcelWriter(stats_path.as_posix(), engine='xlsxwriter') as writer:
nlp_statistics.to_excel(writer, index=False, sheet_name=xsheet_name)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
xlfmts = add_workbook_formats(workbook)
# ----------------------------------------------
# Populate col_infos with formatting information
col_infos = {}
excel_letters = excel_columns()
for ix, col in enumerate(nlp_statistics.columns):
col_letter = excel_letters[ix]
col_info = {}
# col_info['hide'] = col in real_cols_to_hide
# col_info['highlight'] = col in real_cols_to_highlight
if col in numeric_cols:
fmt = xlfmts[Xlformat.NUMERIC_CENTERED]
elif col == 'lev_len_pct':
fmt = xlfmts[Xlformat.PERCENTAGE]
elif col in text_cols:
fmt = xlfmts[Xlformat.TEXT]
else:
fmt = xlfmts[Xlformat.CENTER] if col in cols_to_center else None
col_info['format'] = fmt
col_info['width'] = column_widths.get(col, 10)
col_info['xl_col_letter'] = f'{col_letter}'
col_infos[col] = col_info
colspec = pd.DataFrame(col_infos).T
# ----------------------------------------------
worksheet = writer.sheets[xsheet_name]
title = 'NLP Statistics'
worksheet.set_header(f'&C&16&"Times New Roman,Regular"{title}')
# footer_fmt = f'&C&12&"Times New Roman,Regular"'
# worksheet.set_footer(f'{footer_fmt}Region: {region} Party: {party} Date: {dtcstr}')
# https://xlsxwriter.readthedocs.io/page_setup.html
# A common requirement is to fit the printed output to n pages wide but have the
# height be as long as necessary
# worksheet.fit_to_pages(1, 0)
# rare_name_cells = f'G2:G{xl_last_data_row}'
# rarity_criteria = '=EXACT(H2,"X")'
# worksheet.conditional_format(rare_name_cells,
# {'type': 'formula', 'criteria': rarity_criteria, 'format': format_rare})
# Set the column width and format.
# Set formats with e.g. 'C:C'
for col_num, col_info in colspec.iterrows():
xl_col_letter = col_info['xl_col_letter']
wid = col_info['width']
fmt = col_info['format']
worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', wid, fmt)
# https://xlsxwriter.readthedocs.io/worksheet.html#set_column
# for ix, col_info in colspec[colspec.hide].iterrows():
# xl_col_letter = col_info['xl_col_letter']
# worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', None, None, {'hidden': 1})
# Make the sheet banded
make_sheet_banded(worksheet, nlp_statistics)
# Write the column headers with the defined format.
for col, col_info in colspec.iterrows():
# fmt = col_info['format']
col_num = list(colspec.index).index(col)
worksheet.write(0, col_num, col, xlfmts[Xlformat.HEADER])
# Close the Pandas Excel writer and output the Excel file.
# writer.save()
def write_ground_truths(truths, out_path: Path):
if truths.empty:
return None
truths = truths.copy()
xsheet_name = 'Ground Truths'
# Columns
# name Category ABED-1 ABED-1v ABED-2 ABED-2v ABED-3 ABED-3v ABED-4 ABED-4v...
cols_to_center = truths.columns.drop('name').drop('Category') # everything else is centered
column_widths = {'name': 40, 'Category': 10}
for col in cols_to_center:
column_widths[col] = 5 if col.endswith('v') else 11
numeric_cols = cols_to_center
    text_cols = ['name', 'Category']  # force text format so Excel does not interpret values as formulas
xl_last_data_row = truths.shape[0] + 1 # plus 1 is because data starts at row 2
with pd.ExcelWriter(out_path.as_posix(), engine='xlsxwriter') as writer:
truths.to_excel(writer, index=False, sheet_name=xsheet_name)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
xlfmts = add_workbook_formats(workbook)
# ----------------------------------------------
# Populate col_infos with formatting information
col_infos = {}
col_letters = excel_columns()
for ix, col in enumerate(truths.columns):
col_letter = col_letters[ix]
col_info = {}
# col_info['hide'] = col in real_cols_to_hide
# col_info['highlight'] = col in real_cols_to_highlight
if col in numeric_cols:
fmt = xlfmts[Xlformat.NUMERIC_CENTERED]
elif col in text_cols:
fmt = xlfmts[Xlformat.TEXT]
else:
fmt = xlfmts[Xlformat.CENTER] if col in cols_to_center else None
col_info['format'] = fmt
col_info['width'] = column_widths.get(col, 10)
col_info['xl_col_letter'] = f'{col_letter}'
col_infos[col] = col_info
colspec = pd.DataFrame(col_infos).T
# ----------------------------------------------
worksheet = writer.sheets[xsheet_name]
title = 'Ground Truths'
worksheet.set_header(f'&C&16&"Times New Roman,Regular"{title}')
# footer_fmt = f'&C&12&"Times New Roman,Regular"'
# worksheet.set_footer(f'{footer_fmt}Region: {region} Party: {party} Date: {dtcstr}')
# https://xlsxwriter.readthedocs.io/page_setup.html
# A common requirement is to fit the printed output to n pages wide but
# have the height be as long as necessary
# worksheet.fit_to_pages(1, 0)
# Highlight numbers > 0 for species count
last_column_letter = col_letters[truths.shape[1] - 1]
v_total_cell_range = f'C2:{last_column_letter}{xl_last_data_row}'
worksheet.conditional_format(v_total_cell_range,
{'type': 'cell',
'criteria': 'equal to',
'value': True,
'format': xlfmts[Xlformat.GREEN]})
# Set the column width and format.
# Set formats with e.g. 'C:C'
for col_num, col_info in colspec.iterrows():
xl_col_letter = col_info['xl_col_letter']
wid = col_info['width']
fmt = col_info['format']
worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', wid, fmt)
# https://xlsxwriter.readthedocs.io/worksheet.html#set_column
# for ix, col_info in colspec[colspec.hide].iterrows():
# xl_col_letter = col_info['xl_col_letter']
# worksheet.set_column(f'{xl_col_letter}:{xl_col_letter}', None, None, {'hidden': 1})
# Make the sheet banded
make_sheet_banded(worksheet, truths)
# Write the column headers with the defined format.
for col, col_info in colspec.iterrows():
# fmt = col_info['format']
col_num = list(colspec.index).index(col)
worksheet.write(0, col_num, col, xlfmts[Xlformat.HEADER])
def sheet_info_for_party_efforts(df: pd.DataFrame) -> dict:
# ['Party Lead', 'Duration (Hrs)', 'Distance (mi)']
column_widths = {
'Party Lead': 25,
'Duration (Hrs)': 10,
'Distance (mi)': 10
}
columns_to_center = ['Duration (Hrs)', 'Distance (mi)']
sheet_info = {
'sheet_name': 'Individual Efforts',
'data': df,
'widths': column_widths,
'to_center': columns_to_center,
'to_hide': None
}
return sheet_info
def sheet_info_for_party_details(df: pd.DataFrame) -> dict:
# ['locId', 'subId', 'Total', 'Name', 'Observers', 'sharing', 'groupId',
# 'location_group', 'Date/Time', 'url', 'LocationName', 'Duration (Hrs)',
# 'Distance (mi)', 'Distance (km)', 'comments']
column_widths = {'locId': 10, 'subId': 10, 'Total': 10, 'Name': 25, 'Observers': 10,
'sharing': 10, 'groupId': 10, 'location_group': 20, 'Date/Time': 20, 'url': 28,
'LocationName': 25, 'Duration (Hrs)': 10, 'Distance (mi)': 10,
'comments': 60}
columns_to_center = ['locId', 'subId', 'groupId', 'Date/Time', 'Total', 'Observers',
'effortDistanceKm', 'durationHrs', 'sharing', 'location_group']
sheet_info = {
'sheet_name': 'Individual Details',
'data': df,
'widths': column_widths,
'to_center': columns_to_center,
'to_hide': None
}
return sheet_info
def sheet_info_for_rarities(df: pd.DataFrame) -> dict:
# ['locId', 'subId', 'Name', 'obsDt', 'Total', 'CommonName', 'effortDistanceKm',
# 'effortDistanceEnteredUnit', 'durationHrs', 'Observers', 'comments', 'Reason', 'Where']
column_widths = {
'locId': 10, 'subId': 10, 'Name': 25, 'obsDt': 15, 'Total': 8, 'CommonName': 20,
'DistanceMi': 8, 'durationHrs': 10,
'Observers': 8, 'comments': 60, 'Reason': 10, 'Where': 60
}
columns_to_center = ['locId', 'subId', 'obsDt', 'Total', 'Observers',
'DistanceMi', 'durationHrs', 'Reason']
sheet_info = {
'sheet_name': 'Rarities',
'data': df,
'widths': column_widths,
'to_center': columns_to_center,
'to_hide': None
}
return sheet_info
def sheet_info_for_filers(df: pd.DataFrame) -> dict:
# ['locId', 'Name', 'LocationName']
column_widths = {
'locId': 10, 'Name': 25, 'LocationName': 60
}
columns_to_center = ['locId']
sheet_info = {
'sheet_name': 'Filers',
'data': df,
'widths': column_widths,
'to_center': columns_to_center,
'to_hide': None
}
return sheet_info
def sheet_info_for_locations(df: pd.DataFrame) -> dict:
# ['locId', 'Name', 'LocationName']
column_widths = {
'locId': 10, 'Name': 25, 'LocationName': 60
}
columns_to_center = ['locId']
sheet_info = {
        'sheet_name': 'Locations',
'data': df,
'widths': column_widths,
'to_center': columns_to_center,
'to_hide': None
}
return sheet_info
```
#### File: automating-cbc/taxonomy/taxonomy_aba.py
```python
import re
from pathlib import Path
import numpy as np
import pandas as pd
# Base Path
from singleton_decorator import singleton
from typing import Optional
"""
--------------------------------------------------------------------------------
ABA 8.0.7 https://www.aba.org/aba-checklist/ January 2021
ABA_Checklist-8.0.7.csv
--------------------------------------------------------------------------------
"""
@singleton
class TaxonomyABA(object):
""" Taxonomy from ABA
Attributes:
"""
def __init__(self):
taxonomy_base_path = Path(__file__).parent.absolute()
self.taxonomy_reference_path = taxonomy_base_path / 'reference'
self.INVALID_ABA_SORT_ORDER = 999999.1
self._aba_taxonomy_path = self.taxonomy_reference_path / 'ABA_Checklist-8.0.7.csv'
xheader = None
self.aba_taxonomy = pd.read_csv(self._aba_taxonomy_path, dtype=str, header=xheader,
low_memory=False, skiprows=3).fillna('')
self.aba_taxonomy.columns = ['aba_'+xs for xs in ['Group', 'common_name', 'nom_commun',
'scientific_name', 'code4', 'v5']]
# Get rid of all the "Group" rows
        self.aba_taxonomy = self.aba_taxonomy[
            self.aba_taxonomy.aba_common_name != ''].reset_index(drop=True)
self.aba_taxonomy.drop(columns=['aba_Group', 'aba_v5'], inplace=True)
# Add ordering column
# AOS/AOU ordering seems to be the literal order in the checklist, not the id
self.aba_taxonomy['ABA_SORT_ORDER'] = list(self.aba_taxonomy.index.astype(int))
# add lower case column for faster lookups
self.aba_taxonomy['aba_common_name_lower'] = \
self.aba_taxonomy.aba_common_name.apply(lambda xs: xs.lower())
def get_taxonomy(self) -> pd.DataFrame:
return self.aba_taxonomy
def find_local_name_row(self, local_name) -> Optional[pd.Series]:
# Look for exact matches and return a single record
if not local_name:
return None
record = None
try:
local_name_lower = local_name.lower()
            mask = self.aba_taxonomy.aba_common_name_lower == local_name_lower
records = self.aba_taxonomy[mask]
# there should only be one
record = records.iloc[0]
except IndexError:
pass
return record
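# A minimal usage sketch (not part of the original module); it assumes the ABA reference CSV
# is present under ./reference and that 'Tundra Swan' appears in the checklist:
#   tax = TaxonomyABA()
#   row = tax.find_local_name_row('Tundra Swan')
#   if row is not None:
#       print(row.aba_scientific_name, row.ABA_SORT_ORDER)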
```
#### File: automating-cbc/taxonomy/taxonomy.py
```python
import sys
import traceback
from pathlib import Path
from typing import Tuple, Optional, Any, List
from IPython.display import display
# https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
from pandas.api.types import CategoricalDtype
import numpy as np
import pandas as pd
from singleton_decorator import singleton
import numbers
from ebird_extras import EBirdExtra
from taxonomy_clements import TaxonomyClements
from taxonomy_ioc import TaxonomyIOC
from taxonomy_nacc import TaxonomyNACC
from taxonomy_aba import TaxonomyABA
# Base Path
"""
https://ebird.org/science/the-ebird-taxonomy
Spuh: Genus or identification at broad level, e.g., swan sp. Cygnus sp.
Slash: Identification to Species-pair, e.g., Tundra/Trumpeter Swan Cygnus
columbianus/buccinator
Species: e.g., Tundra Swan Cygnus columbianus
ISSF or Identifiable Sub-specific Group: Identifiable subspecies or group of
subspecies, e.g., Tundra Swan (Bewick’s) Cygnus columbianus bewickii or Tundra
Swan (Whistling) Cygnus columbianus columbianus
Hybrid: Hybrid between two species, e.g., Tundra x Trumpeter Swan (hybrid)
Intergrade: Hybrid between two ISSF (subspecies or subspecies groups), e.g.,
Tundra Swan (Whistling x Bewick’s) Cygnus columbianus columbianus x bewickii
Domestic: Distinctly-plumaged domesticated varieties that may be free-flying
(these do not count on personal lists) e.g., Mallard (Domestic type)
Form: Miscellaneous other taxa, including recently-described species yet to be
accepted or distinctive forms that are not universally accepted, e.g.,
Red-tailed Hawk (abieticola), Upland Goose (Bar-breasted)
https://www.birds.cornell.edu/clementschecklist/
Note:
tt[(tt.TAXON_ORDER != tt.Clem_Seq) & (tt.Clem_Seq != '')] is empty, i.e. for records
with Clem_Seq, it matches TAXON_ORDER
"""
"""
Notes on reference sources
The base used for the taxonomy is the eBird/Clements taxonomy, for three main reasons.
- It will match up with species reported through eBird
- It has the taxon_order field for sorting
- It contains hybrids and SPUH entries
--------------------------------------------------------------------------------
Suggested citation for the current version of the Clements Checklist, including the August 2019
Updates and Corrections:
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. 2019. The eBird/Clements Checklist of Birds of the World: v2019. Downloaded from
https://www.birds.cornell.edu/clementschecklist/download/
https://www.birds.cornell.edu/clementschecklist/download/
Three checklists are available. The first is the 2019 edition of the Clements Checklist (Clements
Checklist v2019); the second is the 2019 edition of the eBird taxonomy (eBird v2019); and the third
is the “master” or integrated checklist, which includes all entries in both the Clements Checklist
and the eBird taxonomy.
clements_base = 'https://www.birds.cornell.edu/clementschecklist/wp-content/uploads/2019/08'
Clements Checklist v2019:
{clements_base}/Clements-Checklist-v2019-August-2019.xlsx
eBird Taxonomy v2019:
{clements_base}/eBird_Taxonomy_v2019.xlsx
eBird/Clements Checklist v2019:
{clements_base}/eBird-Clements-v2019-integrated-checklist-August-2019.xlsx
--------------------------------------------------------------------------------
https://www.worldbirdnames.org/new/
<NAME>, <NAME> & <NAME> (Eds). 2020. IOC World Bird List (v10.2).
doi : 10.14344/IOC.ML.10.2.
https://www.worldbirdnames.org/new/ioc-lists/master-list-2/
Comparison of IOC 10.2 with Clements 2019 (<NAME>)
http://www.worldbirdnames.org/IOC%20v10-2%20v%20Clements%202019.xlsx
This is the one we use for IOC as it has both Clements and IOC sequence numbers. This is
also the only one on this site with "tidy" data suitable for data science
Master List
http://www.worldbirdnames.org/master_ioc_list_v10.2.xlsx
Cross reference between IOC 10.2 and Clements v2019, HBW/BL(vol1, vol2), H&M4.1, HBW, Peters,
TiF 3.10, HBW/BirdLife v4 (2019), S&M '93, IOC10.1. Simplified version.
http://www.worldbirdnames.org/IOC_10.2_vs_other_lists.xlsx
http://www.worldbirdnames.org/IOC_Names_File_Plus-10.2_full_ssp.xlsx
--------------------------------------------------------------------------------
OTHERS
The Howard & Moore Complete Checklist of the Birds of the World, 4th Edition
The Trust for Avian Systematics
https://www.aviansystematics.org/index
Not used; not available in electronic form
Zoological Nomenclature Resource
http://www.zoonomen.net
<NAME>, M.D.
--------------------------------------------------------------------------------
"""
MISSING_TAXON_ORDER = 0 # or 99999, depends where we want those to sort
@singleton
class Taxonomy(object):
"""Combined Taxonomy
Attributes:
"""
def __init__(self, cache_path: Path = None, ebird_extra: EBirdExtra = None):
self._cache_path = cache_path
self._ebird_extra = ebird_extra
taxonomy_base_path = Path(__file__).parent.absolute()
self.taxonomy_reference_path = taxonomy_base_path / 'reference'
# Fill these lazily
self.taxonomy = None
self._taxonomy_clements = None # TaxonomyClements().get_taxonomy()
self._taxonomy_ioc = None # TaxonomyIOC().get_taxonomy()
self._taxonomy_nacc = None # TaxonomyNACC().get_taxonomy()
self._taxonomy_aba = None
self._taxonomy_ebird = None
self.INVALID_NACC_SORT_ORDER = 999999.1 # set again from NACC
self.taxonomy = self.get_taxonomy_cached()
def fix_up_merged_taxonomy(self):
self.taxonomy['taxonOrder'] = self.taxonomy['taxonOrder'].fillna(MISSING_TAXON_ORDER)
self.taxonomy['extinct'] = self.taxonomy['extinct'].fillna(False)
self.taxonomy['extinctYear'] = self.taxonomy['extinctYear'].replace(0.0, '')
# self.INVALID_NACC_SORT_ORDER = self._taxonomy_nacc.INVALID_NACC_SORT_ORDER
# Renames
try:
self.taxonomy.rename(columns={'category': 'Category'}, inplace=True)
except AttributeError:
pass
# species should be first, spuh last, the others don't matter
ordered_categories = ['species', 'issf', 'slash', 'hybrid', 'form',
'intergrade', 'domestic', 'spuh']
cat_type = CategoricalDtype(categories=ordered_categories, ordered=True)
# Writing to CSV will strip categorical information, so need to add after reading cache
self.taxonomy.Category = self.taxonomy.Category.astype(cat_type)
# self.taxonomy.NACC_SORT_ORDER.fillna(0, inplace=True)
xdtypes = {
'sciName': str, 'comName': str, 'speciesCode': str, 'Category': str,
'taxonOrder': int,
'bandingCodes': str, 'comNameCodes': str, 'sciNameCodes': str, 'order': str,
'familyComName': str, 'familySciName': str, 'reportAs': str, 'extinct': bool,
'extinctYear': str,
'comNameLower': str, 'sciNameLower': str, 'TAXON_ORDER': int, 'CATEGORY': str,
'SPECIES_CODE': str, 'PRIMARY_COM_NAME': str, 'SCI_NAME': str, 'ORDER1': str,
'FAMILY': str,
'SPECIES_GROUP': str, 'REPORT_AS': str, 'ioc_seq': int,
'ioc_scientific_name': str,
'ioc_common_name': str, 'ioc_clements_seq': int,
'ioc_clements_scientific_name': str,
'ioc_clements_common_name': str, 'ioc_range': str, 'NACC_SORT_ORDER': float,
'ABA_SORT_ORDER': float, 'nacc_id': str,
'nacc_avibase_id': str, 'nacc_rank': str, 'nacc_common_name': str, 'nacc_order': str,
'nacc_family': str, 'nacc_subfamily': str, 'nacc_genus': str, 'nacc_species': str,
'nacc_common_name_lower': str
}
self.taxonomy.ioc_seq = self.taxonomy.ioc_seq.replace('', 0)
self.taxonomy.ioc_clements_seq = self.taxonomy.ioc_clements_seq.replace('', 0)
self.taxonomy.NACC_SORT_ORDER = self.taxonomy.NACC_SORT_ORDER.replace('', 0.0)
self.taxonomy.ABA_SORT_ORDER = self.taxonomy.ABA_SORT_ORDER.replace('', 0.0)
self.taxonomy = self.taxonomy.astype(dtype=xdtypes)
# Fix up any remaining NA values
colnames_numerics_only = self.taxonomy.select_dtypes(include=np.number).columns.tolist()
if 'Category' in colnames_numerics_only:
colnames_numerics_only.remove('Category')
almost_all_cols = list(self.taxonomy.columns)
almost_all_cols.remove('Category')
fill_values = {col: 0 if col in colnames_numerics_only else ''
for col in almost_all_cols}
self.taxonomy.fillna(fill_values, inplace=True)
#
# for col in colnames_numerics_only:
# self.taxonomy[col] = self.taxonomy[col].astype(int)
# for col in self.taxonomy.columns:
# newtype = xdtypes.get(col, str)
# self.taxonomy[col] = self.taxonomy[col].astype(newtype)
def get_taxonomy_cached(self) -> pd.DataFrame:
cached_taxonomy_path = self._cache_path / 'taxonomy_full.csv'
try:
if cached_taxonomy_path.is_file():
self.taxonomy = pd.read_csv(cached_taxonomy_path,
index_col=False, low_memory=False)
self.fix_up_merged_taxonomy()
else:
print(f'Creating full taxonomy cache...')
# EBird API taxonomy is the base
self._taxonomy_ebird = self.get_taxonomy_api_cached()
self.taxonomy = self._taxonomy_ebird.copy()
# print(f'ebird: {self.taxonomy.shape}')
self._taxonomy_clements = TaxonomyClements().get_taxonomy()
self._taxonomy_ioc = TaxonomyIOC().get_taxonomy()
self._taxonomy_nacc = TaxonomyNACC().get_taxonomy()
self._taxonomy_aba = TaxonomyABA().get_taxonomy()
# Now merge in Clements, IOC and NACC checklists
self.taxonomy = self.merge_clements_into_taxonomy()
# print(f'clements: {self.taxonomy.shape}')
self.taxonomy = self.merge_ioc_into_taxonomy()
# print(f'ioc: {self.taxonomy.shape}')
self.taxonomy = self.merge_nacc_into_taxonomy()
# print(f'nacc: {self.taxonomy.shape}')
self.taxonomy = self.merge_aba_into_taxonomy()
self.fix_up_merged_taxonomy()
# print(f'fixu: {self.taxonomy.shape}')
print('Adding synthesized NACC sort orders')
self.add_synthesized_sort_orders('NACC_SORT_ORDER')
print('Adding synthesized ABA sort orders')
self.add_synthesized_sort_orders('ABA_SORT_ORDER')
self.taxonomy.to_csv(cached_taxonomy_path, index=False)
print(f'Written to cache: {self.taxonomy.shape[0]} records')
except Exception as ee:
print(ee)
traceback.print_exc(file=sys.stdout)
# Fill in code4 column
# self.fill_code4s()
# print(f'exit: {self.taxonomy.shape}')
return self.taxonomy
def fill_code4s(self):
code4s = []
for ix, row in self.taxonomy.iterrows():
if row.Category != 'species':
code4s.append(None)
elif len(row.banding_codes) == 1:
code4s.append(list(row.banding_codes)[0])
elif len(row.comname_codes) > 0:
code4s.append(list(row.comname_codes)[0])
else:
code4s.append(None)
self.taxonomy['code4'] = code4s
def get_taxonomy_api_cached(self) -> pd.DataFrame:
taxonomy_df = pd.DataFrame()
cached_taxonomy_path = self._cache_path / 'taxonomy_ebird_api.csv'
try:
if cached_taxonomy_path.is_file():
taxonomy_df = pd.read_csv(cached_taxonomy_path, index_col=False)
else:
print(f'Creating eBird taxonomy cache...')
taxonomy_df = self._ebird_extra.get_taxonomy_from_ebird()
taxonomy_df['comNameLower'] = taxonomy_df.comName.apply(lambda x: x.lower())
taxonomy_df['sciNameLower'] = taxonomy_df.sciName.apply(lambda x: x.lower())
taxonomy_df.to_csv(cached_taxonomy_path, index=False)
except Exception as ee:
print(ee)
traceback.print_exc(file=sys.stdout)
return taxonomy_df
def find_local_name(self, local_name) -> \
Tuple[Optional[Any], Optional[Any], Optional[Any], Optional[Any]]:
record = self.find_local_name_row(local_name)
        if record is None:
return None, None, None, None
return record.comName, record.TAXON_ORDER, record.SPECIES_GROUP, record.NACC_SORT_ORDER
def find_local_name_row(self, common_name) -> Optional[pd.Series]:
# Look for exact matches
if not common_name:
return None
record = None
try:
common_name_lower = common_name.lower()
mask = self.taxonomy.comNameLower == common_name_lower
records = self.taxonomy[mask]
record = records.iloc[0]
except IndexError:
pass
return record
def find_scientific_name_row(self, scientific_name) -> Optional[pd.Series]:
# Look for exact matches
if not scientific_name:
return None
record = None
try:
scientific_name_lower = scientific_name.lower()
mask = self.taxonomy.sciNameLower == scientific_name_lower
records = self.taxonomy[mask]
record = records.iloc[0]
except IndexError:
pass
return record
# @property
# def local_to_ebird_translations(self):
# return self._local_to_ebird_translations
def species6_to_common_name(self, species6):
commonname = species6
try:
commonname = self.taxonomy[self.taxonomy.speciesCode == species6.lower()].iloc[
0].comName
except Exception as ee:
print(f'{species6} not found: {ee}')
traceback.print_exc(file=sys.stdout)
return commonname
# def species6_to_common_name_aou(self, species6):
# commonname = species6
# try:
# species6u = species6.upper()
# commonname = aou_codes[aou_codes.SPEC6 == species6.upper()][0].COMMONNAME
# except Exception as ee:
# print(f'{species6} not found: {ee}')
#
# return commonname
def find_species6_ebird(self, common_name):
try:
# common_name_u = common_name.upper()
# commonname = ebird_taxonomy[ebird_taxonomy.SPECIES_CODE ==
# species6.lower()].iloc[0].COMMON_NAME
# ebird-api uses speciesCode
species6 = self.taxonomy[self.taxonomy.comName == common_name].iloc[0].speciesCode
except Exception as ee:
# print(f'{common_name} not found: {ee} [find_species6_ebird]')
species6 = None
return species6
def merge_clements_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_clements,
left_on='comName',
right_on='PRIMARY_COM_NAME', how='left').fillna('')
return self.taxonomy
def merge_ioc_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_ioc, left_on='comName',
right_on='ioc_clements_common_name',
how='left').fillna('')
return self.taxonomy
def merge_nacc_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_nacc, left_on='comName',
right_on='nacc_common_name', how='left').fillna('')
return self.taxonomy
def merge_aba_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_aba, left_on='comName',
right_on='aba_common_name', how='left').fillna('')
return self.taxonomy
def get_nacc_taxonomy(self) -> pd.DataFrame:
return self._taxonomy_nacc
# -------------------------------- NACC Ordering --------------------------------------------
@staticmethod
def identify_family_sort_orders(family: pd.DataFrame, sort_col: str) -> list:
# family e.g. 'Grebes'
need_order = family[family.Category != 'species']
family_species = family[family.Category == 'species'] # .reset_index(drop=True)
sort_orders = []
base_sort_order = 0
for ix, row in need_order.iterrows():
try:
base_sort_order = 0
if row.Category == 'spuh':
base_sort_order = max(family[sort_col])
else:
bc_mask = [len(row.comname_codes & bc) > 0 for bc in
family_species.banding_codes]
if any(bc_mask):
mask = bc_mask
else:
cn_mask = [len(row.comname_codes & bc) > 0 for bc in
family_species.comname_codes]
mask = cn_mask or bc_mask
parents = family_species[mask]
if not parents.empty:
base_sort_order = max(parents[sort_col])
# "diurnal raptor sp." is weird
if not isinstance(base_sort_order, numbers.Number):
base_sort_order = 0
if base_sort_order > 0:
sort_orders.append({'comNameLower': row.comNameLower,
sort_col: base_sort_order,
'Category': row.Category})
except Exception as ee:
print(ee)
display(row)
display(family)
print(f'base_sort_order: {base_sort_order}, type: {type(base_sort_order)}')
raise
return sort_orders
def add_synthesized_sort_orders(self, sort_col: str):
# Only species have NACC sort orders, so make up some for issf, slash, etc.
# this takes 16s to run, so try to cache results. Same for ABA
# SECURITY WARNING: using eval. Trust your taxonomy file.
# These columns contain Python objects, so not appropriate for CSV file
self.taxonomy['banding_codes'] = [set(eval(cnc)) for cnc in self.taxonomy.bandingCodes]
self.taxonomy['comname_codes'] = [set(eval(cnc)) for cnc in self.taxonomy.comNameCodes]
sort_orders = []
for ix, group in enumerate(self.taxonomy.groupby(['order', 'familyComName'])):
fam_order, grp = group
# order, family = fam_order
familydf = grp # .reset_index(drop=True)
grp_sort_orders = self.identify_family_sort_orders(familydf, sort_col)
sort_orders.extend(grp_sort_orders)
# print(f'len(sort_orders): {len(sort_orders)}')
sort_orders_df = pd.DataFrame(sort_orders)
# print(f'sort_orders_df: {sort_orders_df.columns}')
addl_sort_orders = sort_orders_df.groupby(sort_col)[sort_col].transform(
self.smear_orders)
        # assign with ** so the column named by the sort_col variable is updated,
        # rather than creating a new literal column called 'sort_col'
        sort_orders_df = sort_orders_df.assign(**{sort_col: addl_sort_orders})
# Now set those rows in taxonomy
# mask = [(cn in list(sort_orders_df.comNameLower)) for cn in self.taxonomy.comNameLower]
# self.taxonomy.loc[mask, 'NACC_SORT_ORDER'] = list(sort_orders_df.NACC_SORT_ORDER)
# Crappy way, the proper way is eluding me
for ix, row in sort_orders_df.iterrows():
self.taxonomy.loc[self.taxonomy.comNameLower == row.comNameLower,
sort_col] = row[sort_col]
# Cleanup
self.taxonomy.drop(labels=['banding_codes', 'comname_codes'], axis=1, inplace=True)
# https://stackoverflow.com/questions/59951415/how-do-you-replace-duplicate-values-with-multiple-unique-strings-in-pandas
@staticmethod
def smear_orders(orders):
"""
On input, all elements of orders have the same value, e.g. 777
This routine smears them across a range so that we would have something like
[777.01, 777.02, 777.03, ...]
:param orders:
:return:
"""
nn = len(orders)
step_size = 0.01 if nn > 9 else 0.1
addends = np.linspace(step_size, nn * step_size, nn)
return orders.radd(addends)
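    # Illustrative sketch of the smearing above (hypothetical values): three ties at 777.0
    # become distinct, ordered values:
    #   smear_orders(pd.Series([777.0, 777.0, 777.0]))  ->  roughly [777.1, 777.2, 777.3]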
# -------------------------------- ISSF Helpers --------------------------------------------
# https://support.ebird.org/en/support/solutions/articles/48000837816-the-ebird-taxonomy
# Subspecies (ISSF or Identifiable Sub-specific Group): Identifiable subspecies or group
# of subspecies, e.g., Tundra Swan (Bewick’s) or Tundra Swan (Whistling)
def filter_issf(self, common_names: List[str]) -> List[str]:
issfs = []
for cn in common_names:
row = self.find_local_name_row(cn)
if row is not None and row.Category == 'issf':
issfs.append(cn)
return issfs
def report_as(self, common_name: str) -> Optional[str]:
row = self.find_local_name_row(common_name)
base_species = self.taxonomy[self.taxonomy.speciesCode == row.reportAs]
if base_species.empty:
return None
else:
return base_species.comNameLower.values[0]
def filter_species(self, common_names: List[str]) -> List[str]:
species = []
for cn in common_names:
row = self.find_local_name_row(cn)
if row is not None and row.Category == 'species':
species.append(cn)
return species
``` |
{
"source": "jhurley13/DistributedBirding",
"score": 3
} |
#### File: jhurley13/DistributedBirding/xruxidownload.py
```python
__author__ = "github.com/ruxi"
__license__ = "MIT"
import requests
import tqdm # progress bar
import os.path
def download_file(url, filename=False, verbose=False):
"""
Download file with progressbar
Usage:
download_file('http://web4host.net/5MB.zip')
"""
if not filename:
local_filename = os.path.join(".", url.split('/')[-1])
else:
local_filename = filename
try:
r = requests.get(url, stream=True)
# print(r.headers)
file_size = int(r.headers.get('Content-Length', 1000000))
chunk = 1
chunk_size = 1024
num_bars = int(file_size / chunk_size)
if verbose:
print(dict(file_size=file_size))
print(dict(num_bars=num_bars))
# leave=True progressbar stays
with open(local_filename, 'wb') as fp:
for chunk in tqdm.tqdm(
r.iter_content(chunk_size=chunk_size)
, total=num_bars
, unit='KB'
, desc=local_filename
, leave=True):
fp.write(chunk)
except Exception as ee:
print(r.headers)
print(ee)
raise
return
``` |
{
"source": "jhurley13/weather_report",
"score": 3
} |
#### File: jhurley13/weather_report/weather_summary.py
```python
import pandas as pd
import numpy as np
from pathlib import Path
import re
import json
import requests
import geog
from datetime import tzinfo, timedelta, datetime, date
# ## Constants and Globals
# Constants and Globals
KM_PER_MILE = 1.60934
MILES_PER_KILOMETER = 0.62137119
# # Code
def meters_to_miles(xmeters: float):
return (xmeters / 1000) * MILES_PER_KILOMETER
# https://openweathermap.org/api/one-call-api
# https://api.openweathermap.org/data/2.5/onecall?lat=33.441792&lon=-94.037689&exclude=hourly,daily&appid={YOUR API KEY}
def weather_at_location(latitude, longitude, api_key) -> pd.DataFrame:
results = pd.DataFrame()
try:
api_url_base = 'https://api.openweathermap.org/data/2.5/onecall'
exclusions = ','.join(['minutely'])
url = api_url_base
# For temperature in Fahrenheit use units=imperial
params = {
'lat': str(latitude),
'lon': str(longitude),
'units': 'imperial',
'exclude': exclusions,
'appid': str(api_key)
}
rr = requests.get(url, params=params, headers=None, stream=True) # headers=api_auth_header
if rr.status_code == requests.codes.ok:
results = rr.json()
rr.raise_for_status()
except Exception as ee:
print(ee)
return results
# http://api.openweathermap.org/data/2.5/onecall/timemachine?lat=60.99&lon=30.9&dt=1586468027&appid={YOUR API KEY}
def weather_at_location_history(latitude, longitude, timestamp, api_key) -> pd.DataFrame:
results = pd.DataFrame()
try:
api_url_base = 'https://api.openweathermap.org/data/2.5/onecall/timemachine'
exclusions = ','.join(['minutely'])
url = api_url_base
# For temperature in Fahrenheit use units=imperial
params = {
'lat': str(latitude),
'lon': str(longitude),
'dt': int(timestamp),
'units': 'imperial',
'exclude': exclusions,
'appid': str(api_key)
}
rr = requests.get(url, params=params, headers=None, stream=True) # headers=api_auth_header
if rr.status_code == requests.codes.ok:
results = rr.json()
rr.raise_for_status()
except Exception as ee:
print(ee)
return results
def weather_at_location_x(latitude, longitude, api_key):
results = weather_at_location(latitude, longitude, api_key)
return transform_weather_results(results)
def weather_at_location_history_x(latitude, longitude, timestamp, api_key):
results = weather_at_location_history(latitude, longitude, timestamp, api_key)
return transform_weather_results(results)
def transform_weather_results(results):
daily = pd.DataFrame()
hourly = pd.DataFrame()
current = pd.DataFrame()
# -------------------- Daily --------------------
daily_json = results.get('daily', None)
if daily_json:
daily = pd.json_normalize(daily_json).fillna('')
# For some reason, sunrise & sunset already are in local time?
for col, tz_offset in [('dt', results['timezone_offset']), ('sunrise', 0), ('sunset', 0)]:
daily[col] = daily[col].apply(convert_timestamp_to_local_str, tz_offset=tz_offset)
# -------------------- Hourly --------------------
hourly_json = results.get('hourly', None)
if hourly_json:
hourly = pd.json_normalize(hourly_json).fillna('')
hourly['dt'] = hourly['dt'].apply(convert_timestamp_to_local_str, tz_offset=0)
# -------------------- Current --------------------
current_json = results.get('current')
if current_json:
current = pd.json_normalize(current_json).fillna('')
# For some reason, dt, sunrise & sunset already are in local time?
for col in ['dt', 'sunrise', 'sunset']:
current[col] = current[col].apply(convert_timestamp_to_local_str, tz_offset=0)
return results, current, daily, hourly
def convert_timestamp_to_local_str(ts, tz_offset) -> str:
return datetime.fromtimestamp(ts + tz_offset).strftime('%Y-%m-%d %H:%M:%S')
def wind_direction_degrees_to_text(wind_direction: float) -> str:
# See http://snowfence.umn.edu/Components/winddirectionanddegrees.htm
section_degrees = (360 / 16)
half_bin = (section_degrees / 2)
north_bin_start = 360 - (section_degrees / 2)
# We rotate by a half_bin to make bins monotonically increasing
wbins = [(north_bin_start + ix * section_degrees + half_bin) % 360 for ix in range(17)]
# to make cut work
wbins[-1] = 360
dir_labels = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
rx = pd.cut([(wind_direction + half_bin) % 360], wbins, labels=dir_labels)
return rx[0]
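# Quick sanity examples for the binning above (hand-checked against the bin edges):
#   wind_direction_degrees_to_text(0.0)    -> 'N'
#   wind_direction_degrees_to_text(45.0)   -> 'NE'
#   wind_direction_degrees_to_text(181.25) -> 'S'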
# Glyph for ℉ looks too compressed, use °F instead
def conditions_summary(cond: dict) -> str:
conditions = ''
temp = cond.get("temp")
if temp:
conditions += f'Temperature: {temp} °F\n'
temp_min = cond.get("temp_min")
if temp_min:
conditions += f'Low temperature: {temp_min} °F\n'
temp_max = cond.get("temp_max")
if temp_max:
conditions += f'High temperature: {temp_max} °F\n'
wind_deg = cond.get('wind_deg', None)
wind_dir_str = f' from {wind_direction_degrees_to_text(wind_deg)}' if wind_deg else ''
conditions += f'Wind: {cond["wind_speed"]} mph{wind_dir_str}\n'
rain = cond.get("rain", 0)
if rain == '':
rain = 0
if rain > 0:
conditions += f'Rain: {rain} mm\n'
snow = cond.get("snow", 0)
if snow == '':
snow = 0
if snow > 0:
conditions += f'Snow: {snow} mm\n'
humidity = cond.get("humidity", 0)
if humidity == '':
humidity = 0
if humidity > 0:
conditions += f'Humidity: {humidity} %\n'
pct_cloudy = cond["clouds"]
weather_description = ', '.join([w["description"] for w in cond["weather"]])
conditions += f'Description: {weather_description}, {pct_cloudy:.0f}% cloudy\n'
sunrise = cond.get("sunrise")
if sunrise:
conditions += f'Sunrise: {sunrise}\n'
sunset = cond.get("sunset")
    if sunset:
conditions += f'Sunset : {sunset}\n'
return conditions
def summary_weather_report(results, daily_df, current_df, reporting_location, actual_hourly) -> str:
summary = ''
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
utoday = datetime.timestamp(today)
current = current_df.iloc[0].to_dict()
daily = daily_df.iloc[0].to_dict()
del daily["sunrise"]
del daily["sunset"]
summary += f'Current Conditions {current["dt"]}\n{conditions_summary(current)}\n'
summary += f'Forecast {current["dt"]}\n{conditions_summary(daily)}\n'
for hr in [7, 10, 13, 16]: # 7,8,11,12,15,16
dt_str = today.replace(hour=hr, minute=0, second=0, microsecond=0).strftime('%Y-%m-%d %H:%M:%S')
df = actual_hourly[actual_hourly.dt == dt_str]
if df.shape[0] > 0:
summary += f'Conditions at {dt_str}\n'
summary += conditions_summary(df.iloc[0].to_dict())
summary += '\n'
weather_station_location = (results["lat"], results["lon"]) # 1.0001*lat to move a bit
summary += f'Weather station location: {weather_station_location}\n'
summary += f'Reporting location : {reporting_location}\n'
    # distance in meters; geog expects points as (lon, lat), while these tuples are (lat, lon)
    dist_to_station_m = geog.distance(
        (weather_station_location[1], weather_station_location[0]),
        (reporting_location[1], reporting_location[0]))
dist_to_station = meters_to_miles(dist_to_station_m)
summary += f'Reporting location is {dist_to_station:.2f} miles from weather station\n'
return summary
def create_weather_summary(reporting_location: tuple, openweather_api_key) -> str:
# reporting_location is a tuple: (latitude, longitude)
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
utoday = datetime.timestamp(today)
results, current, daily, hourly = weather_at_location_x(*reporting_location, api_key=openweather_api_key)
hist_results, actual_current, actual_daily, actual_hourly = weather_at_location_history_x(*reporting_location,
timestamp=utoday,
api_key=openweather_api_key)
summary = summary_weather_report(results, daily, current, reporting_location, actual_hourly)
return summary
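# A hedged usage sketch; the coordinates and key below are placeholders, and a real
# OpenWeather One Call API key is required:
#   report = create_weather_summary((37.3861, -122.0839), 'YOUR_OPENWEATHER_API_KEY')
#   print(report)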
```
#### File: jhurley13/weather_report/xutilities.py
```python
from collections.abc import Iterable
import yaml
def flatten(items):
"""Yield items from any nested iterable; see Reference."""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
for sub_x in flatten(x):
yield sub_x
else:
yield x
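# Example of the generator above (strings/bytes are treated as atoms, not iterables):
#   list(flatten([1, [2, [3, 'abc']], (4,)]))  ->  [1, 2, 3, 'abc', 4]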
# File looks like
# credentials:
# username: myusername
# password: <PASSWORD>
# app_id: c9e43f93cdff4ff59a7de17c4219a0f414929b48c0234c818400c1f67da24564
# app_secret: 0593518c7a044fa58907f3355082f16290662698e5bc497aa48e038bc3e212ec
#
# Note: hex values generated as follows (random each time):
# import uuid
# print('{}{}'.format(uuid.uuid4().hex, uuid.uuid4().hex))
# print('{}{}'.format(uuid.uuid4().hex, uuid.uuid4().hex))
#
# Alternatively:
# import secrets
# secrets.token_hex(32)
def load_credentials(config_file):
with open(config_file, 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
return cfg
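# A minimal usage sketch, assuming a YAML file shaped like the comment above
# (the filename is a placeholder):
#   cfg = load_credentials('credentials.yml')
#   api_key = cfg['credentials']['app_id']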
``` |
{
"source": "jhurreaq/PyBaMM",
"score": 3
} |
#### File: pybamm/models/standard_variables.py
```python
import pybamm
import numpy as np
class StandardVariables:
def __init__(self):
# Discharge capacity
self.Q = pybamm.Variable("Discharge capacity [A.h]")
# Electrolyte concentration
self.c_e_n = pybamm.Variable(
"Negative electrolyte concentration",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, np.inf),
)
self.c_e_s = pybamm.Variable(
"Separator electrolyte concentration",
domain="separator",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, np.inf),
)
self.c_e_p = pybamm.Variable(
"Positive electrolyte concentration",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, np.inf),
)
self.c_e_av = pybamm.Variable(
"X-averaged electrolyte concentration",
domain="current collector",
bounds=(0, np.inf),
)
# Electrolyte porosity times concentration
self.eps_c_e_n = pybamm.Variable(
"Negative electrode porosity times concentration",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, np.inf),
)
self.eps_c_e_s = pybamm.Variable(
"Separator porosity times concentration",
domain="separator",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, np.inf),
)
self.eps_c_e_p = pybamm.Variable(
"Positive electrode porosity times concentration",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, np.inf),
)
self.eps_c_e_av = pybamm.Variable(
"X-averaged porosity times concentration",
domain="current collector",
bounds=(0, np.inf),
)
# Electrolyte potential
self.phi_e_n = pybamm.Variable(
"Negative electrolyte potential",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.phi_e_s = pybamm.Variable(
"Separator electrolyte potential",
domain="separator",
auxiliary_domains={"secondary": "current collector"},
)
self.phi_e_p = pybamm.Variable(
"Positive electrolyte potential",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
)
# Electrode potential
self.phi_s_n = pybamm.Variable(
"Negative electrode potential",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.phi_s_p = pybamm.Variable(
"Positive electrode potential",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
)
# Potential difference
self.delta_phi_n = pybamm.Variable(
"Negative electrode surface potential difference",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.delta_phi_p = pybamm.Variable(
"Positive electrode surface potential difference",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.delta_phi_n_av = pybamm.Variable(
"X-averaged negative electrode surface potential difference",
domain="current collector",
)
self.delta_phi_p_av = pybamm.Variable(
"X-averaged positive electrode surface potential difference",
domain="current collector",
)
# current collector variables
self.phi_s_cn = pybamm.Variable(
"Negative current collector potential", domain="current collector"
)
self.phi_s_cp = pybamm.Variable(
"Positive current collector potential", domain="current collector"
)
self.i_boundary_cc = pybamm.Variable(
"Current collector current density", domain="current collector"
)
self.phi_s_cn_composite = pybamm.Variable(
"Composite negative current collector potential", domain="current collector"
)
self.phi_s_cp_composite = pybamm.Variable(
"Composite positive current collector potential", domain="current collector"
)
self.i_boundary_cc_composite = pybamm.Variable(
"Composite current collector current density", domain="current collector"
)
# Particle concentration
self.c_s_n = pybamm.Variable(
"Negative particle concentration",
domain="negative particle",
auxiliary_domains={
"secondary": "negative electrode",
"tertiary": "current collector",
},
bounds=(0, 1),
)
self.c_s_p = pybamm.Variable(
"Positive particle concentration",
domain="positive particle",
auxiliary_domains={
"secondary": "positive electrode",
"tertiary": "current collector",
},
bounds=(0, 1),
)
self.c_s_n_xav = pybamm.Variable(
"X-averaged negative particle concentration",
domain="negative particle",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.c_s_p_xav = pybamm.Variable(
"X-averaged positive particle concentration",
domain="positive particle",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.c_s_n_rav = pybamm.Variable(
"R-averaged negative particle concentration",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.c_s_p_rav = pybamm.Variable(
"R-averaged positive particle concentration",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.c_s_n_rxav = pybamm.Variable(
"R-X-averaged negative particle concentration",
domain="current collector",
bounds=(0, 1),
)
self.c_s_p_rxav = pybamm.Variable(
"R-X-averaged positive particle concentration",
domain="current collector",
bounds=(0, 1),
)
self.c_s_n_surf = pybamm.Variable(
"Negative particle surface concentration",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.c_s_p_surf = pybamm.Variable(
"Positive particle surface concentration",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.c_s_n_surf_xav = pybamm.Variable(
"X-averaged negative particle surface concentration",
domain="current collector",
bounds=(0, 1),
)
self.c_s_p_surf_xav = pybamm.Variable(
"X-averaged positive particle surface concentration",
domain="current collector",
bounds=(0, 1),
)
# Average particle concentration gradient (for polynomial particle concentration
# models). Note: we make the distinction here between the flux defined as
# N = -D*dc/dr and the concentration gradient q = dc/dr
self.q_s_n_rav = pybamm.Variable(
"R-averaged negative particle concentration gradient",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.q_s_p_rav = pybamm.Variable(
"R-averaged positive particle concentration gradient",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.q_s_n_rxav = pybamm.Variable(
"R-X-averaged negative particle concentration gradient",
domain="current collector",
)
self.q_s_p_rxav = pybamm.Variable(
"R-X-averaged positive particle concentration gradient",
domain="current collector",
)
# Porosity
self.eps_n = pybamm.Variable(
"Negative electrode porosity",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.eps_s = pybamm.Variable(
"Separator porosity",
domain="separator",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
self.eps_p = pybamm.Variable(
"Positive electrode porosity",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
bounds=(0, 1),
)
# Piecewise constant (for asymptotic models)
self.eps_n_pc = pybamm.Variable(
"X-averaged negative electrode porosity",
domain="current collector",
bounds=(0, 1),
)
self.eps_s_pc = pybamm.Variable(
"X-averaged separator porosity", domain="current collector", bounds=(0, 1)
)
self.eps_p_pc = pybamm.Variable(
"X-averaged positive electrode porosity",
domain="current collector",
bounds=(0, 1),
)
# Temperature
self.T_cn = pybamm.Variable(
"Negative currents collector temperature", domain="current collector"
)
self.T_n = pybamm.Variable(
"Negative electrode temperature",
domain="negative electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.T_s = pybamm.Variable(
"Separator temperature",
domain="separator",
auxiliary_domains={"secondary": "current collector"},
)
self.T_p = pybamm.Variable(
"Positive electrode temperature",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
)
self.T_cp = pybamm.Variable(
"Positive currents collector temperature", domain="current collector"
)
self.T_av = pybamm.Variable(
"X-averaged cell temperature", domain="current collector"
)
self.T_vol_av = pybamm.Variable("Volume-averaged cell temperature")
# SEI variables
self.L_inner_av = pybamm.Variable(
"X-averaged inner SEI thickness",
domain="current collector",
)
self.L_inner = pybamm.Variable(
"Inner SEI thickness",
domain=["negative electrode"],
auxiliary_domains={"secondary": "current collector"},
)
self.L_outer_av = pybamm.Variable(
"X-averaged outer SEI thickness",
domain="current collector",
)
self.L_outer = pybamm.Variable(
"Outer SEI thickness",
domain=["negative electrode"],
auxiliary_domains={"secondary": "current collector"},
)
def __setattr__(self, name, value):
value.print_name = name
super().__setattr__(name, value)
standard_variables = StandardVariables()
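# Illustrative effect of the __setattr__ hook above: every variable picks up its attribute
# name as print_name, e.g.
#   standard_variables.Q.print_name == 'Q'
#   standard_variables.c_e_n.print_name == 'c_e_n'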
```
#### File: electrode/ohm/li_metal_explicit.py
```python
from .base_ohm import BaseModel
class LithiumMetalExplicit(BaseModel):
"""Explicit model for potential drop across a lithium metal electrode.
Parameters
----------
param : parameter class
The parameters to use for this submodel
options : dict, optional
A dictionary of options to be passed to the model.
**Extends:** :class:`pybamm.electrode.ohm.BaseModel`
"""
def __init__(self, param, options=None):
super().__init__(param, "Negative", options=options)
def get_coupled_variables(self, variables):
param = self.param
i_boundary_cc = variables["Current collector current density"]
T_n = variables["Negative current collector temperature"]
l_n = param.l_n
delta_phi_s = i_boundary_cc * l_n / param.sigma_n(T_n)
delta_phi_s_dim = param.potential_scale * delta_phi_s
variables.update(
{
"Negative electrode potential drop": delta_phi_s,
"Negative electrode potential drop [V]": delta_phi_s_dim,
"X-averaged negative electrode ohmic losses": delta_phi_s / 2,
"X-averaged negative electrode ohmic losses [V]": delta_phi_s_dim / 2,
}
)
return variables
def set_boundary_conditions(self, variables):
pass
``` |
{
"source": "JHurricane96/1000Langs",
"score": 3
} |
#### File: 1000Langs/biblecrawler/general_crawler.py
```python
__author__ = "<NAME>"
__license__ = "Apache 2"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__website__ = "https://llp.berkeley.edu/asgari/"
__git__ = "https://github.com/ehsanasgari/"
__email__ = "<EMAIL>"
__project__ = "1000Langs -- Super parallel project at CIS LMU"
#!/usr/bin/env python3
"""Crawl bibles hosted on http://pngscriptures.org."""
'''
This code is largely inspired/adapted from Michael Cysouw's crawling code
'''
import sys
sys.path.append('../')
import os.path
import sys
import time
import urllib
from urllib.parse import urljoin, urlsplit
from utility.file_utility import FileUtility
import requests
from lxml import html, etree
class BibleCrawler(object):
SLEEPTIME = 0 # seconds
log = []
def run_crawler(self, nextpath, url, destination_directory, website='generic'):
'''
:param nextpath:
:param url:
:param destination_directory:
:param website:
:return:
'''
self.url=url
self.nextpath = nextpath
self.website = website
self.counter=-1
session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5'})
if self.print:
print(time.strftime('%H:%M:%S'), url, file=sys.stderr)
self.seen = set()
self.useless_url = set()
flag=True
count=0
while flag and count <2000:
if (url in self.seen and not self.website == 'PNG') or (self.website == 'PNG' and self.counter>=1188):
if self.print:
print('Break on seen url:', url, file=sys.stderr)
BibleCrawler.log.append('\t'.join(['Break on seen url:', str(url)]))
flag=False
break
self.seen.add(url)
if self.print:
print(url)
for i in range(3):
try:
response = session.get(url)
break
except Exception as e:
if self.print:
print("Unable to reach: ", url, " : ", e)
return
if response.status_code != requests.codes.ok:
if self.print:
print('Error', url, response.url, response.status_code, file=sys.stderr)
print(time.strftime('%H:%M:%S'), url, file=sys.stderr)
BibleCrawler.log.append(
'\t'.join([ 'Error', str(url), str(response.url), str(response.status_code)]))
if self.website == 'PNG':
url=self.jump_url()
if not url:
flag=False
return
else:
flag=False
return
self.save_response(response, destination_directory)
url = self.get_next_url(response)
if not url or not url.startswith('http'):
if self.print:
print('Break on invalid url:', url, file=sys.stderr)
BibleCrawler.log.append('\t'.join(['Break on invalid url:', str(url)]))
if self.website == 'PNG' and self.counter>=1188:
url=self.jump_url()
else:
flag=False
break
if BibleCrawler.SLEEPTIME>0:
time.sleep(BibleCrawler.SLEEPTIME)
count+=1
if self.print:
print(time.strftime('%H:%M:%S'), url, file=sys.stderr)
def jump_url(self):
'''
:return:
'''
while self.counter < 1188:
self.counter+=1
url_select='/'.join(self.url.split('/')[0:-1])+'/'+FileUtility.load_list('../meta/pngscript_filenames.txt')[self.counter]
if url_select not in self.seen and url_select not in self.useless_url:
if requests.get(url_select).status_code==404:
if requests.get('/'.join(self.url.split('/')[0:-1])).status_code==404:
self.counter=1189
return None
self.useless_url.add(url_select)
else:
url=url_select
self.useless_url.add(url)
return url
return None
def get_filename(self, url, base_dir):
'''
:param url:
:param base_dir:
:return:
'''
"""Derive a filename from the given URL"""
parts = urlsplit(url)
path_parts = parts.path.split('/')
if path_parts[-1] == '':
path_parts.pop()
path_parts[-1] += '.html'
dir_name = os.path.join(base_dir, *path_parts[1:-1])
if not os.access(dir_name, os.F_OK):
os.makedirs(dir_name)
filename = os.path.join(dir_name, path_parts[-1])
return filename
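    # Example of the filename mapping above (hypothetical URL and base_dir):
    #   get_filename('http://pngscriptures.org/abc/GEN/1', 'corpus')
    #   -> 'corpus/abc/GEN/1.html'  (intermediate directories are created if missing)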
def save_response(self, response, base_dir):
'''
:param response:
:param base_dir:
:return:
'''
filename = self.get_filename(response.url, base_dir)
if self.website == 'JW':
# try to save only a part of the response
tree = html.fromstring(response.content)
text_divs = tree.xpath('//div[@id="bibleText"]')
text_div = text_divs[0] if text_divs else None
if text_div is not None:
with open(filename, 'wb') as f:
f.write(etree.tostring(text_div))
else:
with open(filename, 'wb') as f:
                    f.write(response.content)
elif self.website == "bible.com":
# try to save only a part of the response
tree = html.fromstring(response.content)
text_divs = tree.xpath('//div[contains(@class,"yv-bible-text")]')
text_div = text_divs[0] if text_divs else None
if text_div is not None:
with open(filename, 'wb') as f:
f.write(etree.tostring(text_div))
else:
with open(filename, 'wb') as f:
                    f.write(response.content)
else:
with open(filename, 'wb') as f:
f.write(response.content)
def get_next_url(self, response):
tree = html.fromstring(response.content)
if self.website == 'JW':
link = tree.xpath('//div[@class="navLinkNext"]/a/@href')
if link:
url = urllib.parse.urljoin(response.url, link[0])
else:
url = None
return url
elif self.website == 'bible.com' or self.website=='bible.org':
xpath_result = tree.xpath(
'//a[contains(@class, "bible-nav-button nav-right fixed dim br-100 ba b--black-20 pa2 pa3-m flex items-center justify-center bg-white right-1")]//@href')
relevant = xpath_result[0] if len(xpath_result) >= 1 else None
# mydivs = soup.findAll("a", {"class": 'bible-nav-button nav-right fixed dim br-100 ba b--black-20 pa2 pa3-m flex items-center justify-center bg-white right-1'})
# print ('Yes', 'bible-nav-button nav-right fixed dim br-100 ba b--black-20 pa2 pa3-m flex items-center justify-center bg-white right-1' in str(response.content))
elif self.website == 'generic' or self.website == 'PNG':
xpath_result = list(set(tree.xpath(self.nextpath)))
relevant = xpath_result[0] if len(xpath_result) == 1 else None
if relevant:
return urljoin(response.url, relevant)
else:
return None
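# A hedged usage sketch (hypothetical start URL and XPath); note that the class reads
# self.print but never sets it, so callers are expected to assign it first:
#   crawler = BibleCrawler()
#   crawler.print = False
#   crawler.run_crawler("//a[@class='next']/@href",
#                       'http://example.org/gen/1.html', 'corpus/')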
``` |
{
"source": "JHurricane96/random-audio",
"score": 3
} |
#### File: random-audio/random_audio/__init__.py
```python
import argparse
from os import listdir
from os.path import isfile, join, splitext
from random import randint, uniform
from pydub import AudioSegment
def main():
parser = argparse.ArgumentParser(description="Make a randomized mash-up of audio clips.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("directory",
help="Directory to read audio files from, must contain only the audio files.")
parser.add_argument("-s", metavar=("A", "B"), nargs=2, type=float,
help="Each clip is speeded up X times, where X is A random number between A and B."
" A and B must be greater than 1, and A less than B."
" Omit this option to have all clips play at normal speed.")
parser.add_argument("-n", type=int,
help="Number of times a random clip is picked and added to the mash-up.", default=15)
parser.add_argument("-o", help="Name of output file.", default="output.mp3")
parser.add_argument("-v", action="store_true", help="Enable verbose mode.")
args = parser.parse_args()
path = args.directory
files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
audio_segments = [AudioSegment.from_file(f, splitext(f)[1][1:]) for f in files]
speed_bounds = [1.01, 1.01]
if args.s is not None:
lower, upper = args.s
speed_bounds = [max(lower, 1.01), max(upper, 1.01)]
results = AudioSegment.empty()
for _ in range(args.n):
audio_seg_index = randint(0, len(audio_segments) - 1)
audio_seg = audio_segments[audio_seg_index]
speedup_factor = uniform(*speed_bounds)
results += audio_seg.speedup(playback_speed=speedup_factor)
if args.v:
print("Adding file {} speeded up by a factor of {}".format(files[audio_seg_index], speedup_factor))
results.export(args.o)
print("Successfully written output to", args.o)
if __name__ == "__main__":
main()
``` |
{
"source": "jhurtadojerves/product-hunt",
"score": 2
} |
#### File: product-hunt/products/models.py
```python
from django.db import models
from django.utils.text import slugify
# Third party imports
# Local imports
from profiles.models import Profile
class Product(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
slug = models.SlugField()
owner = models.ForeignKey(
Profile,
related_name='product',
on_delete=models.CASCADE
)
votes = models.ManyToManyField(
Profile,
related_name='votes_to',
through='Vote',
)
url = models.URLField(blank=True)
pub_date = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
slug = slugify(self.title)
if Product.objects.filter(slug=slug).exists():
            # pk may be None before the first save; an f-string avoids the str/int concat error
            slug = f'{slug}-{self.pk}'
self.slug = slug
super().save(*args, **kwargs)
def __str__(self):
return self.title
class Image(models.Model):
image = models.ImageField(upload_to='products/')
product = models.ForeignKey(
'products.Product',
related_name='images',
on_delete=models.CASCADE
)
class Vote(models.Model):
product = models.ForeignKey(
'products.Product',
on_delete=models.CASCADE,
)
owner = models.ForeignKey(
Profile,
on_delete=models.CASCADE
)
class Meta:
unique_together = ['product', 'owner', ]
``` |
{
"source": "jhurt/peewee",
"score": 2
} |
#### File: jhurt/peewee/peewee.py
```python
from __future__ import with_statement
from gevent.local import local
import datetime
import decimal
import logging
import operator
import re
import sys
from collections import deque, namedtuple
from copy import deepcopy
from psycopg2._psycopg import OperationalError, InterfaceError, TransactionRollbackError
from psycopg2_pool import PostgresConnectionPool
thread_local = local()
logger = logging.getLogger(__name__)
__all__ = [
'BigIntegerField',
'BlobField',
'BooleanField',
'CharField',
'Clause',
'DateField',
'DateTimeField',
'DecimalField',
'DoubleField',
'DQ',
'Field',
'FloatField',
'fn',
'ForeignKeyField',
'ImproperlyConfigured',
'IntegerField',
'JOIN_FULL',
'JOIN_INNER',
'JOIN_LEFT_OUTER',
'Model',
'MySQLDatabase',
'PostgresqlDatabase',
'prefetch',
'PrimaryKeyField',
'R',
'SqliteDatabase',
'TextField',
'TimeField',
'UniqueField'
]
# Python 2/3 compat
def with_metaclass(meta, base=object):
return meta("NewBase", (base,), {})
PY3 = sys.version_info[0] == 3
if PY3:
import builtins
from collections import Callable
from functools import reduce, wraps
callable = lambda c: isinstance(c, Callable)
unicode_type = str
string_type = bytes
basestring = str
print_ = getattr(builtins, 'print')
binary_construct = lambda s: bytes(s.encode('raw_unicode_escape'))
else:
unicode_type = unicode
string_type = basestring
binary_construct = buffer
def print_(s):
sys.stdout.write(s)
sys.stdout.write('\n')
import psycopg2
class ImproperlyConfigured(Exception):
pass
if psycopg2 is None:
raise ImproperlyConfigured('psycopg2 must be installed')
DT_LOOKUPS = set(['year', 'month', 'day', 'hour', 'minute', 'second'])
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
OP_AND = 0
OP_OR = 1
OP_ADD = 10
OP_SUB = 11
OP_MUL = 12
OP_DIV = 13
OP_AND = 14
OP_OR = 15
OP_XOR = 16
OP_MOD = 17
OP_EQ = 20
OP_LT = 21
OP_LTE = 22
OP_GT = 23
OP_GTE = 24
OP_NE = 25
OP_IN = 26
OP_IS = 27
OP_LIKE = 28
OP_ILIKE = 29
OP_BETWEEN = 30
DJANGO_MAP = {
'eq': OP_EQ,
'lt': OP_LT,
'lte': OP_LTE,
'gt': OP_GT,
'gte': OP_GTE,
'ne': OP_NE,
'in': OP_IN,
'is': OP_IS,
'like': OP_LIKE,
'ilike': OP_ILIKE,
}
JOIN_INNER = 1
JOIN_LEFT_OUTER = 2
JOIN_FULL = 3
def dict_update(orig, extra):
new = {}
new.update(orig)
new.update(extra)
return new
def returns_clone(func):
def inner(self, *args, **kwargs):
clone = self.clone()
func(clone, *args, **kwargs)
return clone
inner.call_local = func
return inner
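# Illustrative sketch of the decorator above: decorated methods mutate and return a copy,
# leaving the original untouched, e.g.
#   expr2 = expr.alias('t1')   # expr keeps its old _alias; expr2 carries 't1'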
def not_allowed(fn):
def inner(self, *args, **kwargs):
raise NotImplementedError('%s is not allowed on %s instances' % (
fn, type(self).__name__,
))
return inner
class Leaf(object):
def __init__(self):
self.negated = False
self._alias = None
def clone_base(self):
return type(self)()
def clone(self):
inst = self.clone_base()
inst.negated = self.negated
inst._alias = self._alias
return inst
@returns_clone
def __invert__(self):
self.negated = not self.negated
@returns_clone
def alias(self, a=None):
self._alias = a
def asc(self):
return Ordering(self, True)
def desc(self):
return Ordering(self, False)
def _e(op, inv=False):
def inner(self, rhs):
if inv:
return Expr(rhs, op, self)
return Expr(self, op, rhs)
return inner
__and__ = _e(OP_AND)
__or__ = _e(OP_OR)
__add__ = _e(OP_ADD)
__sub__ = _e(OP_SUB)
__mul__ = _e(OP_MUL)
__div__ = _e(OP_DIV)
__xor__ = _e(OP_XOR)
__radd__ = _e(OP_ADD, inv=True)
__rsub__ = _e(OP_SUB, inv=True)
__rmul__ = _e(OP_MUL, inv=True)
__rdiv__ = _e(OP_DIV, inv=True)
__rand__ = _e(OP_AND, inv=True)
__ror__ = _e(OP_OR, inv=True)
__rxor__ = _e(OP_XOR, inv=True)
__eq__ = _e(OP_EQ)
__lt__ = _e(OP_LT)
__le__ = _e(OP_LTE)
__gt__ = _e(OP_GT)
__ge__ = _e(OP_GTE)
__ne__ = _e(OP_NE)
__lshift__ = _e(OP_IN)
__rshift__ = _e(OP_IS)
__mod__ = _e(OP_LIKE)
__pow__ = _e(OP_ILIKE)
class Expr(Leaf):
def __init__(self, lhs, op, rhs):
super(Expr, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
def clone_base(self):
return Expr(self.lhs, self.op, self.rhs)
class DQ(Leaf):
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
def clone_base(self):
return DQ(**self.query)
class Param(Leaf):
def __init__(self, data):
self.data = data
super(Param, self).__init__()
def clone_base(self):
return Param(self.data)
class R(Leaf):
def __init__(self, value):
self.value = value
super(R, self).__init__()
def clone_base(self):
return R(self.value)
class Ordering(Leaf):
def __init__(self, param, asc):
self.param = param
self.asc = asc
super(Ordering, self).__init__()
def clone_base(self):
return Ordering(self.param, self.asc)
class Func(Leaf):
def __init__(self, name, *params):
self.name = name
self.params = params
super(Func, self).__init__()
def clone_base(self):
return Func(self.name, *self.params)
def __getattr__(self, attr):
def dec(*args, **kwargs):
return Func(attr, *args, **kwargs)
return dec
fn = Func(None)
class Clause(Leaf):
def __init__(self, *pieces):
super(Clause, self).__init__()
self.pieces = pieces
def clone_base(self):
return Clause(*self.pieces)
class FieldDescriptor(object):
def __init__(self, field):
self.field = field
self.att_name = self.field.name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance._data.get(self.att_name)
return self.field
def __set__(self, instance, value):
instance._data[self.att_name] = value
class Field(Leaf):
_field_counter = 0
_order = 0
db_field = 'unknown'
template = '%(column_type)s'
def __init__(self, null=False, index=False, unique=False, verbose_name=None,
help_text=None, db_column=None, default=None, choices=None,
primary_key=False, sequence=None, *args, **kwargs):
self.null = null
self.index = index
self.unique = unique
self.verbose_name = verbose_name
self.help_text = help_text
self.db_column = db_column
self.default = default
self.choices = choices
self.primary_key = primary_key
self.sequence = sequence
self.attributes = self.field_attributes()
self.attributes.update(kwargs)
Field._field_counter += 1
self._order = Field._field_counter
self._is_bound = False
super(Field, self).__init__()
def clone_base(self, **kwargs):
inst = type(self)(
null=self.null,
index=self.index,
unique=self.unique,
verbose_name=self.verbose_name,
help_text=self.help_text,
db_column=self.db_column,
default=self.default,
choices=self.choices,
primary_key=self.primary_key,
sequence=self.sequence,
**kwargs
)
inst.attributes = dict(self.attributes)
if self._is_bound:
inst.name = self.name
inst.model_class = self.model_class
return inst
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
self.db_column = self.db_column or self.name
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
setattr(model_class, name, FieldDescriptor(self))
self._is_bound = True
def get_database(self):
return self.model_class._meta.database
def field_attributes(self):
return {}
def get_db_field(self):
return self.db_field
def coerce(self, value):
return value
def db_value(self, value):
return value if value is None else self.coerce(value)
def python_value(self, value):
return value if value is None else self.coerce(value)
def __hash__(self):
return hash(self.name + '.' + self.model_class.__name__)
def between(self, low, high):
return Expr(self, OP_BETWEEN, Clause(low, R('AND'), high))
class IntegerField(Field):
db_field = 'int'
def coerce(self, value):
return int(value)
class BigIntegerField(IntegerField):
db_field = 'bigint'
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
def __init__(self, *args, **kwargs):
kwargs['primary_key'] = True
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
db_field = 'float'
def coerce(self, value):
return float(value)
class DoubleField(FloatField):
db_field = 'double'
class DecimalField(Field):
db_field = 'decimal'
template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)'
def field_attributes(self):
return {
'max_digits': 10,
'decimal_places': 5,
'auto_round': False,
'rounding': decimal.DefaultContext.rounding,
}
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.attributes['auto_round']:
exp = D(10)**(-self.attributes['decimal_places'])
return D(str(value)).quantize(exp, rounding=self.attributes['rounding'])
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
def format_unicode(s, encoding='utf-8'):
if isinstance(s, unicode_type):
return s
elif isinstance(s, string_type):
return s.decode(encoding)
return unicode_type(s)
class CharField(Field):
db_field = 'string'
template = '%(column_type)s(%(max_length)s)'
def field_attributes(self):
return {'max_length': 255}
def coerce(self, value):
value = format_unicode(value or '')
return value[:self.attributes['max_length']]
class TextField(Field):
db_field = 'text'
def coerce(self, value):
return format_unicode(value or '')
class BlobField(Field):
db_field = 'blob'
def db_value(self, value):
if isinstance(value, basestring):
return binary_construct(value)
return value
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
def _date_part(date_part):
def dec(self):
return self.model_class._meta.database.extract_date(date_part, self)
return dec
class DateTimeField(Field):
db_field = 'datetime'
def field_attributes(self):
return {
'formats': [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.attributes['formats'])
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class DateField(Field):
db_field = 'date'
def field_attributes(self):
return {
'formats': [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.attributes['formats'], pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
class TimeField(Field):
db_field = 'time'
def field_attributes(self):
return {
'formats': [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
}
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.attributes['formats'], pp)
elif value and isinstance(value, datetime.datetime):
return value.time()
return value
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class BooleanField(Field):
db_field = 'bool'
def coerce(self, value):
return bool(value)
class RelationDescriptor(FieldDescriptor):
def __init__(self, field, rel_model):
self.rel_model = rel_model
super(RelationDescriptor, self).__init__(field)
def get_object_or_id(self, instance):
rel_id = instance._data.get(self.att_name)
if rel_id is not None or self.att_name in instance._obj_cache:
if self.att_name not in instance._obj_cache:
obj = self.rel_model.get(self.rel_model._meta.primary_key==rel_id)
instance._obj_cache[self.att_name] = obj
return instance._obj_cache[self.att_name]
elif not self.field.null:
return None
return rel_id
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.get_object_or_id(instance)
return self.field
def __set__(self, instance, value):
if isinstance(value, self.rel_model):
instance._data[self.att_name] = value.get_id()
instance._obj_cache[self.att_name] = value
else:
instance._data[self.att_name] = value
class ReverseRelationDescriptor(object):
def __init__(self, field):
self.field = field
self.rel_model = field.model_class
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.rel_model.select().where(self.field==instance.get_id())
return self
class ForeignKeyField(IntegerField):
def __init__(self, rel_model, null=False, related_name=None, cascade=False, extra=None, *args, **kwargs):
self.rel_model = rel_model
self._related_name = related_name
self.cascade = cascade
self.extra = extra
kwargs.update(dict(
cascade='ON DELETE CASCADE' if self.cascade else '',
extra=extra or '',
))
super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
def clone_base(self):
return super(ForeignKeyField, self).clone_base(
rel_model=self.rel_model,
related_name=self.related_name,
cascade=self.cascade,
extra=self.extra,
)
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
self.db_column = self.db_column or '%s_id' % self.name
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
self.related_name = self._related_name or '%s_set' % (model_class._meta.name)
if self.rel_model == 'self':
self.rel_model = self.model_class
if self.related_name in self.rel_model._meta.fields:
raise AttributeError('Foreign key: %s.%s related name "%s" collision with field of same name' % (
self.model_class._meta.name, self.name, self.related_name))
setattr(model_class, name, RelationDescriptor(self, self.rel_model))
setattr(self.rel_model, self.related_name, ReverseRelationDescriptor(self))
self._is_bound = True
model_class._meta.rel[self.name] = self
self.rel_model._meta.reverse_rel[self.related_name] = self
def get_db_field(self):
to_pk = self.rel_model._meta.primary_key
if not isinstance(to_pk, PrimaryKeyField):
return to_pk.get_db_field()
return super(ForeignKeyField, self).get_db_field()
def coerce(self, value):
return self.rel_model._meta.primary_key.coerce(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = value.get_id()
return self.rel_model._meta.primary_key.db_value(value)
class QueryCompiler(object):
field_map = {
'bigint': 'INTEGER',
'blob': 'BLOB',
'bool': 'SMALLINT',
'date': 'DATE',
'datetime': 'DATETIME',
'decimal': 'DECIMAL',
'double': 'REAL',
'float': 'REAL',
'int': 'INTEGER',
'primary_key': 'INTEGER',
'string': 'VARCHAR',
'text': 'TEXT',
'time': 'TIME',
}
op_map = {
OP_EQ: '=',
OP_LT: '<',
OP_LTE: '<=',
OP_GT: '>',
OP_GTE: '>=',
OP_NE: '!=',
OP_IN: 'IN',
OP_IS: 'IS',
OP_LIKE: 'LIKE',
OP_ILIKE: 'ILIKE',
OP_BETWEEN: 'BETWEEN',
OP_ADD: '+',
OP_SUB: '-',
OP_MUL: '*',
OP_DIV: '/',
OP_XOR: '#',
OP_AND: 'AND',
OP_OR: 'OR',
OP_MOD: '%',
}
join_map = {
JOIN_INNER: 'INNER',
JOIN_LEFT_OUTER: 'LEFT OUTER',
JOIN_FULL: 'FULL',
}
def __init__(self, quote_char='"', interpolation='?', field_overrides=None,
op_overrides=None):
self.quote_char = quote_char
self.interpolation = interpolation
self._field_map = dict_update(self.field_map, field_overrides or {})
self._op_map = dict_update(self.op_map, op_overrides or {})
def quote(self, s):
return ''.join((self.quote_char, s, self.quote_char))
def get_field(self, f):
return self._field_map[f]
def get_op(self, q):
return self._op_map[q]
def _max_alias(self, am):
max_alias = 0
if am:
for a in am.values():
i = int(a.lstrip('t'))
if i > max_alias:
max_alias = i
return max_alias + 1
def parse_expr(self, expr, alias_map=None, conv=None):
s = self.interpolation
p = [expr]
if isinstance(expr, Expr):
if isinstance(expr.lhs, Field):
conv = expr.lhs
lhs, lparams = self.parse_expr(expr.lhs, alias_map, conv)
rhs, rparams = self.parse_expr(expr.rhs, alias_map, conv)
s = '(%s %s %s)' % (lhs, self.get_op(expr.op), rhs)
p = lparams + rparams
elif isinstance(expr, Field):
s = self.quote(expr.db_column)
if alias_map and expr.model_class in alias_map:
s = '.'.join((alias_map[expr.model_class], s))
p = []
elif isinstance(expr, Func):
p = []
exprs = []
for param in expr.params:
parsed, params = self.parse_expr(param, alias_map, conv)
exprs.append(parsed)
p.extend(params)
s = '%s(%s)' % (expr.name, ', '.join(exprs))
elif isinstance(expr, Clause):
p = []
exprs = []
for piece in expr.pieces:
parsed, params = self.parse_expr(piece, alias_map, conv)
exprs.append(parsed)
p.extend(params)
s = ' '.join(exprs)
elif isinstance(expr, Param):
s = self.interpolation
p = [expr.data]
elif isinstance(expr, Ordering):
s, p = self.parse_expr(expr.param, alias_map, conv)
s += ' ASC' if expr.asc else ' DESC'
elif isinstance(expr, R):
s = expr.value
p = []
elif isinstance(expr, SelectQuery):
max_alias = self._max_alias(alias_map)
alias_copy = alias_map and alias_map.copy() or None
clone = expr.clone()
if not expr._explicit_selection:
clone._select = (clone.model_class._meta.primary_key,)
subselect, p = self.generate_select(clone, max_alias, alias_copy)
s = '(%s)' % subselect
elif isinstance(expr, (list, tuple)):
exprs = []
p = []
for i in expr:
e, v = self.parse_expr(i, alias_map, conv)
exprs.append(e)
p.extend(v)
s = '(%s)' % ','.join(exprs)
elif isinstance(expr, Model):
s = self.interpolation
p = [expr.get_id()]
elif conv and p:
p = [conv.db_value(i) for i in p]
if isinstance(expr, Leaf):
if expr.negated:
s = 'NOT %s' % s
if expr._alias:
s = ' '.join((s, 'AS', expr._alias))
return s, p
def parse_expr_list(self, s, alias_map):
parsed = []
data = []
for expr in s:
expr_str, vars = self.parse_expr(expr, alias_map)
parsed.append(expr_str)
data.extend(vars)
return ', '.join(parsed), data
def parse_field_dict(self, d):
sets, params = [], []
for field, expr in d.items():
field_str, _ = self.parse_expr(field)
# because we don't know whether to call db_value or parse_expr first,
# we'd prefer to call parse_expr since its more general, but it does
# special things with lists -- it treats them as if it were buliding
# up an IN query. for some things we don't want that, so here, if the
# expr is *not* a special object, we'll pass thru parse_expr and let
# db_value handle it
if not isinstance(expr, (Leaf, Model, Query)):
expr = Param(expr) # pass through to the fields db_value func
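            # Illustrative only: a plain value such as {SomeModel.title: 'foo'}
            # (hypothetical model/field) is wrapped in Param so the field's
            # db_value() coerces it, while a value that is itself a query or
            # expression object is parsed as SQL directly.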
val_str, val_params = self.parse_expr(expr)
val_params = [field.db_value(vp) for vp in val_params]
sets.append((field_str, val_str))
params.extend(val_params)
return sets, params
def parse_query_node(self, qnode, alias_map):
if qnode is not None:
return self.parse_expr(qnode, alias_map)
return '', []
def calculate_alias_map(self, query, start=1):
alias_map = {query.model_class: 't%s' % start}
for model, joins in query._joins.items():
if model not in alias_map:
start += 1
alias_map[model] = 't%s' % start
for join in joins:
if join.model_class not in alias_map:
start += 1
alias_map[join.model_class] = 't%s' % start
return alias_map
def generate_joins(self, joins, model_class, alias_map):
parsed = []
params = []
seen = set()
q = [model_class]
while q:
curr = q.pop()
if curr not in joins or curr in seen:
continue
seen.add(curr)
for join in joins[curr]:
from_model = curr
to_model = join.model_class
if isinstance(join.on, Expr):
join_expr = join.on
else:
field = from_model._meta.rel_for_model(to_model, join.on)
if field:
left_field = field
right_field = to_model._meta.primary_key
else:
field = to_model._meta.rel_for_model(from_model, join.on)
left_field = from_model._meta.primary_key
right_field = field
join_expr = (left_field == right_field)
join_type = join.join_type or JOIN_INNER
parsed_join, join_params = self.parse_expr(join_expr, alias_map)
parsed.append('%s JOIN %s AS %s ON %s' % (
self.join_map[join_type],
self.quote(to_model._meta.db_table),
alias_map[to_model],
parsed_join,
))
params.extend(join_params)
q.append(to_model)
return parsed, params
def generate_select(self, query, start=1, alias_map=None):
model = query.model_class
db = model._meta.database
alias_map = alias_map or {}
alias_map.update(self.calculate_alias_map(query, start))
parts = ['SELECT']
params = []
if query._distinct:
parts.append('DISTINCT')
selection = query._select
select, s_params = self.parse_expr_list(selection, alias_map)
parts.append(select)
params.extend(s_params)
parts.append('FROM %s AS %s' % (self.quote(model._meta.db_table), alias_map[model]))
joins, j_params = self.generate_joins(query._joins, query.model_class, alias_map)
if joins:
parts.append(' '.join(joins))
params.extend(j_params)
where, w_params = self.parse_query_node(query._where, alias_map)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
if query._group_by:
group_by, g_params = self.parse_expr_list(query._group_by, alias_map)
parts.append('GROUP BY %s' % group_by)
params.extend(g_params)
if query._having:
having, h_params = self.parse_query_node(query._having, alias_map)
parts.append('HAVING %s' % having)
params.extend(h_params)
if query._order_by:
order_by, o_params = self.parse_expr_list(query._order_by, alias_map)
parts.append('ORDER BY %s' % order_by)
params.extend(o_params)
if query._limit or (query._offset and db.limit_max):
limit = query._limit or db.limit_max
parts.append('LIMIT %s' % limit)
if query._offset:
parts.append('OFFSET %s' % query._offset)
if query._for_update:
parts.append('FOR UPDATE')
return ' '.join(parts), params
def generate_update(self, query):
model = query.model_class
parts = ['UPDATE %s SET' % self.quote(model._meta.db_table)]
sets, params = self.parse_field_dict(query._update)
parts.append(', '.join('%s=%s' % (f, v) for f, v in sets))
where, w_params = self.parse_query_node(query._where, None)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
return ' '.join(parts), params
def generate_insert(self, query):
model = query.model_class
parts = ['INSERT INTO %s' % self.quote(model._meta.db_table)]
sets, params = self.parse_field_dict(query._insert)
if sets:
parts.append('(%s)' % ', '.join(s[0] for s in sets))
parts.append('VALUES (%s)' % ', '.join(s[1] for s in sets))
return ' '.join(parts), params
def generate_delete(self, query):
model = query.model_class
parts = ['DELETE FROM %s' % self.quote(model._meta.db_table)]
params = []
where, w_params = self.parse_query_node(query._where, None)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
return ' '.join(parts), params
def field_sql(self, field):
attrs = field.attributes
attrs['column_type'] = self.get_field(field.get_db_field())
template = field.template
if isinstance(field, ForeignKeyField):
to_pk = field.rel_model._meta.primary_key
if not isinstance(to_pk, PrimaryKeyField):
template = to_pk.template
attrs.update(to_pk.attributes)
parts = [self.quote(field.db_column), template]
if not field.null:
parts.append('NOT NULL')
if field.primary_key:
parts.append('PRIMARY KEY')
if isinstance(field, ForeignKeyField):
ref_mc = (
self.quote(field.rel_model._meta.db_table),
self.quote(field.rel_model._meta.primary_key.db_column),
)
parts.append('REFERENCES %s (%s)' % ref_mc)
parts.append('%(cascade)s%(extra)s')
elif field.sequence:
parts.append("DEFAULT NEXTVAL('%s')" % self.quote(field.sequence))
return ' '.join(p % attrs for p in parts)
def create_table_sql(self, model_class):
parts = ['CREATE TABLE IF NOT EXISTS']
parts.append(self.quote(model_class._meta.db_table))
columns = ', '.join(self.field_sql(f) for f in model_class._meta.get_fields())
uniques = model_class._meta.get_uniques()
if len(uniques) > 0:
columns += ', UNIQUE ('
columns += ', '.join(map(lambda x: '"' + x + '"',uniques))
columns += ')'
parts.append('(%s)' % columns)
return parts
def create_table(self, model_class):
return ' '.join(self.create_table_sql(model_class))
def drop_table(self, model_class, fail_silently=False, cascade=False):
parts = ['DROP TABLE']
if fail_silently:
parts.append('IF EXISTS')
parts.append(self.quote(model_class._meta.db_table))
if cascade:
parts.append('CASCADE')
return ' '.join(parts)
def create_index_sql(self, model_class, fields, unique):
tbl_name = model_class._meta.db_table
colnames = [f.db_column for f in fields]
parts = ['CREATE %s' % ('UNIQUE INDEX' if unique else 'INDEX')]
parts.append(self.quote('%s_%s' % (tbl_name, '_'.join(colnames))))
parts.append('ON %s' % self.quote(tbl_name))
parts.append('(%s)' % ', '.join(map(self.quote, colnames)))
return parts
def create_index(self, model_class, fields, unique):
return ' '.join(self.create_index_sql(model_class, fields, unique))
def create_sequence(self, sequence_name):
return 'CREATE SEQUENCE %s;' % self.quote(sequence_name)
def drop_sequence(self, sequence_name):
return 'DROP SEQUENCE %s;' % self.quote(sequence_name)
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
- converts rows from the database into python representations
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor, meta=None):
self.model = model
self.cursor = cursor
self.__ct = 0
self.__idx = 0
self._result_cache = []
self._populated = False
def __iter__(self):
self.__idx = 0
if not self._populated:
return self
else:
return iter(self._result_cache)
def process_row(self, row):
return row
def iterate(self):
row = self.cursor.fetchone()
if not row:
self._populated = True
raise StopIteration
return self.process_row(row)
def iterator(self):
while True:
yield self.iterate()
def next(self):
if self.__idx < self.__ct:
inst = self._result_cache[self.__idx]
self.__idx += 1
return inst
obj = self.iterate()
self._result_cache.append(obj)
self.__ct += 1
self.__idx += 1
return obj
__next__ = next
def fill_cache(self, n=None):
n = n or float('Inf')
self.__idx = self.__ct
while not self._populated and (n > self.__ct):
try:
self.next()
except StopIteration:
break
class TuplesQueryResultWrapper(QueryResultWrapper):
def __init__(self, model, cursor, meta=None):
super(TuplesQueryResultWrapper, self).__init__(model, cursor, meta)
conv = []
identity = lambda x: x
for i in range(len(self.cursor.description)):
col = self.cursor.description[i][0]
if col in model._meta.columns:
field_obj = model._meta.columns[col]
conv.append(field_obj.python_value)
else:
conv.append(identity)
self.conv = conv
def process_row(self, row):
return tuple([self.conv[i](col) for i, col in enumerate(row)])
class NaiveQueryResultWrapper(QueryResultWrapper):
def __init__(self, model, cursor, meta=None):
super(NaiveQueryResultWrapper, self).__init__(model, cursor, meta)
cols = []
non_cols = []
for i in range(len(self.cursor.description)):
col = self.cursor.description[i][0]
if col in model._meta.columns:
field_obj = model._meta.columns[col]
cols.append((i, field_obj.name, field_obj.python_value))
else:
non_cols.append((i, col))
self._cols = cols
self._non_cols = non_cols
def process_row(self, row):
instance = self.model()
for i, fname, pv in self._cols:
setattr(instance, fname, pv(row[i]))
for i, f in self._non_cols:
setattr(instance, f, row[i])
instance.prepared()
return instance
class DictQueryResultWrapper(NaiveQueryResultWrapper):
def process_row(self, row):
res = {}
for i, fname, pv in self._cols:
res[fname] = pv(row[i])
for i, f in self._non_cols:
res[f] = row[i]
return res
class ModelQueryResultWrapper(QueryResultWrapper):
def __init__(self, model, cursor, meta=None):
super(ModelQueryResultWrapper, self).__init__(model, cursor, meta)
self.column_meta, self.join_meta = meta
def process_row(self, row):
collected = self.construct_instance(row)
instances = self.follow_joins(collected)
for i in instances:
i.prepared()
return instances[0]
def construct_instance(self, row):
# we have columns, models, and a graph of joins to reconstruct
collected_models = {}
cols = [c[0] for c in self.cursor.description]
for i, expr in enumerate(self.column_meta):
value = row[i]
if isinstance(expr, FieldProxy):
key = expr._model_alias # model alias
constructor = expr.model # instance constructor
elif isinstance(expr, Field):
key = constructor = expr.model_class
else:
key = constructor = self.model
if key not in collected_models:
collected_models[key] = constructor()
instance = collected_models[key]
if isinstance(expr, Field):
setattr(instance, expr.name, expr.python_value(value))
elif isinstance(expr, Expr) and expr._alias:
setattr(instance, expr._alias, value)
else:
setattr(instance, cols[i], value)
return collected_models
def follow_joins(self, collected):
joins = self.join_meta
stack = [self.model]
prepared = [collected[self.model]]
while stack:
current = stack.pop()
if current not in joins:
continue
inst = collected[current]
for join in joins[current]:
if join.model_class in collected:
joined_inst = collected[join.model_class]
fk_field = current._meta.rel_for_model(join.model_class)
if not fk_field:
if isinstance(join.on, Expr):
fk_field = join.on.lhs
else:
continue
if joined_inst.get_id() is None and fk_field.name in inst._data:
rel_inst_id = inst._data[fk_field.name]
joined_inst.set_id(rel_inst_id)
setattr(inst, fk_field.name, joined_inst)
stack.append(join.model_class)
prepared.append(joined_inst)
return prepared
Join = namedtuple('Join', ('model_class', 'join_type', 'on'))
class Query(Leaf):
def __init__(self, model_class):
super(Query, self).__init__()
self.model_class = model_class
self.database = model_class._meta.database
self._dirty = True
self._query_ctx = model_class
self._joins = {self.model_class: []} # adjacency graph
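        # e.g. (hypothetical User/Tweet models) in a query like
        # User.select().join(Tweet) this ends up as {User: [Join(Tweet, None, None)]}:
        # keys are the models joined from, values are the Join tuples issued from them.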
self._where = None
def __repr__(self):
sql, params = self.sql()
return '%s %s %s' % (self.model_class, sql, params)
def clone(self):
query = type(self)(self.model_class)
if self._where is not None:
query._where = self._where.clone()
query._joins = self.clone_joins()
query._query_ctx = self._query_ctx
return query
def clone_joins(self):
return dict(
(mc, list(j)) for mc, j in self._joins.items()
)
@returns_clone
def where(self, *q_or_node):
if self._where is None:
self._where = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._where &= piece
@returns_clone
def join(self, model_class, join_type=None, on=None):
if not self._query_ctx._meta.rel_exists(model_class) and on is None:
raise ValueError('No foreign key between %s and %s' % (
self._query_ctx, model_class,
))
if on and isinstance(on, basestring):
on = self._query_ctx._meta.fields[on]
self._joins.setdefault(self._query_ctx, [])
self._joins[self._query_ctx].append(Join(model_class, join_type, on))
self._query_ctx = model_class
@returns_clone
def switch(self, model_class=None):
self._query_ctx = model_class or self.model_class
def ensure_join(self, lm, rm, on=None):
ctx = self._query_ctx
for join in self._joins.get(lm, []):
if join.model_class == rm:
return self
query = self.switch(lm).join(rm, on=on).switch(ctx)
return query
def convert_dict_to_node(self, qdict):
accum = []
joins = []
for key, value in sorted(qdict.items()):
curr = self.model_class
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
else:
op = OP_EQ
for piece in key.split('__'):
model_attr = getattr(curr, piece)
if isinstance(model_attr, (ForeignKeyField, ReverseRelationDescriptor)):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(Expr(model_attr, op, value))
return accum, joins
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
dq_node = Leaf()
if args:
dq_node &= reduce(operator.and_, [a.clone() for a in args])
if kwargs:
dq_node &= DQ(**kwargs)
# dq_node should now be an Expr, lhs = Leaf(), rhs = ...
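        # Illustrative only (hypothetical User and Tweet models, where Tweet has a
        # ForeignKeyField to User with related_name='tweets'): a call such as
        #   User.filter(username='foo', tweets__content__ilike='%peewee%')
        # wraps the keyword arguments in a DQ node; convert_dict_to_node() then
        # turns each key into a field expression and the implied User -> Tweet
        # join is added via ensure_join() before where() is applied.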
q = deque([dq_node])
dq_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expr):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
dq_joins.update(joins)
setattr(curr, side, reduce(operator.and_, query))
else:
q.append(piece)
dq_node = dq_node.rhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model_class, field.rel_model
field_obj = field
elif isinstance(field, ReverseRelationDescriptor):
lm, rm = field.field.rel_model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
def compiler(self):
return self.database.compiler()
def sql(self):
raise NotImplementedError
def _execute(self):
sql, params = self.sql()
return self.database.execute_sql(sql, params)
def execute(self):
raise NotImplementedError
def scalar(self, as_tuple=False):
conn = None
try:
result = self._execute()
row = result[0].fetchone()
conn = result[1]
if row and not as_tuple:
return row[0]
else:
return row
finally:
self.database.put_connection_back(conn)
class RawQuery(Query):
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
self._qr = None
self._tuples = False
self._dicts = False
super(RawQuery, self).__init__(model)
def clone(self):
query = RawQuery(self.model_class, self._sql, *self._params)
query._tuples = self._tuples
query._dicts = self._dicts
return query
join = not_allowed('joining')
where = not_allowed('where')
switch = not_allowed('switch')
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
def sql(self):
return self._sql, self._params
def execute(self):
if self._qr is None:
if self._tuples:
ResultWrapper = TuplesQueryResultWrapper
elif self._dicts:
ResultWrapper = DictQueryResultWrapper
else:
ResultWrapper = NaiveQueryResultWrapper
conn = None
try:
result = self._execute()
cursor = result[0]
conn = result[1]
self._qr = ResultWrapper(self.model_class, cursor, None)
finally:
self.database.put_connection_back(conn)
return self._qr
def __iter__(self):
return iter(self.execute())
class SelectQuery(Query):
def __init__(self, model_class, *selection):
super(SelectQuery, self).__init__(model_class)
self._explicit_selection = len(selection) > 0
self._select = self._model_shorthand(selection or model_class._meta.get_fields())
self._group_by = None
self._having = None
self._order_by = None
self._limit = None
self._offset = None
self._distinct = False
self._for_update = False
self._naive = False
self._tuples = False
self._dicts = False
self._alias = None
self._qr = None
def clone(self):
query = super(SelectQuery, self).clone()
query._explicit_selection = self._explicit_selection
query._select = list(self._select)
if self._group_by is not None:
query._group_by = list(self._group_by)
if self._having:
query._having = self._having.clone()
if self._order_by is not None:
query._order_by = list(self._order_by)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._for_update = self._for_update
query._naive = self._naive
query._tuples = self._tuples
query._dicts = self._dicts
query._alias = self._alias
return query
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Leaf):
accum.append(arg)
elif isinstance(arg, Query):
accum.append(arg)
elif isinstance(arg, ModelAlias):
accum.extend(arg.get_proxy_fields())
elif issubclass(arg, Model):
accum.extend(arg._meta.get_fields())
return accum
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def having(self, *q_or_node):
if self._having is None:
self._having = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._having &= piece
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def distinct(self, is_distinct=True):
self._distinct = is_distinct
@returns_clone
def for_update(self, for_update=True):
self._for_update = for_update
@returns_clone
def naive(self, naive=True):
self._naive = naive
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
@returns_clone
def alias(self, alias=None):
self._alias = alias
def annotate(self, rel_model, annotation=None):
annotation = annotation or fn.Count(rel_model._meta.primary_key).alias('count')
query = self.clone()
query = query.ensure_join(query._query_ctx, rel_model)
if not query._group_by:
query._group_by = [x.alias() for x in query._select]
query._select = tuple(query._select) + (annotation,)
return query
def _aggregate(self, aggregation=None):
aggregation = aggregation or fn.Count(self.model_class._meta.primary_key)
query = self.order_by()
query._select = [aggregation]
return query
def aggregate(self, aggregation=None):
return self._aggregate(aggregation).scalar()
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
# defaults to a count() of the primary key
return self.aggregate() or 0
def wrapped_count(self):
clone = self.order_by()
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = RawQuery(self.model_class, wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
clone._select = [self.model_class._meta.primary_key]
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
return None
def first(self):
res = self.execute()
res.fill_cache(1)
try:
return res._result_cache[0]
except IndexError:
pass
def sql(self):
return self.compiler().generate_select(self)
def verify_naive(self):
for expr in self._select:
if isinstance(expr, Field) and expr.model_class != self.model_class:
return False
return True
def execute(self):
if self._dirty or not self._qr:
query_meta = None
if self._tuples:
ResultWrapper = TuplesQueryResultWrapper
elif self._dicts:
ResultWrapper = DictQueryResultWrapper
elif self._naive or not self._joins or self.verify_naive():
ResultWrapper = NaiveQueryResultWrapper
else:
query_meta = [self._select, self._joins]
ResultWrapper = ModelQueryResultWrapper
conn = None
try:
result = self._execute()
cursor = result[0]
conn = result[1]
self._qr = ResultWrapper(self.model_class, cursor, query_meta)
finally:
self.database.put_connection_back(conn)
self._dirty = False
return self._qr
else:
return self._qr
def __iter__(self):
return iter(self.execute())
def iterator(self):
return iter(self.execute().iterator())
def __getitem__(self, value):
offset = limit = None
if isinstance(value, slice):
if value.start:
offset = value.start
if value.stop:
limit = value.stop - (value.start or 0)
else:
if value < 0:
raise ValueError('Negative indexes are not supported, try ordering in reverse')
offset = value
limit = 1
if self._limit != limit or self._offset != offset:
self._qr = None
self._limit = limit
self._offset = offset
res = list(self)
return limit == 1 and res[0] or res
class UpdateQuery(Query):
def __init__(self, model_class, update=None):
self._update = update
super(UpdateQuery, self).__init__(model_class)
def clone(self):
query = super(UpdateQuery, self).clone()
query._update = dict(self._update)
return query
join = not_allowed('joining')
def sql(self):
return self.compiler().generate_update(self)
def execute(self):
conn = None
try:
result = self._execute()
cursor = result[0]
conn = result[1]
return self.database.rows_affected(cursor)
finally:
self.database.put_connection_back(conn)
class InsertQuery(Query):
def __init__(self, model_class, insert=None):
mm = model_class._meta
query = dict((mm.fields[f], v) for f, v in mm.get_default_dict().items())
query.update(insert)
self._insert = query
super(InsertQuery, self).__init__(model_class)
def clone(self):
query = super(InsertQuery, self).clone()
query._insert = dict(self._insert)
return query
join = not_allowed('joining')
where = not_allowed('where clause')
def sql(self):
return self.compiler().generate_insert(self)
def execute(self):
conn = None
try:
result = self._execute()
cursor = result[0]
conn = result[1]
return self.database.last_insert_id(cursor, self.model_class)
finally:
self.database.put_connection_back(conn)
class DeleteQuery(Query):
join = not_allowed('joining')
def sql(self):
return self.compiler().generate_delete(self)
def execute(self):
conn = None
try:
result = self._execute()
cursor = result[0]
conn = result[1]
return self.database.rows_affected(cursor)
finally:
self.database.put_connection_back(conn)
class PostgresqlDatabase(object):
commit_select = True
field_overrides = {
'bigint': 'BIGINT',
'blob': 'BYTEA',
'bool': 'BOOLEAN',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'primary_key': 'SERIAL',
}
for_update = True
interpolation = '%s'
reserved_tables = ['user']
sequences = True
compiler_class = QueryCompiler
limit_max = None
op_overrides = {}
quote_char = '"'
subquery_delete_same_table = True
def __init__(self, database, fields=None, ops=None, **connect_kwargs):
self.database = database
self.connect_kwargs = connect_kwargs
self.pool = PostgresConnectionPool(database, maxsize=10, **connect_kwargs)
self.field_overrides = dict_update(self.field_overrides, fields or {})
self.op_overrides = dict_update(self.op_overrides, ops or {})
def get_thread_local_conn(self):
db_connection = getattr(thread_local, 'db_connection', {})
if 'conn' in db_connection:
return db_connection['conn']
return None
def set_thread_local_conn(self, conn):
db_connection = getattr(thread_local, 'db_connection', {})
if not db_connection:
setattr(thread_local, 'db_connection', {'conn':conn})
else:
thread_local.db_connection['conn'] = conn
def clear_thread_local_conn(self):
db_connection = getattr(thread_local, 'db_connection', {})
if db_connection:
del thread_local.db_connection['conn']
def new_conn(self):
conn = self.pool.get()
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
return conn
def new_transaction_conn(self):
conn = self.pool.get()
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
return conn
def get_conn(self):
conn = self.get_thread_local_conn()
if conn is None:
return self.new_conn()
return conn
@classmethod
def register_fields(cls, fields):
cls.field_overrides = dict_update(cls.field_overrides, fields)
@classmethod
def register_ops(cls, ops):
cls.op_overrides = dict_update(cls.op_overrides, ops)
def last_insert_id(self, cursor, model):
seq = model._meta.primary_key.sequence
if seq:
cursor.execute("SELECT CURRVAL('\"%s\"')" % (seq))
return cursor.fetchone()[0]
elif model._meta.auto_increment:
cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
model._meta.db_table, model._meta.primary_key.db_column))
return cursor.fetchone()[0]
def rows_affected(self, cursor):
return cursor.rowcount
def compiler(self):
return self.compiler_class(
self.quote_char, self.interpolation, self.field_overrides,
self.op_overrides)
def put_connection_back(self, conn):
if conn:
#only give the connection back to the pool if it's not a thread local, thread local connections are cleaned up elsewhere
thread_local_conn = self.get_thread_local_conn()
if thread_local_conn is None:
self.pool.put(conn)
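    # Connection-handling sketch (illustrative): a plain query borrows a pooled
    # autocommit connection in execute_sql() and put_connection_back() returns it,
    # whereas doInTransaction() pins a SERIALIZABLE connection to the current
    # greenlet via set_thread_local_conn(), so queries inside the transaction
    # reuse it and put_connection_back() leaves it for doInTransaction() to release.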
def execute_sql(self, sql, params=None):
try:
conn = self.get_conn()
cursor = conn.cursor()
cursor.execute(sql, params or ())
#logger.debug((sql, params))
return cursor, conn
except (OperationalError, InterfaceError) as e:
            logger.error(e)
if e.__class__ == TransactionRollbackError:
raise e
else:
thread_local_conn = self.get_thread_local_conn()
if thread_local_conn is None:
conn.close()
self.pool.size -= 1
raise e
def execute_many(self, sql, params):
conn = None
try:
conn = self.get_conn()
cursor = conn.cursor()
cursor.executemany(sql, params)
return cursor
except (OperationalError, InterfaceError) as e:
if e.__class__ == TransactionRollbackError:
raise e
else:
thread_local_conn = self.get_thread_local_conn()
if thread_local_conn is None:
conn.close()
self.pool.size -= 1
raise e
finally:
self.put_connection_back(conn)
def get_tables(self):
res = self.execute_sql("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY c.relname""")[0]
return [row[0] for row in res.fetchall()]
def get_indexes_for_table(self, table):
res = self.execute_sql("""
SELECT c2.relname, i.indisprimary, i.indisunique
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))[0]
return sorted([(r[0], r[1]) for r in res.fetchall()])
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*)
FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))[0]
return bool(res.fetchone()[0])
def create_table(self, model_class):
qc = self.compiler()
return self.execute_sql(qc.create_table(model_class))[0]
def create_index(self, model_class, fields, unique=False):
qc = self.compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('fields passed to "create_index" must be a list or tuple: "%s"' % fields)
field_objs = [model_class._meta.fields[f] if isinstance(f, basestring) else f for f in fields]
return self.execute_sql(qc.create_index(model_class, field_objs, unique))[0]
def create_foreign_key(self, model_class, field):
if not field.primary_key:
return self.create_index(model_class, [field], field.unique)
def create_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(qc.create_sequence(seq))[0]
def drop_table(self, model_class, fail_silently=False):
qc = self.compiler()
return self.execute_sql(qc.drop_table(model_class, fail_silently))[0]
def drop_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(qc.drop_sequence(seq))[0]
def extract_date(self, date_part, date_field):
return fn.EXTRACT(Clause(date_part, R('FROM'), date_field))
def set_search_path(self, *search_path):
path_params = ','.join(['%s'] * len(search_path))
self.execute_sql('SET search_path TO %s' % path_params, search_path)
def doInTransaction(db, body, retry_count=100):
conn = db.new_transaction_conn()
db.set_thread_local_conn(conn)
try:
result = body()
conn.commit()
db.clear_thread_local_conn()
db.pool.put(conn)
except (OperationalError, InterfaceError) as e:
conn.rollback()
if e.__class__ == TransactionRollbackError:
if retry_count == 0:
db.clear_thread_local_conn()
db.pool.put(conn)
raise e
return doInTransaction(db, body, retry_count - 1)
else:
conn.close()
db.pool.size -= 1
            logger.error(e)
raise e
return result
class ModelOptions(object):
def __init__(self, cls, database=None, db_table=None, indexes=None,
order_by=None, primary_key=None, uniques=None):
self.model_class = cls
self.name = cls.__name__.lower()
self.fields = {}
self.columns = {}
self.defaults = {}
self.database = database
self.db_table = db_table
self.indexes = indexes or []
self.uniques = uniques or []
self.order_by = order_by
self.primary_key = primary_key
self.auto_increment = None
self.rel = {}
self.reverse_rel = {}
def prepared(self):
for field in self.fields.values():
if field.default is not None:
self.defaults[field] = field.default
if self.order_by:
norm_order_by = []
for clause in self.order_by:
field = self.fields[clause.lstrip('-')]
if clause.startswith('-'):
norm_order_by.append(field.desc())
else:
norm_order_by.append(field.asc())
self.order_by = norm_order_by
def get_default_dict(self):
dd = {}
for field, default in self.defaults.items():
if callable(default):
dd[field.name] = default()
else:
dd[field.name] = default
return dd
def get_sorted_fields(self):
return sorted(self.fields.items(), key=lambda i: (i[1] is self.primary_key and 1 or 2, i[1]._order))
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def get_uniques(self):
return self.uniques
def rel_for_model(self, model, field_obj=None):
for field in self.get_fields():
if isinstance(field, ForeignKeyField) and field.rel_model == model:
if field_obj is None or field_obj.name == field.name:
return field
def reverse_rel_for_model(self, model):
return model._meta.rel_for_model(self.model_class)
def rel_exists(self, model):
return self.rel_for_model(model) or self.reverse_rel_for_model(model)
class BaseModel(type):
inheritable_options = ['database', 'indexes', 'order_by', 'primary_key', 'uniques']
def __new__(cls, name, bases, attrs):
if not bases:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
meta_options.update((k, v) for k, v in meta.__dict__.items() if not k.startswith('_'))
orig_primary_key = None
# inherit any field descriptors by deep copying the underlying field obj
# into the attrs of the new model, additionally see if the bases define
# inheritable model options and swipe them
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = getattr(b, '_meta')
for (k, v) in base_meta.__dict__.items():
if k in cls.inheritable_options and k not in meta_options:
meta_options[k] = v
for (k, v) in b.__dict__.items():
if isinstance(v, FieldDescriptor) and k not in attrs:
if not v.field.primary_key:
attrs[k] = deepcopy(v.field)
elif not orig_primary_key:
orig_primary_key = deepcopy(v.field)
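        # Illustrative only (hypothetical models): given
        #   class BaseUser(Model):
        #       username = CharField()
        #       class Meta:
        #           database = db
        #   class Admin(BaseUser):
        #       pass
        # Admin gets a deep copy of the username field descriptor and inherits the
        # `database` Meta option; the primary key is copied separately and
        # re-attached further below.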
# initialize the new class and set the magic attributes
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
cls._data = None
primary_key = None
# replace the fields with field descriptors, calling the add_to_class hook
for name, attr in list(cls.__dict__.items()):
cls._meta.indexes = list(cls._meta.indexes)
if isinstance(attr, Field):
attr.add_to_class(cls, name)
if attr.primary_key:
primary_key = attr
if primary_key is None:
if orig_primary_key:
primary_key = orig_primary_key
name = primary_key.name
else:
primary_key = PrimaryKeyField(primary_key=True)
name = 'id'
primary_key.add_to_class(cls, name)
cls._meta.primary_key = primary_key
cls._meta.auto_increment = isinstance(primary_key, PrimaryKeyField) or bool(primary_key.sequence)
if not cls._meta.db_table:
            cls._meta.db_table = re.sub(r'[^\w]+', '_', cls.__name__.lower())
# create a repr and error class before finalizing
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %r>' % (
cls.__name__, self.__unicode__()))
cls._meta.prepared()
return cls
class FieldProxy(Field):
def __init__(self, alias, field_instance):
self._model_alias = alias
self.model = self._model_alias.model_class
self.field_instance = field_instance
def clone_base(self):
return FieldProxy(self._model_alias, self.field_instance)
def __getattr__(self, attr):
if attr == 'model_class':
return self._model_alias
return getattr(self.field_instance, attr)
class ModelAlias(object):
def __init__(self, model_class):
self.__dict__['model_class'] = model_class
def __getattr__(self, attr):
model_attr = getattr(self.model_class, attr)
if isinstance(model_attr, Field):
return FieldProxy(self, model_attr)
return model_attr
def __setattr__(self, attr, value):
raise AttributeError('Cannot set attributes on ModelAlias instances')
def get_proxy_fields(self):
return [FieldProxy(self, f) for f in self.model_class._meta.get_fields()]
class Model(with_metaclass(BaseModel)):
def __init__(self, *args, **kwargs):
self._data = self._meta.get_default_dict()
self._obj_cache = {} # cache of related objects
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def alias(cls):
return ModelAlias(cls)
@classmethod
def select(cls, *selection):
query = SelectQuery(cls, *selection)
if cls._meta.order_by:
query = query.order_by(*cls._meta.order_by)
return query
@classmethod
def update(cls, **update):
fdict = dict((cls._meta.fields[f], v) for f, v in update.items())
return UpdateQuery(cls, fdict)
@classmethod
def insert(cls, **insert):
fdict = dict((cls._meta.fields[f], v) for f, v in insert.items())
return InsertQuery(cls, fdict)
@classmethod
def delete(cls):
return DeleteQuery(cls)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
return inst
@classmethod
def get(cls, *query, **kwargs):
sq = cls.select().naive()
if query:
sq = sq.where(*query)
if kwargs:
sq = sq.filter(**kwargs)
return sq.get()
    @classmethod
    def get_or_create(cls, **kwargs):
        sq = cls.select().filter(**kwargs)
        r = sq.get()
        if r is None:
            return cls.create(**kwargs)
        return r
@classmethod
def filter(cls, *dq, **query):
return cls.select().filter(*dq, **query)
@classmethod
def table_exists(cls):
return cls._meta.db_table in cls._meta.database.get_tables()
@classmethod
def create_table(cls):
db = cls._meta.database
conn = db.get_conn()
try:
db.set_thread_local_conn(conn)
if not cls.table_exists():
pk = cls._meta.primary_key
if db.sequences and pk.sequence and not db.sequence_exists(pk.sequence):
db.create_sequence(pk.sequence)
db.create_table(cls)
cls._create_indexes()
finally:
db.clear_thread_local_conn()
db.put_connection_back(conn)
@classmethod
def _create_indexes(cls):
db = cls._meta.database
for field_name, field_obj in cls._meta.fields.items():
if isinstance(field_obj, ForeignKeyField):
db.create_foreign_key(cls, field_obj)
elif field_obj.index or field_obj.unique:
db.create_index(cls, [field_obj], field_obj.unique)
if cls._meta.indexes:
for fields, unique in cls._meta.indexes:
db.create_index(cls, fields, unique)
@classmethod
def drop_table(cls, fail_silently=False):
cls._meta.database.drop_table(cls, fail_silently)
def get_id(self):
return getattr(self, self._meta.primary_key.name)
def set_id(self, id):
setattr(self, self._meta.primary_key.name, id)
def prepared(self):
pass
def _prune_fields(self, field_dict, only):
new_data = {}
for field in only:
if field.name in field_dict:
new_data[field.name] = field_dict[field.name]
return new_data
def save(self, force_insert=False, only=None):
field_dict = dict(self._data)
pk = self._meta.primary_key
if only:
field_dict = self._prune_fields(field_dict, only)
if self.get_id() is not None and not force_insert:
field_dict.pop(pk.name, None)
update = self.update(
**field_dict
).where(pk == self.get_id())
update.execute()
else:
if self._meta.auto_increment:
field_dict.pop(pk.name, None)
insert = self.insert(**field_dict)
new_pk = insert.execute()
if self._meta.auto_increment:
self.set_id(new_pk)
def dependencies(self, search_nullable=False):
stack = [(type(self), self.select().where(self._meta.primary_key == self.get_id()))]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for rel_name, fk in klass._meta.reverse_rel.items():
rel_model = fk.model_class
expr = fk << query
if not fk.null or search_nullable:
stack.append((rel_model, rel_model.select().where(expr)))
yield (expr, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
for query, fk in reversed(list(self.dependencies(delete_nullable))):
if fk.null and not delete_nullable:
fk.model_class.update(**{fk.name: None}).where(query).execute()
else:
fk.model_class.delete().where(query).execute()
return self.delete().where(self._meta.primary_key == self.get_id()).execute()
def __eq__(self, other):
return other.__class__ == self.__class__ and \
self.get_id() is not None and \
other.get_id() == self.get_id()
def __ne__(self, other):
return not self == other
def prefetch_add_subquery(sq, subqueries):
fixed_queries = [(sq, None)]
for i, subquery in enumerate(subqueries):
if not isinstance(subquery, Query) and issubclass(subquery, Model):
subquery = subquery.select()
subquery_model = subquery.model_class
fkf = None
for j in range(i, -1, -1):
last_query = fixed_queries[j][0]
fkf = subquery_model._meta.rel_for_model(last_query.model_class)
if fkf:
break
if not fkf:
raise AttributeError('Error: unable to find foreign key for query: %s' % subquery)
fixed_queries.append((subquery.where(fkf << last_query), fkf))
return fixed_queries
def prefetch(sq, *subqueries):
if not subqueries:
return sq
fixed_queries = prefetch_add_subquery(sq, subqueries)
deps = {}
rel_map = {}
for query, foreign_key_field in reversed(fixed_queries):
query_model = query.model_class
deps[query_model] = {}
id_map = deps[query_model]
has_relations = bool(rel_map.get(query_model))
for result in query:
if foreign_key_field:
fk_val = result._data[foreign_key_field.name]
id_map.setdefault(fk_val, [])
id_map[fk_val].append(result)
if has_relations:
for rel_model, rel_fk in rel_map[query_model]:
rel_name = '%s_prefetch' % rel_fk.related_name
rel_instances = deps[rel_model].get(result.get_id(), [])
for inst in rel_instances:
setattr(inst, rel_fk.name, result)
setattr(result, rel_name, rel_instances)
if foreign_key_field:
rel_model = foreign_key_field.rel_model
rel_map.setdefault(rel_model, [])
rel_map[rel_model].append((query_model, foreign_key_field))
return query
def create_model_tables(models, **create_table_kwargs):
"""Create tables for all given models (in the right order)."""
for m in sort_models_topologically(models):
m.create_table(**create_table_kwargs)
def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
m.drop_table(**drop_table_kwargs)
def sort_models_topologically(models):
"""Sort models topologically so that parents will precede children."""
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for foreign_key in model._meta.reverse_rel.values():
dfs(foreign_key.model_class)
ordering.append(model) # parent will follow descendants
# order models by name and table initially to guarantee a total ordering
names = lambda m: (m._meta.name, m._meta.db_table)
for m in sorted(models, key=names, reverse=True):
dfs(m)
return list(reversed(ordering)) # want parents first in output ordering
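# Illustrative only, assuming hypothetical User and Tweet models where Tweet has
# a ForeignKeyField to User: sort_models_topologically([Tweet, User]) returns
# [User, Tweet], so create_model_tables() creates referenced tables first and
# drop_model_tables() drops them in reverse dependency order.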
``` |
{
"source": "jhurt/slotmath",
"score": 3
} |
#### File: jhurt/slotmath/multilinebruteforcemath.py
```python
from functools import reduce

#read the reel layout: each row lists weighted symbols for the five reels
symbols_weights = []
slot_layout_file = open('reels_weights_shuffled.csv', 'r')
line = slot_layout_file.readline()
while line:
tokens = line.split(',')
if len(tokens) < 5:
break
symbol_weights = []
for symbol_weight in tokens:
strs = symbol_weight.split("_")
if len(strs) == 2:
symbol = strs[0]
weight = float(strs[1])
symbol_weights.append({'symbol':symbol, 'weight':weight})
symbols_weights.append(symbol_weights)
line = slot_layout_file.readline()
slot_layout_file.close()
#get the payouts
symbol_to_payouts = {} # map the symbol to a list of dicts of frequency, value
payouts_file = open('payouts.csv', 'r')
line = payouts_file.readline()
while line:
tokens = line.split(',')
if tokens[0] in symbol_to_payouts.keys():
symbol_to_payouts[tokens[0]].append({'frequency': int(tokens[1]), 'value': float(tokens[2])})
else:
symbol_to_payouts[tokens[0]] = [{'frequency': int(tokens[1]), 'value': float(tokens[2])}]
line = payouts_file.readline()
payouts_file.close()
#calculate the total symbol weight for each reel
reel_weights = [0,0,0,0,0]
symbols_per_reel = [len(symbols_weights), len(symbols_weights), len(symbols_weights), len(symbols_weights), len(symbols_weights)]
for symbol_weights in symbols_weights:
    reel_weights = list(map(lambda x, y: x + y, reel_weights, map(lambda x: x['weight'], symbol_weights)))
def makeSymbolToCount(line):
symbol_to_count = {}
for symbol_weights in line:
if symbol_weights['symbol'] in symbol_to_count.keys():
symbol_to_count[symbol_weights['symbol']] += 1
else:
symbol_to_count[symbol_weights['symbol']] = 1
return symbol_to_count
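# Example (hypothetical symbols): for a line such as
#   [{'symbol': 'A', 'weight': 2.0}, {'symbol': 'A', 'weight': 1.0}, {'symbol': 'B', 'weight': 3.0}]
# makeSymbolToCount returns {'A': 2, 'B': 1}, i.e. how often each symbol appears on the line.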
possible_lines = []
possible_lines.append([0,0,0,0,0])
possible_lines.append([-1,-1,-1,-1,-1])
possible_lines.append([1,1,1,1,1])
possible_lines.append([-1,0,1,0,-1])
possible_lines.append([1,0,-1,0,1])
possible_lines.append([0,-1,-1,-1,0])
possible_lines.append([0,1,1,1,0])
possible_lines.append([-1,-1,0,1,1])
possible_lines.append([1,1,0,-1,-1])
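# Each payline is a list of row offsets, one per reel, relative to the centre stop:
# 0 = centre row, -1 = the row above, 1 = the row below. So [0,0,0,0,0] is the straight
# centre line and [-1,0,1,0,-1] is a V shape. (Interpretation inferred from how the
# offsets are applied in the loop below.)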
#find the expected value
total_choices = reduce(lambda x,y: x*y, reel_weights)
expected_value = 0.0
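# Probability model implied by the code below: a spin stops on rows (a, b, c, d, e),
# one per reel, with probability
#   P = (w[a][0] * w[b][1] * w[c][2] * w[d][3] * w[e][4]) / total_choices
# where w[i][r] is the weight of the symbol at row i on reel r; every payline evaluated
# for that spin contributes its payout weighted by this same P.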
for a in range(symbols_per_reel[0]):
for b in range(symbols_per_reel[1]):
for c in range(symbols_per_reel[2]):
for d in range(symbols_per_reel[3]):
for e in range(symbols_per_reel[4]):
lines = []
for possible_line in possible_lines:
ai = a + possible_line[0]
if ai == symbols_per_reel[0]:
ai = 0
bi = b + possible_line[1]
if bi == symbols_per_reel[1]:
bi = 0
ci = c + possible_line[2]
if ci == symbols_per_reel[2]:
ci = 0
di = d + possible_line[3]
if di == symbols_per_reel[3]:
di = 0
ei = e + possible_line[4]
if ei == symbols_per_reel[4]:
ei = 0
line = []
line.append(symbols_weights[ai][0])
line.append(symbols_weights[bi][1])
line.append(symbols_weights[ci][2])
line.append(symbols_weights[di][3])
line.append(symbols_weights[ei][4])
lines.append(line)
for line in lines:
symbol_to_count = makeSymbolToCount(line)
for symbol in symbol_to_payouts.keys():
if symbol in symbol_to_count.keys():
for payout in symbol_to_payouts[symbol]:
if payout['frequency'] == symbol_to_count[symbol]:
probability = reduce(lambda x,y: x*y, map(lambda x: x['weight'], lines[0])) / total_choices
expected_value += payout['value'] * probability
print "expected value: {0}".format(expected_value/len(possible_lines))
``` |
{
"source": "Jhustin27/Eye-of-summit-2.0",
"score": 3
} |
#### File: Eye-of-summit-2.0/Test/Test.py
```python
import unittest
from app import clases
class TestClass(unittest.TestCase):
def test_clases(self):
self.assertEqual(clases(1), 9)
def test_clases2(self):
self.assertEqual(clases(2), 14)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhustles/new_egg_webscraper",
"score": 3
} |
#### File: jhustles/new_egg_webscraper/app.py
```python
import os
import re
import glob
import time
import random
import requests
import datetime
import pandas as pd
from re import search
from splinter import Browser
from playsound import playsound
from bs4 import BeautifulSoup as soup
# In[2]:
# Reminder to self.
#import this
# ## Functions & Classes Setup
# ---
# In[3]:
# Return date throughout the program.
def return_dt():
global current_date
current_date = str(datetime.datetime.now()).replace(':','.').replace(' ','_')[:-7]
return current_date
#return_dt()
# In[4]:
"""
Main NewEgg WebScraper function - outputs are csv file and Laptop objects.
"""
def newegg_page_scraper(containers, turn_page):
page_nums = []
general_category = []
product_categories = []
images = []
product_brands = []
product_models = []
product_links = []
item_numbers = []
promotions = []
prices = []
shipping_terms = []
# Set gen_category as a global variable to make it accessible throughout the program, and to avoid an error.
global gen_category
"""
    Loop through all the containers in the HTML and scrape their content into the following lists.
"""
for con in containers:
try:
page_counter = turn_page
page_nums.append(int(turn_page))
gen_category = target_page_soup.find_all('div', class_="nav-x-body-top-bar fix")[0].text.split('\n')[5]
general_category.append(gen_category)
prod_category = target_page_soup.find_all('h1', class_="page-title-text")[0].text
product_categories.append(prod_category)
image = con.a.img["src"]
images.append(image)
prd_title = con.find_all('a', class_="item-title")[0].text
product_models.append(prd_title)
product_link = con.find_all('a', class_="item-title")[0]['href']
product_links.append(product_link)
shipping = con.find_all('li', class_='price-ship')[0].text.strip().split()[0]
if shipping != "Free":
shipping = shipping.replace('$', '')
shipping_terms.append(shipping)
else:
shipping = 0.00
shipping_terms.append(shipping)
brand_name = con.find_all('a', class_="item-brand")[0].img["title"]
product_brands.append(brand_name)
except (IndexError, ValueError) as e:
# If there are no item_brand container, take the Brand from product details.
product_brands.append(con.find_all('a', class_="item-title")[0].text.split()[0])
try:
current_promo = con.find_all("p", class_="item-promo")[0].text
promotions.append(current_promo)
except:
promotions.append('null')
try:
price = con.find_all('li', class_="price-current")[0].text.split()[0].replace('$','').replace(',', '')
prices.append(price)
except:
price = 'null / out of stock'
prices.append(price)
try:
item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1].split('?')[0]
item_numbers.append(item_num)
except (IndexError) as e:
item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1]
item_numbers.append(item_num)
# Convert all of the lists into a dataframe
df = pd.DataFrame({
'item_number': item_numbers,
'general_category': general_category,
'product_category': product_categories,
'brand': product_brands,
'model_specifications': product_models,
'price': prices,
'current_promotions': promotions,
'shipping': shipping_terms,
'page_number': page_nums,
'product_links': product_links,
'image_link': images
})
# Rearrange the dataframe columns into the following order.
df = df[['item_number', 'general_category','product_category', 'page_number' ,'brand','model_specifications' ,'current_promotions' ,'price' ,'shipping' ,'product_links','image_link']]
# Convert the dataframe into a dictionary.
global scraped_dict
scraped_dict = df.to_dict('records')
# Grab the subcategory "Laptop/Notebooks" and eliminate any special characters that may cause errors.
global pdt_category
pdt_category = df['product_category'].unique()[0]
# Eliminate special characters in a string if it exists.
pdt_category = ''.join(e for e in pdt_category if e.isalnum())
""" Count the number of items scraped by getting the length of a all the models for sale.
This parameter is always available for each item-container in the HTML
"""
global items_scraped
items_scraped = len(df['model_specifications'])
"""
Save the results into a csv file using Pandas
"""
df.to_csv(f'./processing/{current_date}_{pdt_category}_{items_scraped}_scraped_page{turn_page}.csv')
# Return these variables as they will be used.
return scraped_dict, items_scraped, pdt_category
# In[5]:
def web_Scraper_part2():
    # Refresh the module-level soup and containers so pages 2+ actually re-scrape fresh data.
    global target_page_soup, containers
    x = random.randint(6, 10)
def rdm_slp_6_10(x):
time.sleep(x)
print(f"Mimic Humans - Sleeping for {x} seconds. ")
return x
keep_trying = True
try_counter = 0
while keep_trying == True:
try:
try_counter += 1
target_url = browser.url
rdm_slp_6_10(x)
            response_target = requests.get(target_url)
            print(f"{response_target} \n")
            target_page_soup = soup(response_target.text, 'html.parser')
            are_you_human_backend(target_page_soup)
            containers = target_page_soup.find_all("div", class_="item-container")
            newegg_page_scraper(containers, turn_page)
except IndexError as e:
            # These usually surface as JavaScript exceptions rather than ordinary Python errors.
            print(f"EXCEPTION - newegg_page_scraper function error: | {e} | \n")
playsound('./sounds/break_pedal.wav')
rdm_slp_6_10(x)
break_pedal_2 = input("Exception - WEBSCRAPER - Break pedal - manually refresh the page, and perhaps add an item to the cart, and go back to the same page where you left off and resume the scrape. \n")
else: # Execute this if no exception is raised. Set keep trying to false.
keep_trying = False
print(f"After {try_counter} attempts, SUCCESSFULLY scraped the page and will continue... \n")
print(f"Scraped Current Page: {turn_page} \n")
#global scraped_dict
return scraped_dict, items_scraped, pdt_category
# In[6]:
# Function to return the total results pages.
def results_pages(target_page_soup):
# Use BeautifulSoup to extract the total results page number.
results_pages = target_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip()
#print(results_pages)
# Find and extract total pages + and add 1 to ensure proper length of total pages.
global total_results_pages
total_results_pages = int(re.split("/", results_pages)[1])
return total_results_pages
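# Example (assumed markup): the pagination text scraped above typically reads something
# like "1/28"; splitting on "/" and taking index 1 yields 28, the total number of result
# pages the crawler will walk through.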
# In[7]:
"""
Build a function to concatenate all pages that were scraped and saved in the processing folder.
Save the final output (1 csv file) all the results
"""
def concatenate(total_results_pages):
    path = './processing'
    scraped_pages = glob.glob(path + "/*.csv")
concatenate_pages = []
counter = 0
for page in scraped_pages:
df = pd.read_csv(page, index_col=0, header=0)
concatenate_pages.append(df)
compiled_data = pd.concat(concatenate_pages, axis=0, ignore_index=True)
total_items_scraped = len(compiled_data['brand'])
concatenated_output = compiled_data.to_csv(f"./finished_outputs/{current_date}_{total_items_scraped}_scraped_{total_results_pages}_pages_.csv")
return
# In[8]:
"""
Built a function to clear out the entire processing files folder to avoid clutter.
Or the user can keep the processing files (page by page) for their own analysis.
"""
def clean_processing_fldr():
    path = './processing'
    scraped_pages = glob.glob(path + "/*.csv")
if len(scraped_pages) < 1:
print("There are no files in the folder to clear. \n")
else:
print(f"Clearing out a total of {len(scraped_pages)} scraped pages in the processing folder... \n")
clear_processing_files = []
for page in scraped_pages:
os.remove(page)
print('Clearing of "Processing" folder complete. \n')
return
# In[9]:
# Mouse over function to go thru hover over product links on the page to emulate humans.
def random_a_tag_mouse_over3():
x = random.randint(6, 10)
def rdm_slp_6_10(x):
time.sleep(x)
print(f"Mimic Humans - Sleeping for {x} seconds. ")
return x
working_try_atags = []
finally_atags = []
working_atags = []
not_working_atags = []
try_counter = 0
finally_counter = 0
time.sleep(1)
# Mouse over to header of the page "Laptops"
browser.find_by_tag("h1").mouse_over()
number_of_a_tags = len(browser.find_by_tag("a"))
    # In practice, most of the clickable laptop links on the grid fall roughly in the <a> index range 2000-2100.
if number_of_a_tags > 1900:
print(f"Found {number_of_a_tags} <a> tags when parsing html... ")
random_90_percent_plug = (random.randint(90, 94)/100.00)
start_a_tag = int(round((number_of_a_tags * random_90_percent_plug)))
end_a_tag = int(round((number_of_a_tags * .96)))
else:
        # After proving you're human, the number of clickable <a> tags sometimes drops to around 300, so adjust the mouse_over range for that scenario.
print(f"Found {number_of_a_tags} <a> tags when parsing html... ")
random_40_percent_plug = (random.randint(40, 44)/100.00)
start_a_tag = int(round((number_of_a_tags * random_40_percent_plug)))
end_a_tag = int(round((number_of_a_tags * .46)))
step = random.randint(13, 23)
for i in range(start_a_tag, end_a_tag, step):
try: # try this as normal part of the program - SHORT
rdm_slp_6_10(x)
browser.find_by_tag("a")[i+2].mouse_over()
time.sleep(3)
except: # Execute this when there is an exception
print("EXCEPTION raised during mouse over. Going to break loop and proceed with moving to the next page. \n")
break
else: # execute this only if no exceptions are raised
working_try_atags.append(i+2)
working_atags.append(i+2)
try_counter += 1
print(f"<a> number = {i+2} | Current Attempts (Try Count): {try_counter} \n")
return
# In[10]:
# Checks for Google's reCaptcha "are you human?" test and alerts the user.
def g_recaptcha_check():
if browser.is_element_present_by_id('g-recaptcha') == True:
for sound in range(0, 2):
playsound('./sounds/user_alert.wav')
print("recaptcha - Check Alert! \n")
        continue_scrape = input("Newegg suspects you are a bot. \n Complete the reCAPTCHA test to prove you're not, then type any key and press ENTER to continue the scrape. \n")
print("Continuing with scrape... \n")
return
# In[11]:
# Created an are you human backend test bc Newegg would send bogus "are you human" html when "requesting" for html.
def are_you_human_backend(target_page_soup):
if target_page_soup.find_all("title")[0].text == 'Are you a human?':
playsound('./sounds/user_alert.wav')
print("Newegg suspects you're a bot on the backend. Automatically will refresh, and target new URL. \n")
print("Refreshing page. Please wait... \n")
for i in range(0, 1):
browser.reload()
time.sleep(2)
browser.back()
print("Sleeping for 30 seconds to emulate humans. \n")
time.sleep(30)
browser.forward()
playsound('./sounds/break_pedal.wav')
        break_pedal_ayh = input("Please click a laptop item, add or remove it from the cart, and return to the same page using your browser's back button. \n Then type any key and press ENTER to continue scraping. \n")
# Allocate time for page to load.
time.sleep(3)
print("Targeting new url... ")
# After user passes test, target the new url, and return updated target_page_soup.
target_url = browser.url
response_target = requests.get(target_url)
target_page_soup = soup(response_target.text, 'html.parser')
        # Recursively re-check the refreshed page and pass the clean soup back up to the caller.
        return are_you_human_backend(target_page_soup)
else:
print("Passed the 'Are you human?' check when requesting and parsing the html. Continuing with scrape ... \n")
# Otherwise, return the target_page_soup that was passed in.
return target_page_soup
# In[12]:
# crazy idea, put links in a list, and then loop thru them and try and except else (break out of the loop) and continue
def random_xpath_top_bottom():
x = random.randint(3, 8)
def rdm_slp_3_8(x):
time.sleep(x)
print(f"Slept for {x} seconds. \n")
return x
coin_toss_top_bottom = random.randint(0,1)
next_page_button_results = []
# If the coin toss is even, mouse_over and click the top page link.
if (coin_toss_top_bottom == 0):
try:
print('Heads - Clicking "Next Page" Top Button. \n')
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
rdm_slp_3_8(x)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').mouse_over()
time.sleep(1)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click()
next_page_button_results.append(coin_toss_top_bottom)
print('Heads - SUCCESSFUL "Next Page" Top Button. \n')
return
except:
print("EXCEPTION - Top Next Page button mouse over and click UNSUCCESSFUL... ")
try:
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
                rdm_slp_3_8(x)
print('Attempting to click the bottom "Next Page" Xpath Bottom Button. \n')
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click()
print('EXCEPTION BYPASSED - Bottom Next Page Button SUCCESSFUL! \n')
except:
print("EXCEPTION - Top and Bottom Next Page Button Link not working... \n")
playsound('./sounds/break_pedal.wav')
                break_pedal_xptb = input("Break Pedal - Please manually click the Next Page button. Then type any key and press ENTER to continue the scrape. \n ")
print("Continuing... \n")
print("="*60)
return
else: # If coin toss is tails or 1, then...
try:
print('Tails - Clicking "Next Page" Xpath Bottom Button. \n')
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
            rdm_slp_3_8(x)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click()
print('Tails - 1st Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n')
except:
print("EXCEPTION - 1st Bottom Xpath Failed. Sleep for 4 second then will try with 2nd Xpath bottom link. \n")
try:
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').click()
print('EXCEPTION BYPASSED! Tails - 2nd Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n')
except:
print("EXCEPTION - 2nd Bottom Xpath Failed. Trying with 3rd Xpath bottom link. \n")
try:
time.sleep(4)
browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click()
print('EXCEPTION BYPASSED! Tails - 3rd Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n')
except:
print("Last Bottom Next Page Xpath Button was unsuccessful... Will Attempt Top Next Page Button.... \n")
try:
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
rdm_slp_3_8(x)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').mouse_over()
time.sleep(1)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click()
next_page_button_results.append(coin_toss_top_bottom)
print('EXCEPTION BYPASSED SUCCESSFUL "Next Page" Top Button worked. \n')
return
except:
print("EXCEPTION BYPASSES UNSUCCESSFUL - All 3 Xpath Bottom Button AND Top Next Page Xpath Button was not working due to JavaScipt Exceptions... \n")
playsound('./sounds/break_pedal.wav')
                        break_pedal_xptb = input("Break Pedal - Please manually click the Next Page button. Then type any key and press ENTER to continue the scrape. \n ")
return
# In[13]:
"""
This class takes in the dictionary from the webscraper function, and will be used in a list comprehension
to produce class "objects"
"""
class Laptops:
counter = 0
def __init__(self, **entries):
self.__dict__.update(entries)
def count(self):
print(f"Total Laptops scraped: {Laptops.counter}")
"""
Originally modeled out parent/child inheritance object structure.
After careful research, I found it much easier to export the Pandas Dataframe of the results to a dictionary,
and then into a class object.
"""
# class Product_catalog:
# all_prod_count = 0
# def __init__(self, general_category): # computer systems
# self.general_category = general_category
# Product_catalog.all_prod_count += 1
# def count_prod(self):
# return int(self.all_prod_count)
# #return '{}'.format(self.general_category)
# Sub_category was later changed to Laptops due to the scope of this project.
# class Sub_category(Product_catalog): # laptops/notebooks, gaming
# sub_category_ct = 0
# def __init__(self, general_category, sub_categ, item_num, brand, price, img_link, prod_link, model_specifications, current_promotions):
# super().__init__(general_category)
# Sub_category.sub_category_ct += 1
# self.sub_categ = sub_categ
# self.item_num = item_num
# self.brand = brand
# self.price = price
# self.img_link = img_link
# self.prod_link = prod_link
# self.model_specifications = model_specifications
# self.current_promotions = current_promotions
# ## Main Program Logic
# ---
# In[14]:
""" Welcome to the program message!
"""
print("=== NewEgg.Com Laptop - Supervised Web Crawler & Scraper Beta v1.0 ===")
print("=="*30)
print('Scope: This project is a beta and is only built to scrape the laptop section of NewEgg.com due to limited time. \n')
print("Instructions: \n")
return_dt()
print(f'Current Date And Time: {current_date} \n')
print("(1) Go to www.newegg.com, go to the laptop section, select your requirements (e.g. brand, screensize, and specifications - SSD size, processor brand and etc...) ")
print("(2) Copy and paste the url from your exact search when prompted ")
print('(3) This is a "Supervised Scraper", meaning it will mostly be automated, but you will be alerted to take action when necessary. ')
print('(4) You may run the program in the background after the initial set of instructions, as the program will alert you to take action (e.g. when Newegg suspects a bot). ')
print('(5) After the webscraping is successful, you will have an option to concatenate all of the pages you scraped together into one csv file')
print('(6) Lastly, you will have an option to clear out the processing folder (data scraped by each page)')
print('(7) If you have any issues or errors, "PRESS CTRL + C" to quit the program in the terminal ')
print('Disclaimer: Newegg may ban you for 24 - 48 hours for webscraping their data; after that you may resume. \n Also, consider running the scrape during the day, when there is plenty of traffic to their site from your area. \n')
print('Happy Scraping!')
# Set up Splinter requirements.
executable_path = {'executable_path': './chromedriver.exe'}
# Ask user to input in the laptop query link they would like to scrape.
url = input("Please copy and paste your laptop query that you want to webscrape, and press enter: \n")
browser = Browser('chrome', **executable_path, headless=False, incognito=True)
browser.visit(url)
# Allocating loading time.
time.sleep(3)
break_pedal_1 = input("Break Pedal - close any pop ups and go any item and add one to the cart and go to the first search query. ")
current_url = browser.url
response = requests.get(current_url)
print(f"{response} \n")
target_page_soup = soup(response.text, 'html.parser')
# Run the results_pages function to gather the total pages to be scraped.
results_pages(target_page_soup)
"""
This is the loop that performs the page by page scraping of data / results
of the user's query.
"""
# List set up for where class Laptop objects will be stored.
print("Beginning webscraping and activity log below... ")
print("="*60)
product_catalog = []
for turn_page in range(1, total_results_pages+1):
"""
If "reCAPTCHA" pops up, pause the program using an input. This allows the user to continue
to scrape after they're done completing the quiz by inputting any value.
"""
# Allocating loading time.
time.sleep(3)
# Check if the site believes we are a bot, if so alert the user to take action.
g_recaptcha_check()
print(f"Beginning mouse over activity... \n")
# Set up "containers" to be passed into main scraping function.
if turn_page == 1:
containers = target_page_soup.find_all("div", class_="item-container")
# Added this and moved it here to test new setup.
newegg_page_scraper(containers, turn_page)
else:
web_Scraper_part2()
print("Creating laptop objects for this page... \n")
# Create instances of class objects of the laptops/notebooks using a list comprehension.
objects = [Laptops(**prod_obj) for prod_obj in scraped_dict]
print(f"Finished creating Laptop objects for page {turn_page} ... \n")
# Append all of the objects to the main product_catalog list (List of List of Objects).
print(f"Adding {len(objects)} to laptop catalog... \n")
product_catalog.append(objects)
print("Flipping coin to decide mouse over on the page or not... ")
n = random.randint(0,1)
if n == 0:
print("Heads - will mouse over the page. ")
random_a_tag_mouse_over3()
else:
r = random.randint(8, 20)
print(f"Tails - will sleep for {r} seconds. and continue")
time.sleep(r)
#print("Will scrape pages, but will need to randomly sleep for max 35 seconds to emulate human behavior. \n")
if turn_page == total_results_pages:
print(f"Completed scraping {turn_page} / {total_results_pages} pages. \n ")
# Exit the broswer once complete webscraping.
browser.quit()
else:
try:
y = random.randint(4, 6)
print(f"Current Page: {turn_page}) | SLEEPING FOR {y} SECONDS THEN will click next page. \n")
time.sleep(y)
random_xpath_top_bottom()
except:
z = random.randint(3, 5)
print(f" (EXCEPTION) Current Page: {turn_page}) | SLEEPING FOR {z} SECONDS - Will click next page, if applicable. \n")
time.sleep(z)
random_xpath_top_bottom()
print("")
print("="*60)
print("")
# Prompt the user if they would like to concatenate all of the pages into one csv file
concat_y_n = input(f'All {total_results_pages} pages have been saved in the "processing" folder (one csv file per page). Would you like to concatenate all the files into one? Enter "y" if so. Otherwise, enter any key to exit the program. \n')
if concat_y_n == 'y':
concatenate(total_results_pages)
    print(f'WebScraping Complete! All {total_results_pages} pages have been scraped, and the combined csv has been saved in the "finished_outputs" folder. \n')
# Prompt the user to if they would like to clear out processing folder function here - as delete everything to prevent clutter
clear_processing_y_n = input(f'The "processing" folder has {total_results_pages} csv files, one for each page that was scraped. Would you like to clear these files? Enter "y" if so. Otherwise, enter any key to exit the program. \n')
if clear_processing_y_n == 'y':
clean_processing_fldr()
print('Thank you for checking out my project; I hope you found it useful! \n')
``` |
{
"source": "jhustles/reading_level_app",
"score": 4
} |
#### File: jhustles/reading_level_app/readability.py
```python
import re
def main():
while True:
text = input("Text: ")
if len(text) > 0:
break
letters = count_letters(text)
words = count_words(text)
sentences = count_sentences(text)
cl_index = round(0.0588 * ((letters/words)*100) - 0.296 * ((sentences/words)*100) - 15.8)
if cl_index < 1:
print("Before Grade 1")
elif cl_index > 16:
print("Grade 16+")
else:
print(f"Grade {cl_index}")
return
def count_letters(target_text):
pattern = re.compile('[a-z]', re.IGNORECASE)
matches = pattern.findall(target_text)
# print(len(matches))
# letters = len(matches)
return len(matches)
def count_words(target_text):
    pattern = re.compile(r'\w+')
matches = pattern.findall(target_text)
# words = len(matches)
return len(matches)
def count_sentences(target_text):
pattern = re.compile(r'[A-Z][^\.!?]*[.!?]')
matches = pattern.findall(target_text)
# sentences = len(matches)
return len(matches)
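# Worked example of the Coleman-Liau formula used in main(), with hypothetical counts:
# letters=70, words=16, sentences=2 gives
#   L = 70/16*100 = 437.5,  S = 2/16*100 = 12.5
#   index = 0.0588*437.5 - 0.296*12.5 - 15.8 = 6.225  ->  printed as "Grade 6" after rounding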
if __name__ == "__main__":
main()
``` |