prompt (stringlengths 15 to 655k) | completion (stringlengths 3 to 32.4k) | api (stringlengths 8 to 52) |
---|---|---|
#TODO: the TensorFlow 2.X migration changed the "import tensorflow as tf" line to the two lines shown below
# import tensorflow.compat.v1 as tf
# tf.disable_eager_execution()
import tensorflow as tf
import numpy as np
import time
import matplotlib.pyplot as plt
from skimage.measure import compare_ssim as ssim
import random
import LDAMP.sensing_methods as sensing_methods
import LDAMP.utils as utils
import os
os.environ["CUDA_VISIBLE_DEVICES"]='0'
def reconstruction_method(dset,sensing_method,specifics):
method = LDAMP_wrapper(sensing_method, specifics)
return method
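# Hedged usage sketch (argument values are illustrative; `specifics` must contain the
# keys read in LDAMP_wrapper.__init__ below):
# method = reconstruction_method(dset, 'complex-gaussian', specifics)
# method.initialize(dset, sensing, 'training')  # or 'testing'
# method.run()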
def SetNetworkParams(new_height_img, new_width_img,new_channel_img, new_filter_height,new_filter_width,\
new_num_filters,new_n_DnCNN_layers,new_n_DAMP_layers, new_sampling_rate,\
new_BATCH_SIZE,new_sigma_w,new_n,new_m,new_training, iscomplex,use_adaptive_weights=False):
global height_img, width_img, channel_img, filter_height, filter_width, num_filters, n_DnCNN_layers, n_DAMP_layers,\
sampling_rate, BATCH_SIZE, sigma_w, n, m, n_fp, m_fp, is_complex, training, adaptive_weights
height_img = new_height_img
width_img = new_width_img
channel_img = new_channel_img
filter_height = new_filter_height
filter_width = new_filter_width
num_filters = new_num_filters
n_DnCNN_layers = new_n_DnCNN_layers
n_DAMP_layers = new_n_DAMP_layers
sampling_rate = new_sampling_rate
BATCH_SIZE = new_BATCH_SIZE
sigma_w = new_sigma_w
n = new_n
m = new_m
n_fp = np.float32(n)
m_fp = np.float32(m)
is_complex=iscomplex#Just the default
adaptive_weights=use_adaptive_weights
training=new_training
def ListNetworkParameters():
print ('height_img = ', height_img)
print ('width_img = ', width_img)
print ('channel_img = ', channel_img)
print ('filter_height = ', filter_height)
print ('filter_width = ', filter_width)
print ('num_filters = ', num_filters)
print ('n_DnCNN_layers = ', n_DnCNN_layers)
print ('n_DAMP_layers = ', n_DAMP_layers)
print ('sampling_rate = ', sampling_rate)
print ('BATCH_SIZE = ', BATCH_SIZE)
print ('sigma_w = ', sigma_w)
print ('n = ', n)
print ('m = ', m)
print('is_complex = ', is_complex)
## Count the total number of learnable parameters
def CountParameters():
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value  # TODO: originally dim.value; use dim directly after migrating to TensorFlow 2.X
total_parameters += variable_parameters
print('Total number of parameters: ')
print(total_parameters)
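# Hedged sketch (not part of the original pipeline): under TensorFlow 2.X,
# tf.trainable_variables() is only available through tf.compat.v1, so an equivalent
# count over a hypothetical Keras `model` argument could look like this.
def CountParametersV2(model):
    # TensorShape.num_elements() gives the number of scalars held by each variable
    total_parameters = sum(v.shape.num_elements() for v in model.trainable_variables)
    print('Total number of parameters: ', total_parameters)
    return total_parameters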
class LDAMP_wrapper():
def __init__(self, sensing_method, specifics):
#Global variables
self.specifics = specifics
self.height_img = specifics['height_img']
self.width_img = specifics['width_img']
self.channel_img = specifics['channel_img']
self.filter_height = specifics['filter_height']
self.filter_width = specifics['filter_width']
self.num_filters = specifics['num_filters']
self.n_DnCNN_layers = specifics['n_DnCNN_layers']
self.sampling_rate = specifics['sampling_rate']
self.BATCH_SIZE = specifics['BATCH_SIZE']
self.sigma_w = specifics['sigma_w']
self.n = specifics['n']
self.m = specifics['m']
self.n_fp = np.float32(self.n)
self.m_fp = np.float32(self.m)
self.is_complex = False
if(not(specifics['DenoiserbyDenoiser'])
and (sensing_method == 'complex-gaussian' or sensing_method == 'coded-diffraction')):
self.is_complex = True
self.adaptive_weights = False
self.training = True
# used as local variables
self.start_layer = specifics['start_layer']
self.max_n_DAMP_layers = specifics['max_n_DAMP_layers']
self.init_mu = specifics['init_mu']
self.init_sigma = specifics['init_sigma']
self.tie_weights = specifics['tie_weights']
self.LayerbyLayer = specifics['LayerbyLayer']
self.DenoiserbyDenoiser = specifics['DenoiserbyDenoiser']
self.sigma_w_min = specifics['sigma_w_min']
self.sigma_w_max = specifics['sigma_w_max']
self.alg = specifics['alg']
self.loss_func = specifics['loss_func']
self.measurement_mode = specifics['mode']
self.n_Train_Images = specifics['n_Train_Images']
self.n_Val_Images = specifics['n_Val_Images']
self.learning_rates = specifics['learning_rates']
self.ResumeTraining = specifics['ResumeTraining']
self.InitWeightsMethod = specifics['InitWeightsMethod']
self.EPOCHS = specifics['EPOCHS']
self.max_Epoch_Fails = specifics['max_Epoch_Fails']
self.validation_patch = specifics['validation_patch']
self.training_patch = specifics['training_patch']
def initialize(self,dataset,sensing, stage):
# do the preparation for the running.
self.dset = dataset
self.stage = stage
def run(self):
start_layer = self.start_layer
max_n_DAMP_layers = self.max_n_DAMP_layers
init_mu = self.init_mu
init_sigma = self.init_sigma
tie_weights = self.tie_weights
alg = self.alg
LayerbyLayer = self.LayerbyLayer
loss_func = self.loss_func
measurement_mode = self.measurement_mode
n_Train_Images = self.n_Train_Images
n_Val_Images = self.n_Val_Images
learning_rates = self.learning_rates
ResumeTraining = self.ResumeTraining
InitWeightsMethod = self.InitWeightsMethod
EPOCHS = self.EPOCHS
max_Epoch_Fails = self.max_Epoch_Fails
stage = self.stage
validation_patch = self.validation_patch
training_patch = self.training_patch
sigma_w_min = self.sigma_w_min
sigma_w_max = self.sigma_w_max
train_start_time = time.time()
print('Denoiser by Denoiser: ', self.DenoiserbyDenoiser)
print('sudo_rgb: ', self.specifics['sudo_rgb'])
if(self.DenoiserbyDenoiser):
if(stage == "training"):
if loss_func == 'SURE':
useSURE = True
else:
useSURE = False
## Problem Parameters
sigma_w_min = sigma_w_min / 255. # Noise std
sigma_w_max = sigma_w_max / 255. # Noise std
n = self.channel_img * self.height_img * self.width_img
# Parameters used to initialize the weights. Not used if old weights are loaded
init_mu = 0
init_sigma = 0.1
## Clear all the old variables, tensors, etc.
tf.reset_default_graph()
SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img,
new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=None,
new_sampling_rate=None,
new_BATCH_SIZE=self.BATCH_SIZE, new_sigma_w=None, new_n=self.n, new_m=None,
new_training=True, iscomplex=self.is_complex)
sensing_methods.SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img,
new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=None,
new_sampling_rate=None,
new_BATCH_SIZE=self.BATCH_SIZE, new_sigma_w=None, new_n=self.n,
new_m=None, new_training=True, iscomplex=self.is_complex)
utils.SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img,
new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=None,
new_sampling_rate=None,
new_BATCH_SIZE=self.BATCH_SIZE, new_sigma_w=None, new_n=self.n, new_m=None,
new_training=True, iscomplex=self.is_complex)
ListNetworkParameters()
# tf Graph input
training_tf = tf.placeholder(tf.bool, name='training')
sigma_w_tf = tf.placeholder(tf.float32)
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])
## Construct the measurement model and handles/placeholders
y_measured = utils.AddNoise(x_true, sigma_w_tf)
## Initialize the variable theta which stores the weights and biases
theta_dncnn = init_vars_DnCNN(init_mu, init_sigma)
## Construct the reconstruction model
# x_hat = LDAMP.DnCNN(y_measured,None,theta_dncnn,training=training_tf)
[x_hat, div_overN] = DnCNN_wrapper(y_measured, None, theta_dncnn, training=training_tf)
## Define loss and optimizer
nfp = np.float32(height_img * width_img)
if useSURE:
cost = utils.MCSURE_loss(x_hat, div_overN, y_measured, sigma_w_tf)
else:
cost = tf.nn.l2_loss(x_true - x_hat) * 1. / nfp
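# Clarifying note (describes utils.MCSURE_loss only at a high level; its exact
# implementation lives elsewhere): SURE-style losses estimate the denoiser MSE from the
# noisy data alone by combining the residual with a divergence term (div_overN) scaled
# by the noise level, so no clean ground truth is required. The l2 branch above is the
# standard supervised alternative, normalized per pixel by nfp.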
CountParameters()
# TODO major change
# ## Load and Preprocess Training Data
train_images, val_images = utils.splitDataset(self.dset, self.specifics)
len_train = len(train_images)
len_val = len(val_images)
x_train = np.transpose(np.reshape(train_images, (-1, channel_img * height_img * width_img)))
x_val = np.transpose(np.reshape(val_images, (-1, channel_img * height_img * width_img)))
## Train the Model
for learning_rate in learning_rates:
optimizer0 = tf.train.AdamOptimizer(learning_rate=learning_rate) # Train all the variables
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step. Allows us to update averages w/in BN
optimizer = optimizer0.minimize(cost)
saver_best = tf.train.Saver() # defaults to saving all variables
saver_dict = {}
config = tf.ConfigProto(allow_soft_placement=True)
#TODO This is used to accommodate our RTX graphics card, don't need it otherwise
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(
tf.global_variables_initializer()) # Seems to be necessary for the batch normalization layers for some reason.
# if FLAGS.debug:
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
start_time = time.time()
print("Load Initial Weights ...")
if ResumeTraining or learning_rate != learning_rates[0]:
##Load previous values for the weights and BNs
saver_initvars_name_chckpt = utils.GenDnCNNFilename(sigma_w_min, sigma_w_max,useSURE=useSURE, specifics=self.specifics) + ".ckpt"
for l in range(0, n_DnCNN_layers):
saver_dict.update({"l" + str(l) + "/w": theta_dncnn[0][l]})
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, and beta
gamma_name = "l" + str(l) + "/BN/gamma:0"
beta_name = "l" + str(l) + "/BN/beta:0"
var_name = "l" + str(l) + "/BN/moving_variance:0"
mean_name = "l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"l" + str(l) + "/BN/beta": beta})
saver_dict.update({"l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update({"l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, saver_initvars_name_chckpt)
# saver_initvars = tf.train.Saver()
# saver_initvars.restore(sess, saver_initvars_name_chckpt)
else:
pass
time_taken = time.time() - start_time
print("Training ...")
print()
save_name = utils.GenDnCNNFilename(sigma_w_min, sigma_w_max, useSURE=useSURE, specifics=self.specifics)
save_name_chckpt = save_name + ".ckpt"
val_values = []
print("Initial Weights Validation Value:")
rand_inds = np.random.choice(len_val, n_Val_Images, replace=False)
start_time = time.time()
for offset in range(0, n_Val_Images - BATCH_SIZE + 1,
BATCH_SIZE):  # Subtract batch size-1 to avoid errors when the number of images is not a multiple of the batch size
end = offset + BATCH_SIZE
batch_x_val = x_val[:, rand_inds[offset:end]]
sigma_w_thisBatch = sigma_w_min + np.random.rand() * (sigma_w_max - sigma_w_min)
# Run optimization.
loss_val = sess.run(cost, feed_dict={x_true: batch_x_val, sigma_w_tf: sigma_w_thisBatch,
training_tf: False})
val_values.append(loss_val)
time_taken = time.time() - start_time
print(np.mean(val_values))
best_val_error = np.mean(val_values)
best_sess = sess
print("********************")
save_path = saver_best.save(best_sess, save_name_chckpt)
print("Initial session model saved in file: %s" % save_path)
failed_epochs = 0
for i in range(EPOCHS):
if failed_epochs >= max_Epoch_Fails:
break
train_values = []
print("This Training iteration ...")
rand_inds = np.random.choice(len_train, n_Train_Images, replace=False)
start_time = time.time()
for offset in range(0, n_Train_Images - BATCH_SIZE + 1,
BATCH_SIZE): # Subtract batch size-1 to avoid errors when len(train_images) is not a multiple of the batch size
end = offset + BATCH_SIZE
batch_x_train = x_train[:, rand_inds[offset:end]]
sigma_w_thisBatch = sigma_w_min + np.random.rand() * (sigma_w_max - sigma_w_min)
# Run optimization.
_, loss_val = sess.run([optimizer, cost],
feed_dict={x_true: batch_x_train, sigma_w_tf: sigma_w_thisBatch,
training_tf: True}) # Feed dict names should match with the placeholders
train_values.append(loss_val)
time_taken = time.time() - start_time
print(np.mean(train_values))
val_values = []
print("EPOCH ", i + 1, " Validation Value:")
rand_inds = np.random.choice(len_val, n_Val_Images, replace=False)
start_time = time.time()
for offset in range(0, n_Val_Images - BATCH_SIZE + 1,
BATCH_SIZE):  # Subtract batch size-1 to avoid errors when the number of images is not a multiple of the batch size
end = offset + BATCH_SIZE
batch_x_val = x_val[:, rand_inds[offset:end]]
sigma_w_thisBatch = sigma_w_min + np.random.rand() * (sigma_w_max - sigma_w_min)
# Run optimization.
loss_val = sess.run(cost, feed_dict={x_true: batch_x_val, sigma_w_tf: sigma_w_thisBatch,
training_tf: False})
val_values.append(loss_val)
time_taken = time.time() - start_time
print(np.mean(val_values))
if (np.mean(val_values) < best_val_error):
failed_epochs = 0
best_val_error = np.mean(val_values)
best_sess = sess
print("********************")
save_path = saver_best.save(best_sess, save_name_chckpt)
print("Best session model saved in file: %s" % save_path)
else:
failed_epochs = failed_epochs + 1
print("********************")
total_train_time = time.time() - train_start_time
save_name_time = save_name + "_time.txt"
# f = open(save_name, 'wb') #TODO convert to python3.7?
# f.write("Total Training Time =" + str(total_train_time))
# f.close()
elif (stage == "testing"):
train_start_time = time.time()
## Clear all the old variables, tensors, etc.
tf.reset_default_graph()
SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img,
new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=None,
new_sampling_rate=None,
new_BATCH_SIZE=self.BATCH_SIZE, new_sigma_w=self.sigma_w, new_n=self.n, new_m=None,
new_training=False, iscomplex=self.is_complex)
sensing_methods.SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img,
new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=None,
new_sampling_rate=None,
new_BATCH_SIZE=self.BATCH_SIZE, new_sigma_w=self.sigma_w, new_n=self.n, new_m=None,
new_training=False, iscomplex=self.is_complex)
utils.SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img,
new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=None,
new_sampling_rate=None,
new_BATCH_SIZE=self.BATCH_SIZE, new_sigma_w=self.sigma_w, new_n=self.n, new_m=None,
new_training=False, iscomplex=self.is_complex)
n = self.n
useSURE = False
ListNetworkParameters()
# tf Graph input
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])
## Construct the measurement model and handles/placeholders
y_measured = utils.AddNoise(x_true, sigma_w)
## Initialize the variable theta which stores the weights and biases
theta_dncnn = init_vars_DnCNN(init_mu, init_sigma)
## Construct the reconstruction model
x_hat = DnCNN(y_measured, None, theta_dncnn, training=False)
CountParameters()
# TODO major change
## Load and Preprocess Test Data
test_images = utils.generate_testset(channel_img, width_img, height_img, self.specifics)
# test_images = test_images[:, 0, :, :]
assert (len(test_images) >= BATCH_SIZE), "Requested too much Test data"
x_test = np.transpose(
np.reshape(test_images[0:BATCH_SIZE], (BATCH_SIZE, height_img * width_img * channel_img)))
with tf.Session() as sess:
y_test = sess.run(y_measured, feed_dict={x_true: x_test})
## Train the Model
saver = tf.train.Saver() # defaults to saving all variables
saver_dict = {}
config = tf.ConfigProto(allow_soft_placement=True)
#TODO This is used to accommodate our RTX graphics card, don't need it otherwise
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# if 255.*sigma_w<10.:
# sigma_w_min=0.
# sigma_w_max=10.
# elif 255.*sigma_w<20.:
# sigma_w_min=10.
# sigma_w_max=20.
# elif 255.*sigma_w < 40.:
# sigma_w_min = 20.
# sigma_w_max = 40.
# elif 255.*sigma_w < 60.:
# sigma_w_min = 40.
# sigma_w_max = 60.
# elif 255.*sigma_w < 80.:
# sigma_w_min = 60.
# sigma_w_max = 80.
# elif 255.*sigma_w < 100.:
# sigma_w_min = 80.
# sigma_w_max = 100.
# elif 255.*sigma_w < 150.:
# sigma_w_min = 100.
# sigma_w_max = 150.
# elif 255.*sigma_w < 300.:
# sigma_w_min = 150.
# sigma_w_max = 300.
# else:
# sigma_w_min = 300.
# sigma_w_max = 500.
# sigma_w_min = sigma_w * 255.
# sigma_w_max = sigma_w * 255.
save_name = utils.GenDnCNNFilename(sigma_w_min / 255., sigma_w_max / 255., useSURE=useSURE, specifics=self.specifics)
save_name_chckpt = save_name + ".ckpt"
saver.restore(sess, save_name_chckpt)
print("Reconstructing Signal")
start_time = time.time()
[reconstructed_test_images] = sess.run([x_hat], feed_dict={y_measured: y_test})
time_taken = time.time() - start_time
# take first image in batch and display
if (self.specifics['sudo_rgb']):
fig1 = plt.figure()
x_recombined = np.reshape(np.transpose(x_test)[:3], (height_img * width_img * 3))
plt.imshow(np.reshape(x_recombined, (height_img, width_img, 3)))
plt.show()
fig2 = plt.figure()
x_recombined = np.reshape(np.transpose(y_test)[:3], (height_img * width_img * 3))
plt.imshow(np.reshape(x_recombined, (height_img, width_img, 3)))
plt.show()
fig3 = plt.figure()
x_recombined = np.reshape(np.transpose(reconstructed_test_images)[:3],
(height_img * width_img * 3))
plt.imshow(np.reshape(x_recombined, (height_img, width_img, 3)))
plt.show()
[_, _, PSNR] = utils.EvalError_np(x_test, reconstructed_test_images)
print(" PSNR: ", PSNR)
print(" Average: ", np.average(PSNR))
elif(channel_img == 1):
fig1 = plt.figure()
plt.imshow(np.transpose(np.reshape(x_test[:, 0], (height_img, width_img))), interpolation='nearest')
plt.show()
fig2 = plt.figure()
plt.imshow(np.transpose(np.reshape(y_test[:, 0], (height_img, width_img))), interpolation='nearest',
cmap='gray')
plt.show()
fig3 = plt.figure()
plt.imshow(np.transpose(np.reshape(reconstructed_test_images[:, 0], (height_img, width_img))),
interpolation='nearest', cmap='gray')
plt.show()
[_, _, PSNR] = utils.EvalError_np(x_test[:, 0], reconstructed_test_images[:, 0])
print(" PSNR: ", PSNR)
print(" Average: ", np.average(PSNR))
else:
fig1 = plt.figure()
plt.imshow(np.reshape(x_test[:, 0], (height_img, width_img, 3)))
plt.show()
fig2 = plt.figure()
plt.imshow(np.reshape(y_test[:, 0], (height_img, width_img, 3)))
plt.show()
fig3 = plt.figure()
plt.imshow(np.reshape(reconstructed_test_images[:, 0], (height_img, width_img, 3)))
plt.show()
[_, _, PSNR] = utils.EvalError_np(x_test, reconstructed_test_images)
print(" PSNR: ", PSNR)
print(" Average: ", np.average(PSNR))
else:
raise Exception("Unknown stage " + stage)
else:
if (stage == 'training'):
for n_DAMP_layers in range(start_layer, max_n_DAMP_layers + 1, 1):
## Clear all the old variables, tensors, etc.
tf.reset_default_graph()
SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img, new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=n_DAMP_layers,
new_sampling_rate=self.sampling_rate, new_BATCH_SIZE=self.BATCH_SIZE,
new_sigma_w=self.sigma_w, new_n=self.n, new_m=self.m, new_training=True, iscomplex=self.is_complex)
sensing_methods.SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img, new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=n_DAMP_layers,
new_sampling_rate=self.sampling_rate, new_BATCH_SIZE=self.BATCH_SIZE,
new_sigma_w=self.sigma_w, new_n=self.n, new_m=self.m, new_training=True, iscomplex=self.is_complex)
utils.SetNetworkParams(new_height_img=self.height_img, new_width_img=self.width_img, new_channel_img=self.channel_img,
new_filter_height=self.filter_height, new_filter_width=self.filter_width,
new_num_filters=self.num_filters,
new_n_DnCNN_layers=self.n_DnCNN_layers, new_n_DAMP_layers=n_DAMP_layers,
new_sampling_rate=self.sampling_rate, new_BATCH_SIZE=self.BATCH_SIZE,
new_sigma_w=self.sigma_w, new_n=self.n, new_m=self.m, new_training=True, iscomplex=self.is_complex)
n = self.n
ListNetworkParameters()
# tf Graph input
training_tf = tf.placeholder(tf.bool, name='training')
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])
## Initialize the variable theta which stores the weights and biases
if tie_weights == True:
n_layers_trained = 1
else:
n_layers_trained = n_DAMP_layers
theta = [None] * n_layers_trained
for iter in range(n_layers_trained):
with tf.variable_scope("Iter" + str(iter)):
theta_thisIter = init_vars_DnCNN(init_mu, init_sigma)
theta[iter] = theta_thisIter
## Construct the measurement model and handles/placeholders
[A_handle, At_handle, A_val, A_val_tf] = sensing_methods.GenerateMeasurementOperators(measurement_mode)
y_measured = utils.GenerateNoisyCSData_handles(x_true, A_handle, sigma_w, A_val_tf)
## Construct the reconstruction model
if alg == 'DAMP':
(x_hat, MSE_history, NMSE_history, PSNR_history, r_final, rvar_final, div_overN) = LDAMP(
y_measured, A_handle, At_handle, A_val_tf, theta, x_true, tie=tie_weights, training=training_tf,
LayerbyLayer=LayerbyLayer)
elif alg == 'DIT':
(x_hat, MSE_history, NMSE_history, PSNR_history) = LDIT(y_measured, A_handle, At_handle, A_val_tf,
theta, x_true, tie=tie_weights,
training=training_tf,
LayerbyLayer=LayerbyLayer)
else:
raise ValueError('alg was not a supported option')
## Define loss and determine which variables to train
nfp = np.float32(height_img * width_img)
if loss_func == 'SURE':
assert alg == 'DAMP', "Only LDAMP supports training with SURE"
cost = utils.MCSURE_loss(x_hat, div_overN, r_final, tf.sqrt(rvar_final))
elif loss_func == 'GSURE':
assert alg == 'DAMP', "Only LDAMP currently supports training with GSURE"
temp0 = tf.matmul(A_val_tf, A_val_tf, transpose_b=True)
temp1 = tf.matrix_inverse(temp0)
pinv_A = tf.matmul(A_val_tf, temp1, transpose_a=True)
P = tf.matmul(pinv_A, A_val_tf)
# Treat LDAMP/LDIT as a function of A^ty to calculate the divergence
Aty_tf = At_handle(A_val_tf, y_measured)
# Overwrite existing x_hat def
(x_hat, _, _, _, _, _, _) = LDAMP_Aty(Aty_tf, A_handle, At_handle, A_val_tf, theta, x_true,
tie=tie_weights, training=training_tf,
LayerbyLayer=LayerbyLayer)
if sigma_w == 0.: # Not sure if TF is smart enough to avoid computing MCdiv when it doesn't have to
MCdiv = 0.
else:
# Calculate MC divergence of P*LDAMP(Aty)
epsilon = tf.maximum(.001 * tf.reduce_max(Aty_tf, axis=0), .00001)
eta = tf.random_normal(shape=Aty_tf.get_shape(), dtype=tf.float32)
Aty_perturbed_tf = Aty_tf + tf.multiply(eta, epsilon)
(x_hat_perturbed, _, _, _, _, _, _) = LDAMP_Aty(Aty_perturbed_tf, A_handle, At_handle,
A_val_tf, theta, x_true,
tie=tie_weights, training=training_tf,
LayerbyLayer=LayerbyLayer)
Px_hat_perturbed = tf.matmul(P, x_hat_perturbed)
Px_hat = tf.matmul(P, x_hat)
eta_dx = tf.multiply(eta, Px_hat_perturbed - Px_hat)
mean_eta_dx = tf.reduce_mean(eta_dx, axis=0)
MCdiv = tf.divide(mean_eta_dx, epsilon) * n
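# Derivation of the estimator above: the Monte Carlo divergence of f = P*LDAMP at Aty
# uses eta ~ N(0, I) and a small per-column epsilon, div(f) ~ eta^T (f(Aty + eps*eta) - f(Aty)) / eps.
# Taking the mean of the elementwise product over the n components and multiplying by n
# recovers that sum, which approximates the trace of the Jacobian needed by MCGSURE_loss.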
x_ML = tf.matmul(pinv_A, y_measured)
cost = utils.MCGSURE_loss(x_hat, x_ML, P, MCdiv, sigma_w)
# Note: This cost is missing a ||Px||^2 term and so is expected to go negative
else:
cost = tf.nn.l2_loss(x_true - x_hat) * 1. / nfp
iter = n_DAMP_layers - 1
if LayerbyLayer == True:
vars_to_train = [] # List of only the variables in the last layer.
for l in range(0, n_DnCNN_layers):
# vars_to_train.extend([theta[iter][0][l], theta[iter][1][l]])
vars_to_train.extend([theta[iter][0][l]])
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, beta, and gamma
gamma_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
vars_to_train.extend([gamma, beta, moving_variance, moving_mean])
else:
vars_to_train = tf.trainable_variables()
CountParameters()
#TODO major change
# ## Load and Preprocess Training Data
train_images, val_images = utils.splitDataset(self.dset, self.specifics)
len_train = len(train_images)
len_val = len(val_images)
x_train = np.transpose(np.reshape(train_images, (-1, channel_img * height_img * width_img)))
x_val = np.transpose(np.reshape(val_images, (-1, channel_img * height_img * width_img)))
## Train the Model
for learning_rate in learning_rates:
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, var_list=vars_to_train)
optimizer0 = tf.train.AdamOptimizer(learning_rate=learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step. Allows us to update averages w/in BN
optimizer = optimizer0.minimize(cost, var_list=vars_to_train)
saver_best = tf.train.Saver() # defaults to saving all variables
saver_dict = {}
config = tf.ConfigProto(allow_soft_placement=True)
#TODO This is used to accommodate our RTX graphics card, don't need it otherwise
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(
tf.global_variables_initializer()) # Seems to be necessary for the batch normalization layers for some reason.
# if FLAGS.debug:
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
start_time = time.time()
print("Load Initial Weights ...")
if ResumeTraining or learning_rate != learning_rates[0]:
##Load previous values for the weights
saver_initvars_name_chckpt = utils.GenLDAMPFilename(alg, tie_weights, LayerbyLayer,loss_func=loss_func, specifics=self.specifics) + ".ckpt"
for iter in range(n_layers_trained): # Create a dictionary with all the variables except those associated with the optimizer.
for l in range(0, n_DnCNN_layers):
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/w": theta[iter][0][l]}) # ,
# "Iter" + str(iter) + "/l" + str(l) + "/b": theta[iter][1][l]})
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, and beta
gamma_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/beta": beta})
saver_dict.update(
{"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, saver_initvars_name_chckpt)
print("Loaded wieghts from %s" % saver_initvars_name_chckpt)
else:
## Load initial values for the weights.
# To do so, one associates each variable with a key (e.g. theta[iter][0][0] with l1/w_DnCNN) and loads the l1/w_DnCNN weights that were trained on the denoiser
# To confirm the weights were actually loaded, run sess.run(theta[0][0][0][0][0])[0][0] before and after this statement. (Requires running sess.run(tf.global_variables_initializer()) first.)
if InitWeightsMethod == 'layer_by_layer':
# load the weights from an identical network that was trained layer-by-layer
saver_initvars_name_chckpt = utils.GenLDAMPFilename(alg, tie_weights, LayerbyLayer=True,loss_func=loss_func, specifics=self.specifics) + ".ckpt"
for iter in range(n_layers_trained): # Create a dictionary with all the variables except those associated with the optimizer.
for l in range(0, n_DnCNN_layers):
saver_dict.update(
{"Iter" + str(iter) + "/l" + str(l) + "/w": theta[iter][0][l]}) # ,
# "Iter" + str(iter) + "/l" + str(l) + "/b": theta[iter][1][l]})
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, and beta
gamma_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/beta": beta})
saver_dict.update(
{"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update(
{"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, saver_initvars_name_chckpt)
if InitWeightsMethod == 'denoiser':
# load initial weights that were trained on a denoising problem
saver_initvars_name_chckpt = utils.GenDnCNNFilename(300. / 255., 500. / 255., specifics=self.specifics) + ".ckpt"
iter = 0
for l in range(0, n_DnCNN_layers):
saver_dict.update({"l" + str(l) + "/w": theta[iter][0][
l]}) # , "l" + str(l) + "/b": theta[iter][1][l]})
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, and beta
gamma_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"l" + str(l) + "/BN/beta": beta})
saver_dict.update({"l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update({"l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, saver_initvars_name_chckpt)
elif InitWeightsMethod == 'smaller_net' and n_DAMP_layers != 1:
# Initialize weights using a smaller network's weights
saver_initvars_name_chckpt = utils.GenLDAMPFilename(alg, tie_weights, LayerbyLayer,
n_DAMP_layer_override=n_DAMP_layers - 1,
loss_func=loss_func,
specifics=self.specifics) + ".ckpt"
# Load the first n-1 iterations weights from a previously learned network
for iter in range(n_DAMP_layers - 1):
for l in range(0, n_DnCNN_layers):
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/w": theta[iter][0][
l]}) # , "Iter"+str(iter)+"/l" + str(l) + "/b": theta[iter][1][l]})
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, and beta
gamma_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"Iter" + str(iter) + "/l" + str(l) + "/BN/beta": beta})
saver_dict.update(
{"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update(
{"Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, saver_initvars_name_chckpt)
# Initialize the weights of layer n by using the weights from layer n-1
iter = n_DAMP_layers - 1
saver_dict = {}
for l in range(0, n_DnCNN_layers):
saver_dict.update({"Iter" + str(iter - 1) + "/l" + str(l) + "/w": theta[iter][0][
l]}) # ,"Iter" + str(iter-1) + "/l" + str(l) + "/b": theta[iter][1][l]})
for l in range(1, n_DnCNN_layers - 1): # Associate variance, means, and beta
gamma_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(iter) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"Iter" + str(iter - 1) + "/l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"Iter" + str(iter - 1) + "/l" + str(l) + "/BN/beta": beta})
saver_dict.update(
{"Iter" + str(iter - 1) + "/l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update(
{"Iter" + str(iter - 1) + "/l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, saver_initvars_name_chckpt)
else:
# use random weights. This will occur for 1 layer networks if set to use smaller_net initialization
pass
time_taken = time.time() - start_time
print("Training ...")
print()
save_name = utils.GenLDAMPFilename(alg, tie_weights, LayerbyLayer, loss_func=loss_func, specifics=self.specifics)
save_name_chckpt = save_name + ".ckpt"
val_values = []
print("Initial Weights Validation Value:")
rand_inds = np.random.choice(len_val, n_Val_Images, replace=False)
start_time = time.time()
for offset in range(0, n_Val_Images - BATCH_SIZE + 1,
BATCH_SIZE):  # Subtract batch size-1 to avoid errors when the number of images is not a multiple of the batch size
end = offset + BATCH_SIZE
# Generate a new measurement matrix
A_val = sensing_methods.GenerateMeasurementMatrix(measurement_mode)
batch_x_val = x_val[:, rand_inds[offset:end]]
# Run optimization. This will both generate compressive measurements and then reconstruct from them.
loss_val = sess.run(cost,
feed_dict={x_true: batch_x_val, A_val_tf: A_val, training_tf: False})
val_values.append(loss_val)
time_taken = time.time() - start_time
print(np.mean(val_values))
if not LayerbyLayer: # For end-to-end training save the initial state so that LDAMP end-to-end doesn't diverge when using a high training rate
best_val_error = np.mean(val_values)
best_sess = sess
print("********************")
save_path = saver_best.save(best_sess, save_name_chckpt)
print("Initial session model saved in file: %s" % save_path)
else: # For layerbylayer training don't save the initial state. With LDIT the initial validation error was often better than the validation error after training 1 epoch. This caused the network not to update and eventually diverge as it got longer and longer
best_val_error = np.inf
failed_epochs = 0
for i in range(EPOCHS):
if failed_epochs >= max_Epoch_Fails:
break
train_values = []
print("This Training iteration ...")
rand_inds = np.random.choice(len_train, n_Train_Images, replace=False)
start_time = time.time()
for offset in range(0, n_Train_Images - BATCH_SIZE + 1,
BATCH_SIZE): # Subtract batch size-1 to avoid errors when len(train_images) is not a multiple of the batch size
end = offset + BATCH_SIZE
# Generate a new measurement matrix
A_val = sensing_methods.GenerateMeasurementMatrix(measurement_mode)
batch_x_train = x_train[:, rand_inds[offset:end]]
# Run optimization. This will both generate compressive measurements and then reconstruct from them.
_, loss_val = sess.run([optimizer, cost],
feed_dict={x_true: batch_x_train, A_val_tf: A_val,
training_tf: True}) # Feed dict names should match with the placeholders
train_values.append(loss_val)
time_taken = time.time() - start_time
print(
|
np.mean(train_values)
|
numpy.mean
|
import rospy
from gazebo_msgs.msg import LinkStates
from geometry_msgs.msg import Twist, Pose, TransformStamped
from sensor_msgs.msg import Image, CameraInfo
from tf2_ros import TransformListener, Buffer
import sys
import cv2
import math
import numpy as np
from scipy.spatial.transform import Rotation as R
from cv_bridge import CvBridge
bridge = CvBridge()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
class Tracker():
def __init__(self, vehicle_type, vehicle_id):
self.K = []
self.E = np.hstack((np.eye(3),np.array([[0.25],[0.25],[1]])))
self.lines_3d = np.array([[[0.05,0,0],[0.05,0.05,0]],[[0.05,0.05,0],[0.05,0.05,0.05]],[[0.05,0.05,0.05],[0.05,0,0.05]],[[0.05,0,0.05],[0.05,0,0]],[[0,0,0],[0,0.05,0]],[[0,0.05,0],[0,0.05,0.05]],[[0,0.05,0.05],[0,0,0.05]],[[0,0,0.05],[0,0,0]],[[0,0,0],[0.05,0,0]],[[0,0.05,0],[0.05,0.05,0]],[[0,0,0.05],[0.05,0,0.05]],[[0,0.05,0.05],[0.05,0.05,0.05]]])
self.lines_3d = self.lines_3d - np.array([0.02,0.02,0.02])
self.points_2d = []
self.box_pose = Pose()
self.camera_pose = Pose()
# self.plot_3d()
rospy.init_node(vehicle_type+'_'+vehicle_id+'_visual_servo')
rospy.Subscriber(vehicle_type+'_'+vehicle_id+'/realsense/depth_camera/color/image_raw', Image, self.image_callback)
rospy.Subscriber(vehicle_type+'_'+vehicle_id+'/realsense/depth_camera/color/camera_info', CameraInfo, self.camera_info_callback)
rospy.Subscriber("/gazebo/link_states", LinkStates, self.link_states_callback)
self.image_pub = rospy.Publisher(vehicle_type+'_'+vehicle_id+'/visual_servo/image', Image, queue_size=2)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
rate.sleep()
def project(self):
for line_3d in self.lines_3d:
for point_3d in line_3d:
point_2d = self.K.dot(self.E).dot(np.vstack((point_3d.reshape(3,1),1)))
if len(self.points_2d) == 0:  # avoid comparing a NumPy array against an empty list
self.points_2d = point_2d
else:
self.points_2d = np.hstack((self.points_2d, point_2d))
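# Note on the projection above: each column of points_2d is a homogeneous vector
# [u, v, w]^T = K * E * [X, Y, Z, 1]^T; the pixel coordinates are (u / w, v / w),
# which is why draw_lines divides rows 0 and 1 by row 2 before calling cv2.line.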
def plot_3d(self):
for line_3d in self.lines_3d:
ax.plot(*zip(line_3d[0],line_3d[1]),color="b")
plt.show()
def draw_lines(self, img, points, color=[255, 0, 0], thickness=3):
line_img = np.zeros(
(
img.shape[0],
img.shape[1],
3  # use 3 channels so cv2.addWeighted can blend with the color image
),
dtype=np.uint8
)
img = np.copy(img)
if points is None:
return
for i in range(points.shape[1] // 2):  # integer division so range() receives an int under Python 3
cv2.line(line_img, (int(points[0,2*i]/points[2,2*i]), int(points[1,2*i]/points[2,2*i])), (int(points[0,2*i+1]/points[2,2*i+1]), int(points[1,2*i+1]/points[2,2*i+1])), color, thickness)
img = cv2.addWeighted(img, 0.8, line_img, 1.0, 0.0)
return img
def link_states_callback(self, msg):
try:
box_id = msg.name.index("box::base_link")
camera_id = msg.name.index("iris_0::realsense_camera::link")
box_pose = msg.pose[box_id]
box_R = R.from_quat([box_pose.orientation.x, box_pose.orientation.y, box_pose.orientation.z, box_pose.orientation.w]).as_dcm()
box_t = np.array([[box_pose.position.x],[box_pose.position.y],[box_pose.position.z]])
box_pose = np.hstack((box_R,box_t))
box_pose = np.vstack((box_pose,np.array([0,0,0,1])))
camera_pose = msg.pose[camera_id]
camera_R = R.from_quat([camera_pose.orientation.x, camera_pose.orientation.y, camera_pose.orientation.z, camera_pose.orientation.w]).as_dcm()
camera_R = camera_R.dot(R.from_euler('z',-90,degrees=True).as_dcm()).dot(R.from_euler('x',-90,degrees=True).as_dcm())
camera_t = np.array([[camera_pose.position.x],[camera_pose.position.y],[camera_pose.position.z]])
camera_pose =
|
np.hstack((camera_R,camera_t))
|
numpy.hstack
|
"""Script containing the UR5 and Pendulum environments."""
import numpy as np
import gym
from gym.spaces import Box
import os
from hbaselines.envs.hac.env_utils import check_validity
from hbaselines.utils.reward_fns import negative_distance
try:
import mujoco_py
except ImportError:
# for testing purposes
import hbaselines.envs.hac.dummy_mujoco as mujoco_py
class Environment(gym.Env):
"""Base environment class.
Supports the UR5 and Pendulum environments from:
Levy, Andrew, et al. "Learning Multi-Level Hierarchies with Hindsight."
(2018).
Attributes
----------
name : str
name of the environment; adopted from the name of the model
model : object
the imported MuJoCo model
sim : mujoco_py.MjSim
the MuJoCo simulator object, used to interact with and advance the
simulation
end_goal_thresholds : array_like
goal achievement thresholds. If the agent is within the threshold for
each dimension, the end goal has been achieved and the reward of 0 is
granted.
initial_state_space : list of (float, float)
bounds for the initial values for all elements in the state space.
This is achieved during the reset procedure.
max_actions : int
maximum number of atomic actions. This will typically be
flags.time_scale**(flags.layers).
visualize : bool
specifies whether to render the environment
viewer : mujoco_py.MjViewer
a display GUI showing the scene of an MjSim object
num_frames_skip : int
number of time steps per atomic action
num_steps : int
number of steps since the start of the current rollout
"""
def __init__(self,
model_name,
project_state_to_end_goal,
end_goal_thresholds,
initial_state_space,
contextual_reward,
use_contexts=False,
random_contexts=False,
context_range=None,
max_actions=1200,
num_frames_skip=1,
show=False):
"""Instantiate the Environment object.
Parameters
----------
model_name : str
name of the xml file in './mujoco_files/' that the model is
generated from
end_goal_thresholds : array_like
goal achievement thresholds. If the agent is within the threshold
for each dimension, the end goal has been achieved and the reward
of 0 is granted.
initial_state_space : list of (float, float)
bounds for the initial values for all elements in the state space.
This is achieved during the reset procedure.
max_actions : int, optional
maximum number of atomic actions. Defaults to 1200.
num_frames_skip : int, optional
number of time steps per atomic action. Defaults to 10.
show : bool, optional
specifies whether to render the environment. Defaults to False.
"""
# Ensure environment customizations have been properly entered.
check_validity(model_name, initial_state_space, max_actions,
num_frames_skip)
self.name = model_name
self.project_state_to_end_goal = project_state_to_end_goal
self.end_goal_thresholds = end_goal_thresholds
self.initial_state_space = initial_state_space
self.max_actions = max_actions
# Create Mujoco Simulation
mujoco_file_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'assets'))
self.model = mujoco_py.load_model_from_path(
os.path.join(mujoco_file_path, model_name))
self.sim = mujoco_py.MjSim(self.model)
# contextual variables
self.use_contexts = use_contexts
self.random_contexts = random_contexts
self.context_range = context_range
self.contextual_reward = contextual_reward
self.current_context = None
# Implement visualization if necessary
self.visualize = show # Visualization boolean
if self.visualize:
self.viewer = mujoco_py.MjViewer(self.sim) # pragma: no cover
else:
self.viewer = None
self.num_frames_skip = num_frames_skip
self.num_steps = 0
def get_state(self):
"""Get state, which concatenates joint positions and velocities."""
raise NotImplementedError
def reset(self):
"""Reset simulation to state within initial state specified by user.
Returns
-------
array_like
the initial observation
"""
# Reset the time counter.
self.num_steps = 0
# Reset joint positions and velocities
for i in range(len(self.sim.data.qpos)):
self.sim.data.qpos[i] = np.random.uniform(
self.initial_state_space[i][0], self.initial_state_space[i][1])
for i in range(len(self.sim.data.qvel)):
self.sim.data.qvel[i] = np.random.uniform(
self.initial_state_space[len(self.sim.data.qpos) + i][0],
self.initial_state_space[len(self.sim.data.qpos) + i][1])
# Update the goal.
if self.use_contexts:
self.current_context = self.get_next_goal()
# Return state
return self.get_state()
def step(self, action):
"""Advance the simulation by one step.
This method executes the low-level action. This is done for number of
frames specified by num_frames_skip.
Parameters
----------
action : array_like
the low level primitive action
Returns
-------
array_like
the next observation
float
reward
bool
done mask
dict
extra info (set to an empty dictionary by default)
"""
# Perform the requested action for a given number of steps.
self.sim.data.ctrl[:] = action
for _ in range(self.num_frames_skip):
self.sim.step()
self.num_steps += 1
if self.visualize:
self.render() # pragma: no cover
obs = self.get_state()
# check whether the goal is reached.
is_success = all(
np.absolute(self.project_state_to_end_goal(self.sim, obs)
- self.current_context)
< self.end_goal_thresholds)
# Reward of 0 when the goal is reached, and -1 otherwise.
reward = self.contextual_reward(obs, self.current_context, obs)
# If the time horizon is met, set done to True.
done = self.num_steps >= self.max_actions or is_success
# Success is defined as getting within a distance threshold from the
# target.
info_dict = {'is_success': is_success}
return obs, reward, done, info_dict
def display_end_goal(self, end_goal):
"""Visualize end goal.
The goal can be visualized by changing the location of the relevant
site object.
Parameters
----------
end_goal : array_like
the desired end goals to be displayed
"""
raise NotImplementedError
def get_next_goal(self):
"""Return an end goal.
Returns
-------
array_like
the end goal
"""
raise NotImplementedError
def render(self, mode='human'):
"""Render the environment."""
self.viewer.render() # pragma: no cover
@property
def horizon(self):
"""Return the environment horizon."""
return self.max_actions
@property
def context_space(self):
"""Return the shape and bounds of the contextual term."""
# Check if the environment is using contexts, and if not, return a None
# value as the context space.
if self.use_contexts:
# If the context space is random, use the min and max values of
# each context to specify the space range. Otherwise, the min and
# max values are both the deterministic context value.
if self.random_contexts:
context_low = []
context_high = []
for context_i in self.context_range:
low, high = context_i
context_low.append(low)
context_high.append(high)
return Box(low=np.asarray(context_low),
high=np.asarray(context_high),
dtype=np.float32)
else:
return Box(low=np.asarray(self.context_range),
high=np.asarray(self.context_range),
dtype=np.float32)
else:
return None
class UR5(Environment):
"""UR5 environment class.
In this environment, a UR5 reacher object is tasked with reaching an end
goal consisting of the desired joint positions for the 3 main joints.
"""
def __init__(self,
use_contexts=False,
random_contexts=False,
context_range=None,
show=False):
"""Initialize the UR5 environment.
Parameters
----------
use_contexts : bool, optional
specifies whether to add contexts to the observations and add the
contextual rewards
random_contexts : bool
specifies whether the context is a single value, or a random set of
values between some range
context_range : list of float or list of (float, float)
the desired context / goal, or the (lower, upper) bound tuple for
each dimension of the goal
show : bool
specifies whether to render the environment
Raises
------
AssertionError
If the context_range is not the right form based on whether
contexts are a single value or random across a range.
"""
# max number of atomic actions
max_actions = 600
# number of time steps per atomic action
timesteps_per_action = 1 # 15
# file name of Mujoco model. This file is stored in "assets" folder
model_name = "ur5.xml"
# initial state space consisting of the ranges for all joint angles and
# velocities. In the UR5 Reacher task, we use a random initial shoulder
# position and use fixed values for the remainder. Initial joint
# velocities are set to 0.
initial_joint_pos = [(-np.pi / 8, np.pi / 8),
(3.22757851e-03, 3.22757851e-03),
(-1.27944547e-01, -1.27944547e-01)]
initial_joint_speed = [(0, 0) for _ in range(len(initial_joint_pos))]
initial_state_space = initial_joint_pos + initial_joint_speed
# Supplementary function that will ensure all angles are between
# [-2*np.pi,2*np.pi]
def bound_angle(angle):
bounded_angle = np.absolute(angle) % (2 * np.pi)
if angle < 0:
bounded_angle = -bounded_angle
return bounded_angle
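# Worked example (illustrative values): bound_angle(7.0) = 7.0 % (2*pi) ~ 0.717 rad and
# bound_angle(-7.0) ~ -0.717 rad, so angles keep their sign but stay within one turn.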
# function that maps from the state space to the end goal space
def project_state_to_end_goal(sim, *_):
return np.array([bound_angle(sim.data.qpos[i])
for i in range(len(sim.data.qpos))])
# end goal achievement thresholds. If the agent is within the threshold
# for each dimension, the end goal has been achieved.
angle_threshold = np.deg2rad(10)
end_goal_thresholds = np.array([angle_threshold for _ in range(3)])
def contextual_reward(states, goals, next_states):
return negative_distance(
states=states,
goals=goals,
next_states=next_states,
state_indices=[0, 1, 2],
relative_context=False,
offset=0.0,
reward_scales=1.0
)
super(UR5, self).__init__(
model_name=model_name,
project_state_to_end_goal=project_state_to_end_goal,
end_goal_thresholds=end_goal_thresholds,
initial_state_space=initial_state_space,
max_actions=max_actions,
num_frames_skip=timesteps_per_action,
show=show,
contextual_reward=contextual_reward,
use_contexts=use_contexts,
random_contexts=random_contexts,
context_range=context_range,
)
@property
def observation_space(self):
"""Return the observation space."""
return gym.spaces.Box(
low=-float("inf"),
high=float("inf"),
shape=(len(self.sim.data.qpos) + len(self.sim.data.qvel),),
dtype=np.float32,
)
@property
def action_space(self):
"""Return the action space."""
return Box(
low=-self.sim.model.actuator_ctrlrange[:, 1],
high=self.sim.model.actuator_ctrlrange[:, 1],
dtype=np.float32,
)
def get_state(self):
"""See parent class."""
return np.concatenate((self.sim.data.qpos, self.sim.data.qvel))
def get_next_goal(self):
"""See parent class."""
end_goal = np.zeros(shape=(len(self.context_range),))
goal_possible = False
while not goal_possible:
end_goal = np.zeros(shape=(len(self.context_range),))
end_goal[0] = np.random.uniform(self.context_range[0][0],
self.context_range[0][1])
end_goal[1] = np.random.uniform(self.context_range[1][0],
self.context_range[1][1])
end_goal[2] = np.random.uniform(self.context_range[2][0],
self.context_range[2][1])
# Next need to ensure chosen joint angles result in achievable
# task (i.e., desired end effector position is above ground)
theta_1 = end_goal[0]
theta_2 = end_goal[1]
theta_3 = end_goal[2]
# shoulder_pos_1 = np.array([0, 0, 0, 1])
# upper_arm_pos_2 = np.array([0, 0.13585, 0, 1])
forearm_pos_3 = np.array([0.425, 0, 0, 1])
wrist_1_pos_4 = np.array([0.39225, -0.1197, 0, 1])
# Transformation matrix from shoulder to base reference frame
t_1_0 = np.array([[1, 0, 0, 0], [0, 1, 0, 0],
[0, 0, 1, 0.089159], [0, 0, 0, 1]])
# Transformation matrix from upper arm to shoulder reference frame
t_2_1 = np.array([[np.cos(theta_1), -np.sin(theta_1), 0, 0],
[np.sin(theta_1),
|
np.cos(theta_1)
|
numpy.cos
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for experimental.sequential.extended_kalman_filter."""
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class ExtendedKalmanFilterTest(test_util.TestCase):
def test_simple_nonlinear_system(self):
initial_state_prior = tfd.MultivariateNormalDiag(
0., scale_diag=[1., 0.3], validate_args=True)
observation_noise_scale = 0.5
# x_{0, t+1} = x_{0, t} - 0.1 * x_{1, t}**3; x_{1, t+1} = x_{1, t}
def transition_fn(x):
return tfd.MultivariateNormalDiag(
tf.stack(
[x[..., 0] - 0.1 * tf.pow(x[..., 1], 3), x[..., 1]], axis=-1),
scale_diag=[0.5, 0.05], validate_args=True)
def transition_jacobian_fn(x):
return tf.reshape(
tf.stack(
[1., -0.3 * x[..., 1]**2,
tf.zeros(x.shape[:-1]), tf.ones(x.shape[:-1])], axis=-1),
[2, 2])
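# Derivation check for the Jacobian above: with f(x) = [x0 - 0.1 * x1**3, x1],
# df0/dx0 = 1, df0/dx1 = -0.3 * x1**2, df1/dx0 = 0, df1/dx1 = 1,
# which is exactly the [2, 2] matrix assembled by transition_jacobian_fn.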
def observation_fn(x):
return tfd.MultivariateNormalDiag(
x[..., :1],
scale_diag=[observation_noise_scale],
validate_args=True)
observation_jacobian_fn = lambda x: [[1., 0.]]
x = [
|
np.zeros((2,), dtype=np.float32)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 25 13:25:10 2021
@author: Kaneki
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from scipy.spatial import distance
def Periodicity(pos,l):
if pos >= -l and pos <= l :
return pos
elif pos < -l:
return pos + 2*l
elif pos > l:
return pos - 2*l
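# Worked example (illustrative values): with half-width l = 100, a particle that steps
# to x = 100.3 re-enters at Periodicity(100.3, 100) = -99.7, and x = -100.4 maps to 99.6.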
def Populate_distance_matrix(N,i):
Pos_vec = []
for num in range(N):
Pos_vec.append((x[num,i],y[num,i]))
Dist_mat = distance.cdist(Pos_vec,Pos_vec, metric = 'euclidean')
return Dist_mat
def Angle(vec): return np.arctan2(vec[1],np.dot(vec,[1,0]))
def Boundary_condition(b_type,x,y, x_nojump,y_nojump,dx, dy, par,i):
if b_type == "periodic":
x_new = x[par,i] + dx
y_new = y[par,i] + dy
# Periodic boundary condition on x
x[par,i+1] = Periodicity(x_new, l)
# Periodic boundary condition on y
y[par,i+1] = Periodicity(y_new, l)
# x position if there is no jump
x_nojump[par,i+1] = x_nojump[par,i] + dx
# y position if there is no jump
y_nojump[par,i+1] = y_nojump[par,i] + dy
else:
print("Wrong input")
x=1
return x
def ABP_move(t, dt, N, l):
for i in range(0, int(t/dt)-1): # time evolution
Dist_mat = Populate_distance_matrix(N, i) # distance matrix for this time step
#print(Dist_mat)
for p1 in range(N):
# Initialize the increments for theta, x, y
theta_p1_new = 0 # np.sqrt(2*Dr*dt) * np.random.randn()
r_attr = np.sqrt(x[p1,i]**2 + y[p1,i]**2)
dx_p1_new = vx[p1,i] * np.cos(theta[p1,i]) * dt #+ np.sqrt(2*Dt*dt) * np.random.randn()
dy_p1_new = vy[p1,i] * np.sin(theta[p1,i]) * dt #+ np.sqrt(2*Dt*dt) * np.random.randn()
d_vx = -1 * G_attr * x[p1,i] / r_attr**2
d_vy = -1 * G_attr * y[p1,i] / r_attr**2  # attraction toward the origin: each velocity component scales with the matching coordinate
for p2 in range(N):
if p1 == p2:
continue
r = Dist_mat[p1,p2] # r=rji = rij is distance between pairs
#### Interaction Region ####
if r > epsilon_int:
continue
# Positional exclusion
if r < zeta_ex:
dx_p1_new = dx_p1_new - k_pos * (x[p2,i] - x[p1,i]) * dt
dy_p1_new = dy_p1_new - k_pos * (y[p2,i] - y[p1,i]) * dt
# Angular exclusion
if r <= epsilon_ex: # Exclusion region
alpha_ji = Angle([x[p2,i]-x[p1,i], y[p2,i]-y[p1,i]])
theta_p1_new = theta_p1_new - k_rad * np.sin(alpha_ji-theta[p1,i]) * dt
# Angular Alignment
if r <= epsilon_aa: # Alignment region
alig_incre_p1 = mu_plus * (1-(r/epsilon_aa)**2) * dt * np.sin(theta[p2,i] - theta[p1,i])
theta_p1_new = theta_p1_new + alig_incre_p1
'''
# Angular Disalignment
if r > epsilon_aa: # Anti-alignment region
anti_incre_p1 = mu_minus * 4 * (r-epsilon_aa)*(epsilon_int-r)/ \
(epsilon_int - epsilon_aa)**2 * dt * np.sin(theta[p2,i] - theta[p1,i])
theta_p1_new = theta_p1_new - anti_incre_p1
'''
# Add the total increment accumulated from all other particles to p1 (angular and positional)
x[p1, i+1] = x[p1, i+1] + x[p1, i] + dx_p1_new
y[p1, i+1] = y[p1, i+1] + y[p1, i] + dy_p1_new
theta[p1,i+1] = theta[p1,i+1] + theta[p1,i] + theta_p1_new
vx[p1,i+1] = vx[p1,i+1] + vx[p1,i] + d_vx
vy[p1,i+1] = vy[p1,i+1] + vy[p1,i] + d_vy
Boundary_condition('periodic', x,y, x_nojump,y_nojump,dx_p1_new, dy_p1_new, p1,i)
if i ==1:
print(Dist_mat)
print("Time Step: ", i)
return x, y, theta, vx, vy
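# Note on the update scheme (as written): theta, vx and vy advance by plain forward
# Euler; the direct x/y assignments inside the loop are then overwritten by
# Boundary_condition, which stores the periodically wrapped value of x[:, i] + dx
# (the unwrapped trajectory is kept separately in x_nojump / y_nojump).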
# CONSTANTS
v = 37 # swimming speed of B. Subtilis [m/s]
Dr = 0.3 # 1 # rotational diffusion coefficient of B. Subtilis
epsilon_int = 30 # 1 # beyond which the interaction vanishes
epsilon_aa = 15 # 0.2 # alignment & anti-alignment transition radius
epsilon_ex = 6 # 0.1 # angular exclusion radius
zeta_ex = 3.5 # 0 # positional exclusion radius
k_rad = 5 # 10 # angular exclusion strength
k_pos = 0.02 # 0 # positional exclusion strength
G_attr = 5 # # gravitational pull
# ADJUSTABLE PARAMETERS
t = 10 # time over which motion is observed [s]
dt = 0.005 # time step between recorded positions
N = 100 #2000 # number of cells
l = 100 # 5 # # box width
mu_plus = 0.3 #4# # alignment strength
mu_minus = 0.1 #0.004 # anti-alignment strength
# Packing fraction & density (Grossman et al. 2014 PRL)
psi = N * np.pi * epsilon_ex ** 2 / (2*l)**2
rho = N / (2*l)**2
# INITIAL CONDITIONS
theta = np.zeros((N,int(t/dt))) # initial swimming orientation [radians]
x = np.zeros((N,int(t/dt))) # initial x position [m]
y = np.zeros((N,int(t/dt))) # initial y position [m]
vx = np.zeros((N,int(t/dt)))
vy = np.zeros((N,int(t/dt)))
x_nojump = np.zeros((N,int(t/dt))) # x position without jump
y_nojump = np.zeros((N,int(t/dt))) # y position without jump
# Initializing x y theta; vx vy will be initialized in ABP move
for n in range(N):
# x positions
x[n,0] = np.random.uniform(-l,l)
x_nojump[n,0] = x[n,0]
# y positions
y[n,0] =
|
np.random.uniform(-l,l)
|
numpy.random.uniform
|
import numpy as np
def generate_random_power_law_distribution(a, b, g, size=1, seed=None):
"""
Power-law generator for pdf(x)\propto x^{g-1} for a<=x<=b
"""
if seed is not None:
np.random.seed(seed)
r = np.random.random(size=size)
ag, bg = a**g, b**g
return (ag + (bg - ag)*r)**(1./g)
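# Why this works (inverse-transform sampling): for pdf(x) ~ x**(g-1) on [a, b] the
# CDF is F(x) = (x**g - a**g) / (b**g - a**g); setting F(x) = r with r ~ U(0, 1)
# and solving for x gives x = (a**g + (b**g - a**g) * r)**(1/g), the line above.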
def logit(function):
'''Make a probability distribution
a log probability distribution.'''
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
        np.seterr(divide='ignore') # suppress the divide-by-zero warning from log(0); -np.inf is the desired result
result = np.log(result)
return result
return wrapper
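# Hedged usage sketch (the prior below is hypothetical, not part of this module):
#
# @logit
# def uniform_prior(x, lo=0., hi=1.):
#     return np.where((x >= lo) & (x <= hi), 1.0 / (hi - lo), 0.0)
#
# uniform_prior(0.5) -> 0.0 (log of 1), uniform_prior(2.0) -> -inf (log of 0).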
def generate_synthetic_bfa_input(flares_per_day=10.,
mined=100, Tprime=50, deltaT=1.,
alpha_prior=2., threshed=1., cadence=4,
t0=3000., seed=None, maxed=1e4,
estimate_starting_points=False):
"""Generate a dictionary of inputs for
BayesianFlaringAnalysis.
Parameters:
-------------
flares_per_day : float
average flaring rate in flares per day
mined : float
energy at which the cumulative
flare frequency is evaluated
threshed : float
detection threshold for flares
in the sample (not very well defined
because it is energy dependent here)
deltaT : float
time interval considered for prediction
of flaring rate above mined
Tprime : float
total observation time (light curve length)
in days
alpha_prior : float
prior on alpha (e.g. 2.0)
cadence : int
number of observations per hour
t0 : float
time offset
seed : float
seed the random generator with a number
if needed
maxed: float
set a maximum value for ED, set >> mined
        to simulate a power law without cutoff
estimate_starting_points : bool, default False
If True will find MLE for alpha and eps to use as
starting points for MCMC.
"""
#time related stuff:
size = int(np.rint(Tprime*24*cadence))
obstimes = np.linspace(t0,t0+Tprime,size) # 15 min cadence observations
|
np.random.seed(seed=seed)
|
numpy.random.seed
|
import warnings
import numpy as np
def dftups(inp,nor=None,noc=None,usfac=1,roff=0,coff=0):
"""
Translated from matlab:
* `Original Source <http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html>`_
* <NAME> - Dec 13, 2007
* Modified from dftus, by <NAME> 7/31/06
Upsampled DFT by matrix multiplies, can compute an upsampled DFT in just
a small region.
This code is intended to provide the same result as if the following
operations were performed:
* Embed the array "in" in an array that is usfac times larger in each
dimension. ifftshift to bring the center of the image to (1,1).
* Take the FFT of the larger array
* Extract an [nor, noc] region of the result. Starting with the
[roff+1 coff+1] element.
It achieves this result by computing the DFT in the output array without
the need to zeropad. Much faster and memory efficient than the
zero-padded FFT approach if [nor noc] are much smaller than [nr*usfac nc*usfac]
Parameters
----------
usfac : int
Upsampling factor (default usfac = 1)
nor,noc : int,int
Number of pixels in the output upsampled DFT, in units of upsampled
pixels (default = size(in))
roff, coff : int, int
Row and column offsets, allow to shift the output array to a region of
interest on the DFT (default = 0)
"""
# this function is translated from matlab, so I'm just going to pretend
# it is matlab/pylab
from numpy.fft import ifftshift,fftfreq
from numpy import pi,newaxis,floor
nr,nc=np.shape(inp);
# Set defaults
if noc is None: noc=nc;
if nor is None: nor=nr;
# Compute kernels and obtain DFT by matrix products
term1c = ( ifftshift(np.arange(nc,dtype='float') - floor(nc/2)).T[:,newaxis] )/nc # fftfreq
term2c = (( np.arange(noc,dtype='float') - coff )/usfac)[newaxis,:] # output points
kernc=np.exp((-1j*2*pi)*term1c*term2c);
term1r = ( np.arange(nor,dtype='float').T - roff )[:,newaxis] # output points
term2r = ( ifftshift(np.arange(nr,dtype='float')) - floor(nr/2) )[newaxis,:] # fftfreq
kernr=np.exp((-1j*2*pi/(nr*usfac))*term1r*term2r);
#kernc=exp((-i*2*pi/(nc*usfac))*( ifftshift([0:nc-1]).' - floor(nc/2) )*( [0:noc-1] - coff ));
#kernr=exp((-i*2*pi/(nr*usfac))*( [0:nor-1].' - roff )*( ifftshift([0:nr-1]) - floor(nr/2) ));
out=np.dot(np.dot(kernr,inp),kernc);
#return np.roll(np.roll(out,-1,axis=0),-1,axis=1)
return out
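# Hedged usage sketch (array names are illustrative, not from this module): to look
# at a 16x16 patch of the cross-correlation of two images on a grid 8x finer than
# the original pixels, one would call
#
#   F = np.fft.fft2(img_b) * np.conj(np.fft.fft2(img_a))
#   cc_up = dftups(F, nor=16, noc=16, usfac=8, roff=0, coff=0)
#
# which is the pattern dftregistration below uses to refine its coarse shift estimate.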
def dftregistration(buf1ft, buf2ft, usfac=1, return_registered=False,
return_error=False, zeromean=True, DEBUG=False, maxoff=None):
"""
translated from matlab:
http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html
Efficient subpixel image registration by crosscorrelation. This code
gives the same precision as the FFT upsampled cross correlation in a
small fraction of the computation time and with reduced memory
requirements. It obtains an initial estimate of the crosscorrelation peak
by an FFT and then refines the shift estimation by upsampling the DFT
only in a small neighborhood of that estimate by means of a
matrix-multiply DFT. With this procedure all the image points are used to
compute the upsampled crosscorrelation.
<NAME> - Dec 13, 2007
Portions of this code were taken from code written by <NAME>
and <NAME>.
<NAME> and <NAME>, "Phase retrieval for a complex-valued
object by using a low-resolution image," J. Opt. Soc. Am. A 7, 450-458
(1990).
Citation for this algorithm:
<NAME>, <NAME>, and <NAME>,
"Efficient subpixel image registration algorithms," Opt. Lett. 33,
156-158 (2008).
Inputs
buf1ft Fourier transform of reference image,
DC in (1,1) [DO NOT FFTSHIFT]
buf2ft Fourier transform of image to register,
DC in (1,1) [DO NOT FFTSHIFT]
usfac Upsampling factor (integer). Images will be registered to
within 1/usfac of a pixel. For example usfac = 20 means the
images will be registered within 1/20 of a pixel. (default = 1)
Outputs
output = [error,diffphase,net_row_shift,net_col_shift]
error Translation invariant normalized RMS error between f and g
diffphase Global phase difference between the two images (should be
zero if images are non-negative).
net_row_shift net_col_shift Pixel shifts between images
Greg (Optional) Fourier transform of registered version of buf2ft,
the global phase difference is compensated for.
"""
# this function is translated from matlab, so I'm just going to pretend
# it is matlab/pylab
from numpy import conj,abs,arctan2,sqrt,real,imag,shape,zeros,trunc,ceil,floor,fix
from numpy.fft import fftshift,ifftshift
from numpy.fft import fft2, ifft2
# Compute error for no pixel shift
if usfac == 0:
raise ValueError("Upsample Factor must be >= 1")
CCmax = sum(sum(buf1ft * conj(buf2ft)));
rfzero = sum(abs(buf1ft)**2);
rgzero = sum(abs(buf2ft)**2);
error = 1.0 - CCmax * conj(CCmax)/(rgzero*rfzero);
error = sqrt(abs(error));
diffphase=arctan2(imag(CCmax),real(CCmax));
output=[error,diffphase];
# Whole-pixel shift - Compute crosscorrelation by an IFFT and locate the
# peak
elif usfac == 1:
[m,n]=shape(buf1ft);
CC = ifft2(buf1ft * conj(buf2ft));
if maxoff is None:
rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
CCmax=CC[rloc,cloc];
else:
# set the interior of the shifted array to zero
# (i.e., ignore it)
CC[maxoff:-maxoff,:] = 0
CC[:,maxoff:-maxoff] = 0
rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
CCmax=CC[rloc,cloc];
rfzero = sum(abs(buf1ft)**2)/(m*n);
rgzero = sum(abs(buf2ft)**2)/(m*n);
error = 1.0 - CCmax * conj(CCmax)/(rgzero*rfzero);
error = sqrt(abs(error));
diffphase=arctan2(imag(CCmax),real(CCmax));
md2 = fix(m/2);
nd2 = fix(n/2);
if rloc > md2:
row_shift = rloc - m;
else:
row_shift = rloc;
if cloc > nd2:
col_shift = cloc - n;
else:
col_shift = cloc;
#output=[error,diffphase,row_shift,col_shift];
output=[row_shift,col_shift]
# Partial-pixel shift
else:
if DEBUG: import pylab
# First upsample by a factor of 2 to obtain initial estimate
# Embed Fourier data in a 2x larger array
[m,n]=shape(buf1ft);
mlarge=m*2;
nlarge=n*2;
CClarge=zeros([mlarge,nlarge], dtype='complex');
#CClarge[m-fix(m/2):m+fix((m-1)/2)+1,n-fix(n/2):n+fix((n-1)/2)+1] = fftshift(buf1ft) * conj(fftshift(buf2ft));
CClarge[round(mlarge/4.):round(mlarge/4.*3),round(nlarge/4.):round(nlarge/4.*3)] = fftshift(buf1ft) * conj(fftshift(buf2ft));
# note that matlab uses fix which is trunc... ?
# Compute crosscorrelation and locate the peak
CC = ifft2(ifftshift(CClarge)); # Calculate cross-correlation
if maxoff is None:
rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
CCmax=CC[rloc,cloc];
else:
# set the interior of the shifted array to zero
# (i.e., ignore it)
CC[maxoff:-maxoff,:] = 0
CC[:,maxoff:-maxoff] = 0
rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
CCmax=CC[rloc,cloc];
if DEBUG:
pylab.figure(1)
pylab.clf()
pylab.subplot(131)
pylab.imshow(real(CC)); pylab.title("Cross-Correlation (upsampled 2x)")
pylab.subplot(132)
ups = dftups((buf1ft) * conj((buf2ft)),mlarge,nlarge,2,0,0); pylab.title("dftups upsampled 2x")
pylab.imshow(real(((ups))))
pylab.subplot(133)
pylab.imshow(real(CC)/real(ups)); pylab.title("Ratio upsampled/dftupsampled")
print("Upsample by 2 peak: ",rloc,cloc," using dft version: ",np.unravel_index(abs(ups).argmax(), ups.shape))
#print np.unravel_index(ups.argmax(),ups.shape)
# Obtain shift in original pixel grid from the position of the
# crosscorrelation peak
[m,n] = shape(CC); md2 = trunc(m/2); nd2 = trunc(n/2);
if rloc > md2 :
row_shift2 = rloc - m;
else:
row_shift2 = rloc;
if cloc > nd2:
col_shift2 = cloc - n;
else:
col_shift2 = cloc;
row_shift2=row_shift2/2.;
col_shift2=col_shift2/2.;
if DEBUG: print("row_shift/col_shift from ups2: ",row_shift2,col_shift2)
# If upsampling > 2, then refine estimate with matrix multiply DFT
if usfac > 2:
#%% DFT computation %%%
# Initial shift estimate in upsampled grid
zoom_factor=1.5
if DEBUG: print(row_shift2, col_shift2)
row_shift0 = round(row_shift2*usfac)/usfac;
col_shift0 = round(col_shift2*usfac)/usfac;
dftshift = trunc(ceil(usfac*zoom_factor)/2); #% Center of output array at dftshift+1
if DEBUG: print('dftshift,rs,cs,zf:',dftshift, row_shift0, col_shift0, usfac*zoom_factor)
# Matrix multiply DFT around the current shift estimate
roff = dftshift-row_shift0*usfac
coff = dftshift-col_shift0*usfac
upsampled = dftups(
(buf2ft * conj(buf1ft)),
ceil(usfac*zoom_factor),
|
ceil(usfac*zoom_factor)
|
numpy.ceil
|
import torch
from torchvision import transforms
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import io
import PIL
import math
import csv
from cirtorch.datasets.genericdataset import ImagesFromList
from cirtorch.networks.imageretrievalnet import init_network, extract_vectors
from cirtorch.datasets.traindataset import TuplesDataset
from cirtorch.datasets.datahelpers import collate_tuples, cid2filename
torch.manual_seed(1)
"""
PARAMS
"""
INPUT_DIM = 2048
HIDDEN_DIM1 = 1024
HIDDEN_DIM2 = 512
HIDDEN_DIM3 = 256
OUTPUT_DIM = 128 # TODO: Is this right?
datasets_names = ['mapillary']
place_model_path = 'data/exp_outputs1/mapillary_resnet50_gem_contrastive_m0.70_adam_lr1.0e-06_wd1.0e-06_nnum5_qsize2000_psize20000_bsize5_uevery5_imsize1024/model_epoch38.pth.tar'
correlation_model_path = 'data/localcorrelationnet/model_2048_128_0.01_Epoch_199.pth'
multiscale = '[1]'
imsize = 320
posDistThr = 25
negDistThr = 25
workers = 8
query_size = 2000
pool_size = 20000
t = time.strftime("%Y-%d-%m_%H:%M:%S", time.localtime())
"""
Network
"""
class CorrelationNet(torch.nn.Module):
def __init__(self):
super(CorrelationNet, self).__init__()
self.input = torch.nn.Linear(INPUT_DIM, HIDDEN_DIM1)
self.hidden1 = torch.nn.Linear(HIDDEN_DIM1, HIDDEN_DIM2)
self.hidden2 = torch.nn.Linear(HIDDEN_DIM2, HIDDEN_DIM3)
self.output = torch.nn.Linear(HIDDEN_DIM3, OUTPUT_DIM)
def forward(self, x):
x = F.leaky_relu(self.input(x))
x = F.leaky_relu(self.hidden1(x))
x = F.leaky_relu(self.hidden2(x))
x = self.output(x)
return x
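# Shape check (sketch): a batch of GeM descriptors of shape (B, INPUT_DIM) flows
# 2048 -> 1024 -> 512 -> 256 -> 128, e.g.
#   CorrelationNet()(torch.zeros(4, INPUT_DIM)).shape == torch.Size([4, OUTPUT_DIM])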
"""
Dataset
"""
def load_placereg_net(network_path):
# loading network from path
if network_path is not None:
state = torch.load(network_path)
# parsing net params from meta
# architecture, pooling, mean, std required
        # the rest has default values, in case they don't exist
net_params = {}
net_params['architecture'] = state['meta']['architecture']
net_params['pooling'] = state['meta']['pooling']
net_params['local_whitening'] = state['meta'].get(
'local_whitening', False)
net_params['regional'] = state['meta'].get('regional', False)
net_params['whitening'] = state['meta'].get('whitening', False)
net_params['mean'] = state['meta']['mean']
net_params['std'] = state['meta']['std']
net_params['pretrained'] = False
# load network
net = init_network(net_params)
net.load_state_dict(state['state_dict'])
# if whitening is precomputed
if 'Lw' in state['meta']:
net.meta['Lw'] = state['meta']['Lw']
print(">>>> loaded network: ")
print(net.meta_repr())
# setting up the multi-scale parameters
ms = list(eval(multiscale))
if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
msp = net.pool.p.item()
print(">> Set-up multiscale:")
print(">>>> ms: {}".format(ms))
print(">>>> msp: {}".format(msp))
else:
msp = 1
return net
def load_correlationnet(model_path):
correlationnet = CorrelationNet()
correlationnet.load_state_dict(torch.load(model_path))
correlationnet.eval()
return correlationnet
placenet = load_placereg_net(place_model_path)
correlationnet = load_correlationnet(correlation_model_path)
# moving network to gpu and eval mode
placenet.cuda()
correlationnet.cuda()
placenet.eval()
correlationnet.eval()
# set up the transform
resize = transforms.Resize((240, 320), interpolation=2)
normalize = transforms.Normalize(
mean=placenet.meta['mean'],
std=placenet.meta['std']
)
transform = transforms.Compose([
resize,
transforms.ToTensor(),
normalize
])
posDistThr = 25
negDistThr = 25
test_dataset = TuplesDataset(
name='mapillary',
mode='train',
#mode='val',
imsize=imsize,
transform=transform,
posDistThr=posDistThr,
negDistThr=negDistThr,
tuple_mining='gps'
)
qidxs, pidxs = test_dataset.get_loaders()
opt = {'batch_size': 1, 'shuffle': False, 'num_workers': 8, 'pin_memory': True}
start = time.time()
# Step 1: Extract Database Images - dbLoader
print('>> Extracting Database Images...')
dbLoader = torch.utils.data.DataLoader(
ImagesFromList(root='', images=[test_dataset.dbImages[i] for i in range(
len(test_dataset.dbImages))], imsize=imsize, transform=transform),
**opt)
poolvecs = torch.zeros(OUTPUT_DIM, len(test_dataset.dbImages)).cuda()
for i, input in enumerate(dbLoader):
poolvecs[:, i] = correlationnet(placenet(input.cuda()).data.squeeze())
# Step 2: Extract Query Images - qLoader
print('>> Extracting Query Images...')
qLoader = torch.utils.data.DataLoader(
ImagesFromList(root='', images=[
test_dataset.qImages[i] for i in qidxs], imsize=imsize, transform=transform),
**opt)
qvecs = torch.zeros(OUTPUT_DIM, len(qidxs)).cuda()
for i, input in enumerate(qLoader):
qvecs[:, i] = correlationnet(placenet(input.cuda()).data.squeeze())
# Step 3: Ranks
#scores = torch.mm(poolvecs.t(), qvecs)
D, N = qvecs.size()
D, PoolN = poolvecs.size()
scores = torch.zeros((N, PoolN))
for im in range(N):
    scores[im, :] = torch.norm(poolvecs.t() - qvecs[:, im], dim=1)
scores = scores.cpu()#.numpy()
# GPS: get query and pool coordinates
querycoordinates = torch.tensor(
[test_dataset.gpsInfo[test_dataset.qImages[i][-26:-4]] for i in qidxs], dtype=torch.float)
poolcoordinates = torch.tensor([test_dataset.gpsInfo[test_dataset.dbImages[i][-26:-4]]
for i in range(len(test_dataset.dbImages))], dtype=torch.float)
# GPS: Compute distances
distances = torch.norm(querycoordinates[:, None] - poolcoordinates, dim=2)
# GPS: Sort distances
distances, indicies = torch.sort(distances, dim=1, descending=False)
print('>>> Generating Correlation Data')
gpsinfo = test_dataset.gpsInfo
angleInfo = test_dataset.angleInfo
all_gps = np.zeros((len(qidxs), 10))
all_emb = np.zeros((len(qidxs), 10))
all_ang = np.zeros((len(qidxs), 10))
all_pics = []
for q in range(len(qidxs)):
positive = 0
gps = []
emb = []
pictures = [test_dataset.qImages[qidxs[q]].split('/')[-1][:-4]]
angles = []
while distances[q, positive] < 50 and positive < 10:
index = indicies[q, positive]
emb.append(scores[q, index].item())
gps.append(distances[q, positive])
pictures.append(test_dataset.dbImages[index])
key = test_dataset.dbImages[index].split('/')[-1][:-4]
angles.append(angleInfo[key])
positive += 1
emb = np.array(emb)
gps =
|
np.array(gps)
|
numpy.array
|
"""
Tests the fit methods
"""
# --------------------------------------------------------------------------------
# Programmer: <NAME>
# Date 1/25/2019 3:34:02 PM
# Language: Python (.py) Version 2.7 or 3.5
# Usage:
#
# Test all model types
#
# \SparseSC > python -m unittest test/test_fit.py
#
# Test a specific model type (e.g. "prospective-restricted"):
#
# \SparseSC > python -m unittest test.test_fit.TestFit.test_retrospective
#
# --------------------------------------------------------------------------------
from __future__ import print_function # for compatibility with python 2.7
import sys
import random
import unittest
import warnings
from scipy.optimize.linesearch import LineSearchWarning
import numpy as np
import traceback
try:
import SparseSC
from SparseSC.fit import fit
from SparseSC.fit_fast import fit_fast
except ImportError:
raise RuntimeError("SparseSC is not installed. Use 'pip install -e .' or 'conda develop .' from repo root to install in dev mode")
#import warnings
#warnings.simplefilter("error")
SparseSC.keras_reproducible() #for when I start testing for correctness
# pylint: disable=missing-docstring
class TestFitForErrors(unittest.TestCase):
def setUp(self):
random.seed(12345)
np.random.seed(101101001)
control_units = 50
treated_units = 20
features = 10
targets = 5
self.X = np.random.rand(control_units + treated_units, features)
self.Y =
|
np.random.rand(control_units + treated_units, targets)
|
numpy.random.rand
|
import cv2
import argparse
import configparser
import time
import os.path
import numpy as np
import skimage.morphology
### Module imports ###
import sys
sys.path.append('../../')
from common.utility import *
class BgDetector:
"""
Class implementation for detecting fish keypoints.
Utilizes an extracted background image (From ExtractBackground.py)
    Image is thresholded using either Entropy split (front) or Intermodes split (Top)
"""
def __init__(self, camId, dataPath):
"""
Initialize object
Input:
camId: Camera view of the video to be analysed. 1 = Top, 2 = Front
dataPath: Path to the video files
"""
self.timer = False
self.camId = camId
self.onlyHeads = (camId == 1)
self.loadSettings(dataPath)
# Load static background and downsample it
bgPath = os.path.join(dataPath, 'background_cam{0}.png'.format(self.camId))
bg = cv2.imread(bgPath)
self.bg = bg[::self.downsample,::self.downsample]
# Frame at different stages
self.frame = None # Original frame
        self.diff = None # After background subtraction
self.blur = None # After applying blur
self.thresh = None # After thresholding (i.e. binary)
self.thin = None # After skeletonization
def loadSettings(self, path):
"""
Load settings from config file in the provided path.
Config file includes information on the following, which is set in the object:
downsample_factor: How much the images should be downsampled during processing
blur_size: Size of the the median blur filter
            min_blob_size: Minimum size of the detected blobs
Input:
path: String path to the folder where the settings.ini file is located
"""
config = readConfig(path)
c = config['Detector']
self.n_fish = c.getint('n_fish')
self.detectorType = c.get('cam{}_type'.format(self.camId))
self.downsample = c.getint('downsample_factor') # How much to downsample the image by
self.blurSize = c.getint('blur_size') # Size of median blur
self.minBlobSize = c.getint('min_blob_size') # used to filter BLOBs in the "blob" function
self.minPatchArea = c.getint("min_patch_area") # used to filter BLOBs in calceig
        self.minSkeletonSize = c.getint("min_skeleton_length") # minimum length between two keypoints in the skeleton (cam1), for the distance to be considered when finding the best keypoint
self.winSize = c.getint("window_size") # Size of window around keypoint in calcEig
self.nms_thresh = c.getfloat("nms_threshold") # Threshold for how large an overlap there can be before applying NMS
if self.camId == 1:
self.max_frame = c.getint("cam1_maxframe")
self.min_frame = c.getint("cam1_minframe")
else:
self.max_frame = c.getint("cam2_maxframe")
self.min_frame = c.getint("cam2_minframe")
tl, br = getROI(path, self.camId)
self.tl = tl // self.downsample
self.br = br // self.downsample
print(self.tl, self.br)
def detect(self, frame, bboxes):
"""
Performs the detection step
Input:
frame: The current frame
            camId: Which camera view the frame is from (1 = Top, 2 = Front)
Output:
filtered: The detected keypoints after filtering. List of cv2.KeyPoints
bbs: The rotated bounding boxes of the filtered keypoints. List of dicts, with the following keys, containing floats:
tl_x: Top left x coordinate of rotated bounding box
tl_y: Top left y coordinate of rotated bounding box
                c_x: x center coordinate of original bounding box
                c_y: y center coordinate of original bounding box
                w: Width of the rotated bounding box
h: Height of the rotated bounding box
theta: The angle of the rotated bounding box
"""
## Downsample video
_start = time.time()
self.frame = frame[::self.downsample,::self.downsample]
_end = time.time()
if(self.timer):
print("Downsample time: {0}".format(_end-_start))
## Subtract background
self.diff = self.bgSubtract(self.frame)
## Blur image
_start = time.time()
self.blur = cv2.medianBlur(self.diff,self.blurSize)
_end = time.time()
if(self.timer):
print("Blur time: {0}".format(_end-_start))
## Threshold image. Method is dependent on camera view
if(self.camId == 1):
# Threshold image using intermodes algorithm
th = self.intermodesSplit(self.blur)
self.thresh = self.blur > th
elif(self.camId == 2):
# Threshold image using max entropy
th = self.entropySplit(self.blur)
self.thresh = self.blur > th
# Remove everything outside of the ROI
self.thresh = self.applyROIMat(self.thresh)
self.bboxes = applyROIBBs(bboxes, self.tl, self.br)
# Find keypoints and boundingbox of the objects based on the detector method
if(self.detectorType == 'blob'):
filtered, bbs = self.blob()
elif(self.detectorType == 'skeleton'):
filtered, bbs = self.skeleton()
else:
print("Error. Unknown detector type. Check settings.ini to see whether detector type is [blob] or [skeleton].")
sys.exit()
return filtered, bbs
def applyROIMat(self, mat):
"""
Sets everything outside of the ROI to 0
Input:
Mat: Input image
Output:
mat: ROI output image
"""
if mat.ndim == 2:
mat[:,:self.tl[0]] = 0
mat[:,self.br[0]+1:] = 0
mat[:self.tl[1]] = 0
mat[self.br[1]+1:] = 0
elif mat.ndim == 3:
mat[:,:self.tl[0],:] = 0
mat[:,self.br[0]+1:,:] = 0
mat[:self.tl[1],:] = 0
mat[self.br[1]+1:,:] = 0
return mat
def blob(self):
"""
Detection method that finds the BLOBs consisting of the most pixels
Input:
Output:
filtered: The detected keypoints after filtering. List of cv2.KeyPoints
bbs: The rotated bounding boxes of the filtered keypoints. List of dicts, with the following keys, containing floats:
tl_x: Top left x coordinate of rotated bounding box
tl_y: Top left y coordinate of rotated bounding box
                c_x: x center coordinate of original bounding box
                c_y: y center coordinate of original bounding box
                w: Width of the rotated bounding box
h: Height of the rotated bounding box
theta: The angle of the rotated bounding box
"""
## Find BLOBs
img = self.thresh.astype(np.uint8)*255
ret, self.labels = cv2.connectedComponents(img)
## Sort BLOBs based on their pixel count. Assuming the background (label = 0) is the largest
unq_labels, counts = np.unique(self.labels, return_counts = True)
unq_labels = unq_labels[1:]
counts = counts[1:]
sorted_indecies = np.argsort(counts)[::-1]
unq_labels = unq_labels[sorted_indecies]
counts = counts[sorted_indecies]
counts = counts[counts > self.minBlobSize] # Ignore tiny BLOBs
# Find the largest BLOBs
numBlobs = self.n_fish * 2
if len(counts) < numBlobs:
numBlobs = len(counts)
unq_labels = unq_labels[:numBlobs]
## Find rotated bounding boxes of the detected keypoints
bbs = self.findRotatedBB(unq_labels)
## Keypoints are determined by the center-point
filtered = []
for b in bbs:
filtered.append(cv2.KeyPoint(x=b["c_x"],y=b["c_y"], _size = 1))
return filtered, bbs
def skeleton(self):
"""
Detection method that find keypoints in the skeleton of the BLOBs
Input:
Output:
filtered: The detected keypoints after filtering. List of cv2.KeyPoints
bbs: The rotated bounding boxes of the filtered keypoints. List of dicts, with the following keys, containing floats:
tl_x: Top left x coordinate of rotated bounding box
tl_y: Top left y coordinate of rotated bounding box
                c_x: x center coordinate of original bounding box
                c_y: y center coordinate of original bounding box
                w: Width of the rotated bounding box
h: Height of the rotated bounding box
theta: The angle of the rotated bounding box
"""
        ## Fill holes in the thresholded BLOBs
self.thresh = self.fillHoles(self.thresh)
## Extract skeletons of BLOBs
self.thin = skimage.morphology.skeletonize(self.thresh)
## Detect potential keypoints
detections = self.interestPoints(findJunctions=True)
filtered = []
for label in detections:
kps = detections[label]
kps.sort(key=lambda x: x[0].size)
# Remove small detections
kps = [x for x in kps if x[0].size > 1]
# Find the largest of the two keypoints placed furthest from each other
bestkp = self.filterKeypoints(kps)
# Remove the smallest half of the keypoints (in order to remove tail-points etc)
if(self.onlyHeads and len(kps) > 1):
numPts = len(kps)//2
kps = kps[-numPts:]
# If the bestkp has been removed, add it again (largest of the two keypoints placed furthest from each other)
if bestkp and (not bestkp[0] in kps):
kps.extend(bestkp)
#kps.sort(key=lambda x: x.size)
#filtered += [kps[-1]]
filtered += kps
## Find rotated bounding boxes of the detected keypoints
bbs = self.findRotatedBB(filtered)
filtered = [x[0] for x in filtered]
return filtered, bbs
def findRotation(self, img):
"""
Calculates the rotation of the foreground pixels in the binary image
Input:
img: Binary input image
Output:
theta : The orientation in degrees from [-pi/2 : pi/2]
"""
_, cov = self.estimateGaussian(img)
## Get the eigenvalues/vectors and sort them by descending eigenvalues
U, S, _ = np.linalg.svd(cov)
x_v1, y_v1 = U[:,0]
theta = np.arctan((y_v1)/(x_v1)) # arctan vs arctan2. Use arctan2 to handled x_v1 = 0?
return np.rad2deg(theta)
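        # Note: np.linalg.svd returns singular values in descending order, so U[:, 0]
        # is the eigenvector of the pixel covariance with the largest variance, i.e.
        # the blob's major axis; the arctan of its slope gives the orientation, which
        # np.rad2deg converts to degrees.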
def closestPos(self, img, target):
"""
Finds the closest non-zero pixel position in the image to the given target position
Input:
img: Input image
target: Tuple with the x, y coordinates of the target position
Output:
pos: The position of the closest non-zero pixel
dist: The distance between the target and pos pixels
"""
y, x = np.nonzero(img)
distances = np.sqrt((x-target[0])**2 + (y-target[1])**2)
nearest_index = np.argmin(distances)
dist = np.min(distances)
pos = (x[nearest_index], y[nearest_index])
return pos, dist
def getBB(self, img):
"""
Computes the Axis-aligned bounding box for the provide image.
It is assumed that the input image is a binary image with a single BLOB in it
Input:
img: Input binary image
Output:
tl: Tuple containing the coordinates of the top left point of the BB
br: Tuple containing the coordinates of the bottom right point of the BB
center: Tuple containing the coordinates of the center of the BB
"""
y, x = np.nonzero(img)
if len(y) == 0:
return (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1)
tl = (np.min(x), np.min(y))
br = (np.max(x), np.max(y))
center = (np.mean(x), np.mean(y))
if br[0] - tl[0] > br[1] - tl[1]: #If width > height
left = (np.min(x), np.mean(y[x == np.min(x)])) # Middle of the LEFT edge of the BLOB
right = (np.max(x), np.mean(y[x == np.max(x)])) # Middle of the RIGHT edge of the BLOB
else:
left = (np.mean(x[y == np.min(y)]), np.min(y)) # Middle of the TOP edge of the BLOB
right = (np.mean(x[y == np.max(y)]), np.max(y)) # Middle of the BOTTOM edge of the BLOB
return tl, br, center, left, right
def estimateGaussian(self, img):
"""
Computes the mean and covariance of a gaussian approximated to the foreground pixels in the provided image
Input:
img: Input binary image
Output:
params: tuple containing the mean and covariance of a unimodal multivariate gaussian
"""
y, x =
|
np.nonzero(img)
|
numpy.nonzero
|
import helpers
import numpy as np
import pytest
import toughio
write_read = lambda x, **kwargs: helpers.write_read(
"INFILE", x, toughio.write_input, toughio.read_input, **kwargs
)
write_read_tough = lambda x: write_read(
x, writer_kws={"file_format": "tough"}, reader_kws={"file_format": "tough"},
)
write_read_json = lambda x: write_read(
x, writer_kws={"file_format": "json"}, reader_kws={"file_format": "json"},
)
@pytest.mark.parametrize(
"write_read, single",
[
(write_read_tough, True),
(write_read_tough, False),
(write_read_json, True),
(write_read_json, False),
],
)
def test_title(write_read, single):
parameters_ref = {
"title": (
helpers.random_string(80)
if single
else [helpers.random_string(80) for _ in range(np.random.randint(5) + 2)]
),
}
parameters = write_read(parameters_ref)
assert parameters_ref["title"] == parameters["title"]
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_rocks(write_read):
keys = [
"density",
"porosity",
"permeability",
"conductivity",
"specific_heat",
"compressibility",
"expansivity",
"conductivity_dry",
"tortuosity",
"klinkenberg_parameter",
"distribution_coefficient_3",
"distribution_coefficient_4",
]
parameters_ref = {
"rocks": {
helpers.random_string(5): {key: np.random.rand() for key in keys[:5]},
helpers.random_string(5): {
key: np.random.rand() if key != "permeability" else np.random.rand(3)
for key in keys[:5]
},
helpers.random_string(5): {key: np.random.rand() for key in keys},
helpers.random_string(5): {key: np.random.rand() for key in keys},
helpers.random_string(5): {key: np.random.rand() for key in keys},
helpers.random_string(5): {key: np.random.rand() for key in keys},
}
}
names = list(parameters_ref["rocks"].keys())
parameters_ref["rocks"][names[-1]].update(
{
"relative_permeability": {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
},
}
)
parameters_ref["rocks"][names[-2]].update(
{
"capillarity": {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
},
}
)
parameters_ref["rocks"][names[-3]].update(
{
"relative_permeability": {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
},
"capillarity": {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
},
}
)
parameters = write_read(parameters_ref)
assert sorted(parameters_ref["rocks"].keys()) == sorted(parameters["rocks"].keys())
for k, v in parameters_ref["rocks"].items():
for kk, vv in v.items():
if not isinstance(vv, dict):
assert np.allclose(vv, parameters["rocks"][k][kk], atol=1.0e-4)
else:
helpers.allclose_dict(vv, parameters["rocks"][k][kk], atol=1.0e-4)
@pytest.mark.parametrize(
"write_read, rpcap",
[
(write_read_tough, "rp"),
(write_read_tough, "cap"),
(write_read_tough, "both"),
(write_read_json, "rp"),
(write_read_json, "cap"),
(write_read_json, "both"),
],
)
def test_rpcap(write_read, rpcap):
parameters_ref = {"default": {}}
if rpcap in {"rp", "both"}:
parameters_ref["default"]["relative_permeability"] = {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
}
if rpcap in {"cap", "both"}:
parameters_ref["default"]["capillarity"] = {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
}
parameters = write_read(parameters_ref)
for k, v in parameters_ref["default"].items():
helpers.allclose_dict(v, parameters["default"][k], atol=1.0e-4)
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_flac(write_read):
parameters_ref = {
"flac": {
"creep": bool(np.random.randint(2)),
"porosity_model": np.random.randint(10),
"version": np.random.randint(10),
},
"rocks": {
helpers.random_string(5): {
"permeability_model": {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
},
"equivalent_pore_pressure": {
"id": np.random.randint(10),
"parameters": np.random.rand(np.random.randint(7) + 1),
},
}
for _ in np.random.rand(10) + 1
},
}
parameters = write_read(parameters_ref)
helpers.allclose_dict(parameters_ref["flac"], parameters["flac"])
for k, v in parameters_ref["rocks"].items():
for kk, vv in v.items():
helpers.allclose_dict(vv, parameters["rocks"][k][kk], atol=1.0e-4)
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_chemp(write_read):
parameters_ref = {
"chemical_properties": {
helpers.random_string(20): {
"temperature_crit": np.random.rand(),
"pressure_crit": np.random.rand(),
"compressibility_crit": np.random.rand(),
"pitzer_factor": np.random.rand(),
"dipole_moment": np.random.rand(),
"boiling_point": np.random.rand(),
"vapor_pressure_a": np.random.rand(),
"vapor_pressure_b": np.random.rand(),
"vapor_pressure_c": np.random.rand(),
"vapor_pressure_d": np.random.rand(),
"molecular_weight": np.random.rand(),
"heat_capacity_a": np.random.rand(),
"heat_capacity_b": np.random.rand(),
"heat_capacity_c": np.random.rand(),
"heat_capacity_d": np.random.rand(),
"napl_density_ref": np.random.rand(),
"napl_temperature_ref": np.random.rand(),
"gas_diffusivity_ref": np.random.rand(),
"gas_temperature_ref": np.random.rand(),
"exponent": np.random.rand(),
"napl_viscosity_a": np.random.rand(),
"napl_viscosity_b": np.random.rand(),
"napl_viscosity_c": np.random.rand(),
"napl_viscosity_d": np.random.rand(),
"volume_crit": np.random.rand(),
"solubility_a": np.random.rand(),
"solubility_b": np.random.rand(),
"solubility_c": np.random.rand(),
"solubility_d": np.random.rand(),
"oc_coeff": np.random.rand(),
"oc_fraction": np.random.rand(),
"oc_decay": np.random.rand(),
}
for _ in np.random.rand(10) + 1
}
}
parameters = write_read(parameters_ref)
assert len(parameters["chemical_properties"]) == len(
parameters_ref["chemical_properties"]
)
for k, v in parameters_ref["chemical_properties"].items():
for kk, vv in v.items():
assert np.allclose(
vv, parameters["chemical_properties"][k][kk], atol=1.0e-4
)
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_ncgas(write_read):
parameters_ref = {
"non_condensible_gas": [
helpers.random_string(10) for _ in np.random.rand(10) + 1
]
}
parameters = write_read(parameters_ref)
assert len(parameters["non_condensible_gas"]) == len(
parameters_ref["non_condensible_gas"]
)
for v1, v2 in zip(
parameters["non_condensible_gas"], parameters_ref["non_condensible_gas"]
):
assert v1 == v2
@pytest.mark.parametrize(
"write_read, isothermal", [(write_read_tough, True), (write_read_tough, False)],
)
def test_multi(write_read, isothermal):
import random
from toughio._io.input.tough._common import eos
parameters_ref = {
"eos": random.choice(
[k for k in eos.keys() if k not in {"eos7", "eos8", "eos9", "tmvoc"}]
),
"isothermal": isothermal,
}
parameters = write_read(parameters_ref)
multi = [
parameters["n_component"],
parameters["n_component"] + 1,
parameters["n_phase"],
6,
]
multi_ref = eos[parameters_ref["eos"]]
assert multi_ref == multi
assert parameters_ref["isothermal"] == parameters["isothermal"]
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_solvr(write_read):
parameters_ref = {
"solver": {
"method": np.random.randint(10),
"z_precond": helpers.random_string(2),
"o_precond": helpers.random_string(2),
"rel_iter_max": np.random.rand(),
"eps": np.random.rand(),
},
}
parameters = write_read(parameters_ref)
assert parameters_ref["solver"]["method"] == parameters["solver"]["method"]
assert parameters_ref["solver"]["z_precond"] == parameters["solver"]["z_precond"]
assert parameters_ref["solver"]["o_precond"] == parameters["solver"]["o_precond"]
assert np.allclose(
parameters_ref["solver"]["rel_iter_max"],
parameters["solver"]["rel_iter_max"],
atol=1.0e-5,
)
assert np.allclose(
parameters_ref["solver"]["eps"], parameters["solver"]["eps"], atol=1.0e-5
)
@pytest.mark.parametrize(
"write_read, t_steps, num_pvars",
[
(write_read_tough, np.random.rand(), 4),
(write_read_tough, np.random.rand(np.random.randint(100) + 1), 4),
(write_read_tough, np.random.rand(np.random.randint(100) + 1), 6),
(write_read_json, np.random.rand(), 4),
(write_read_json, np.random.rand(np.random.randint(100) + 1), 4),
(write_read_json, np.random.rand(np.random.randint(100) + 1), 6),
],
)
def test_param(write_read, t_steps, num_pvars):
parameters_ref = {
"options": {
"n_iteration": np.random.randint(10),
"n_cycle": np.random.randint(10),
"n_second": np.random.randint(10),
"n_cycle_print": np.random.randint(10),
"verbosity": np.random.randint(10),
"temperature_dependence_gas": np.random.rand(),
"effective_strength_vapor": np.random.rand(),
"t_ini": np.random.rand(),
"t_max": np.random.rand(),
"t_steps": t_steps,
"t_step_max": np.random.rand(),
"t_reduce_factor": np.random.rand(),
"gravity": np.random.rand(),
"mesh_scale_factor": np.random.rand(),
"eps1": np.random.rand(),
"eps2": np.random.rand(),
"w_upstream": np.random.rand(),
"w_newton": np.random.rand(),
"derivative_factor": np.random.rand(),
},
"extra_options": {
k + 1: v for k, v in enumerate(
|
np.random.randint(10, size=24)
|
numpy.random.randint
|
import liionpack as lp
import numpy as np
import matplotlib.pyplot as plt
import unittest
class netlist_utilsTest(unittest.TestCase):
def test_read_netlist(self):
net1 = lp.read_netlist("4p1s", I=50.0)
net2 = lp.read_netlist("4p1s.txt", I=50.0)
net3 = lp.read_netlist("4p1s.cir", I=50.0)
I_map = net1["desc"].str.find("I") > -1
assert np.all(net1[I_map]["value"] == 50.0)
assert np.all(net2[I_map]["value"] == 50.0)
assert np.all(net3[I_map]["value"] == 50.0)
def test_netlist_exception(self):
def bad_filename():
_ = lp.read_netlist("4p1s.bad", I=50.0)
with self.assertRaises(FileNotFoundError):
bad_filename()
def test_setup_circuit(self):
netlist = lp.setup_circuit(Np=1, Ns=2, Rb=1e-4, Rc=1e-2, Ri=1e-3, V=2.0, I=10.0)
V_map = netlist["desc"].str.find("V") > -1
assert np.all(netlist[V_map]["value"] == 2)
def test_setup_circuit_plot(self):
netlist = lp.setup_circuit(
Np=1, Ns=2, Rb=1e-4, Rc=1e-2, Ri=1e-3, V=2.0, I=10.0, plot=True
)
V_map = netlist["desc"].str.find("V") > -1
assert np.all(netlist[V_map]["value"] == 2)
plt.close("all")
def test_solve_circuit(self):
netlist = lp.setup_circuit(Np=1, Ns=2, Rb=1e-4, Rc=1e-2, Ri=1e-3, V=2.0, I=1.0)
V_node, I_batt = lp.solve_circuit(netlist)
        assert np.allclose(I_batt, 1.0)
def test_solve_circuit_vectorized(self):
netlist = lp.setup_circuit(
Np=1, Ns=100, Rb=1e-4, Rc=1e-2, Ri=1e-3, V=2.0, I=1.0
)
V_node, I_batt = lp.solve_circuit(netlist)
V_node_v, I_batt_v = lp.solve_circuit_vectorized(netlist)
assert
|
np.allclose(V_node, V_node_v)
|
numpy.allclose
|
# Description: Calculate yearly averages from monthly files.
#
# Author: <NAME>
# E-mail: <EMAIL>
# Date: January/2018
import numpy as np
import matplotlib
from glob import glob
from os import system
from datetime import datetime
from netCDF4 import Dataset, num2date
from pandas import Timestamp
from gsw import SA_from_SP, CT_from_pt
from gsw import alpha as falpha
from gsw import beta as fbeta
import xarray as xr
from ap_tools.utils import lon360to180, rot_vec
from reproducibility import savez
def deg2m_dist(lon, lat):
"""
USAGE
-----
dx, dy = deg2m_dist(lon, lat)
"""
lon, lat = map(np.array, (lon, lat))
dlat, _ = np.gradient(lat) # [deg]
_, dlon = np.gradient(lon) # [deg]
deg2m = 111120.0 # [m/deg]
# Account for divergence of meridians in zonal distance.
dx = dlon*deg2m*np.cos(lat*np.pi/180.) # [m]
dy = dlat*deg2m # [m]
return dx, dy
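# Example (sketch): at 60 degrees of latitude one degree of longitude spans roughly
# 111120 m * cos(60 deg) ~ 55560 m, which is what the cos(lat) factor above encodes;
# the meridional spacing dy stays ~111 km per degree everywhere.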
def ang_isob(xiso, yiso):
xiso, yiso = map(np.array, (xiso, yiso))
R = 6371000.0 # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius.
deg2rad = np.pi/180. # [rad/deg]
# From the coordinates of the isobath, find the angle it forms with the
# zonal axis, using points k+1 and k.
shth = yiso.size-1
theta = np.zeros(shth)
for k in range(shth):
dyk = R*(yiso[k+1] - yiso[k])
dxk = R*(xiso[k+1] - xiso[k])*np.cos(yiso[k]*deg2rad)
theta[k] = np.arctan2(dyk, dxk)
xisom = 0.5*(xiso[1:] + xiso[:-1])
yisom = 0.5*(yiso[1:] + yiso[:-1])
return xisom, yisom, theta/deg2rad
def near(x, x0, npts=1, return_index=False):
x = list(x)
xnear = []
xidxs = []
for n in range(npts):
idx = np.nanargmin(np.abs(np.array(x)-x0))
xnear.append(x.pop(idx))
if return_index:
xidxs.append(idx)
if return_index: # Sort indices according to the proximity of wanted points.
xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()]
xnear.sort()
if npts==1:
xnear = xnear[0]
if return_index:
xidxs = xidxs[0]
else:
xnear = np.array(xnear)
if return_index:
return xidxs
else:
return xnear
def stripmsk(arr, mask_invalid=True):
if mask_invalid:
arr = np.ma.masked_invalid(arr)
if np.ma.isMA(arr):
msk = arr.mask
arr = arr.data
arr[msk] = np.nan
return arr
##---
CALC_MULTIYEARLY_TSDUVKE = False
NYR_avg = 10 # Average T, S, u, v every 10 years.
#
CALC_UxVaISOB = False
CALC_U_zavg = False
zslabavg_top, zslabavg_bot = 0, 150
CALC_SSH = False
CALC_PT = False
#
# Also plot seasonal cycle for these.
#
CALC_KE = False
CALC_GRADRHO = False
CALC_Jb = True
CALC_Jb_shelf_integral_timeseries = False
CALC_Tauxy = False
#
CALC_PT_zavg = False
CALC_AICE = False
z_PT = 1000 # [m].
CALC_CLIM_DUVKE = False
# Start and end years.
START_YEAR = 1959
END_YEAR = 2009
fname_out_aice = 'aice.npz'
fname_out_eke = 'EKE_MKE.npz'
fname_out_drhomag = 'gradRHO.npz'
fname_out_Jb = 'Jb.npz'
fname_out_Jb_shelf_integral_timeseries = 'Jb_int.npz'
fname_out_Tauxy = 'tauxy.npz'
fname_out_ssh = 'yearly_SSH.npz'
fname_out_u = 'yearly_U.npz'
fname_out_uvxisob = 'yearly_UVxisob.npz'
fname_out_PT = 'yearly_PT.npz'
fname_out_tsduvke = 'decadal_TSD-UV-KE.npz'
fname_out_duvke_clim = 'clim_%d-%d_D-UV-KE.npz'%(START_YEAR, END_YEAR)
fname_dzu = 'POP-dzu_dzt_kzit_subsetSO.nc'
cm2m = 1e-2
fcap = 501
thresh = 1e10
fdir_tail = '/ocn/hist/ia_top_tx0.1_v2_yel_patc_1948_intel.pop.h.????-??.nc'
head_fin = '/lustre/atlas1/cli115/proj-shared/ia_top_tx0.1_v2_60yrs/'
fdirs = glob(head_fin+'ia_top_tx0.1_v2_yel_patc_1948_intel_def_year_????')
fdirs.sort()
if not isinstance(fdirs, list):
fdirs = [fdirs]
fnames = []
for fdir in fdirs:
ystr = int(fdir[-4:])
if np.logical_or(ystr<START_YEAR, ystr>END_YEAR):
continue
fnamesi = glob(fdir + fdir_tail)
fnamesi.sort()
for f in fnamesi:
fnames.append(f)
nc = Dataset(fnames[0])
lont = nc.variables['TLONG'][:fcap,:]
latt = nc.variables['TLAT'][:fcap,:]
lonu = nc.variables['ULONG'][:fcap,:]
latu = nc.variables['ULAT'][:fcap,:]
kmt = nc.variables['KMT'][:fcap,:] - 1 # Convert fortran to python index.
ny, nx = kmt.shape
z = nc.variables['z_t'][:]*cm2m # [m].
t = []
tmo = []
fname_isobs = 'isobaths.nc'
ncx = Dataset(fname_isobs)
dmsm = ncx["1000 m isobath"]['diso'][:]
xmsm = ncx["1000 m isobath"]['xiso'][:]
ymsm = ncx["1000 m isobath"]['yiso'][:]
xm = ncx["1000 m isobath (U-points)"]['xiso'][:]
ym = ncx["1000 m isobath (U-points)"]['yiso'][:]
dm = ncx["1000 m isobath (U-points)"]['diso'][:]
Im = ncx["1000 m isobath (U-points)"]['i'][:]
Jm = ncx["1000 m isobath (U-points)"]['j'][:]
uxmsk = ncx['1000 m isobath (x-isobath U, V masks)']['Umsk'][:]
vxmsk = ncx['1000 m isobath (x-isobath U, V masks)']['Vmsk'][:]
dmm = 0.5*(dm[1:] + dm[:-1])
xmm, ymm, angm = ang_isob(xm, ym) # Angle of the U-points isobath.
##----
if CALC_AICE:
iceconc_thresh = 0.15 # Ice concentration threshold.
fnames = [fnamen.replace('ocn','ice') for fnamen in fnames]
fnames = [fnamen.replace('.pop.h.','.cice.h.') for fnamen in fnames]
AICE = np.array([])
nfirst = True
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
if nfirst:
tarea = nci['tarea'][:].data*1e-6 # [km2]
lon = lon360to180(nci['TLON'][:].data)
lat = nci['TLAT'][:].data
tmask = nci['tmask'][:]
nfirst = False
Aice = nci.variables['aice'][0,:fcap,:]/100. # Convert to fractional sea ice concentration (0-1).
# Calculate total ice area for valid ice cells.
# iarea=aice(aice>=dc & aice<=1.0 & aice~=0).*tarea(aice>=dc & aice<=1.0 & aice~=0).*1e-6;
fice=np.logical_and(Aice>=iceconc_thresh, Aice<=1.0)
aice = np.sum(Aice[fice]*tarea[fice])
t.append(yeari)
AICE = np.append(AICE, aice)
t = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in t])
savez(fname_out_aice, icearea=AICE, lon=lont, lat=latt, tarea=tarea, t=t)
##---
if CALC_UxVaISOB:
dzui = Dataset(fname_dzu).variables['dzu'][:]
dzui = dzui[:,Im,Jm]*cm2m
Uxyr, Ux, ux = None, None, None
Vayr, Va, va = None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
# Zonal/meridional vel. components.
uu = nci.variables['UVEL'][0,:,:fcap,:]
vv = nci.variables['VVEL'][0,:,:fcap,:]
ui = uu[:,Im,Jm]*cm2m
vi = vv[:,Im,Jm]*cm2m
if fnamen==fnames[0]:
hmsk = ~ui.mask
hi = np.array([dzui[hmsk[:,n],n].sum(axis=0) for n in range(Im.size)])
Ui = np.sum(ui*dzui, axis=0) # [m2/s], zonal transport per unit along-isobath length.
Vi = np.sum(vi*dzui, axis=0) # [m2/s], meridional transport per unit along-isobath length.
uui = Ui/hi # [m/s], depth-averaged zonal vel.
vvi = Vi/hi # [m/s], depth-averaged meridional vel.
uui = 0.5*(uui[1:] + uui[:-1])
vvi = 0.5*(vvi[1:] + vvi[:-1])
Ui = 0.5*(Ui[1:] + Ui[:-1])
Vi = 0.5*(Vi[1:] + Vi[:-1])
# Rotate depth-averaged velocities using angles based on realistic isobaths.
va, ux = rot_vec(uui, vvi, angle=angm, degrees=True) # ATTENTION: v_along, u_across = rot(u_east, v_north)***
ux = -ux # Positive ONSHORE.
Vva, Uux = rot_vec(Ui, Vi, angle=angm, degrees=True)
Uux = -Uux
ux = ux[np.newaxis,...]
va = va[np.newaxis,...]
Uux = Uux[np.newaxis,...]
Vva = Vva[np.newaxis,...]
if Ux is not None:
Ux = np.vstack((Ux, ux))
Va = np.vstack((Va, va))
UUx = np.vstack((UUx, Uux))
VVa = np.vstack((VVa, Vva))
else:
Ux = ux
Va = va
UUx = Uux
VVa = Vva
nmo+=1
tmo.append(yeari)
if nmo==12:
Ux = Ux.mean(axis=0)[np.newaxis,...]
Va = Va.mean(axis=0)[np.newaxis,...]
UUx = UUx.mean(axis=0)[np.newaxis,...]
VVa = VVa.mean(axis=0)[np.newaxis,...]
if Uxyr is not None:
Uxyr = np.vstack((Uxyr, Ux))
Vayr = np.vstack((Vayr, Va))
UUxyr = np.vstack((UUxyr, UUx))
VVayr = np.vstack((VVayr, VVa))
else:
Uxyr = Ux.copy()
Vayr = Va.copy()
UUxyr = UUx.copy()
VVayr = VVa.copy()
t.append(yeari2)
Ux, UUx = None, None
Va, VVa = None, None
nmo=0
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
Uxyr, Vayr = Uxyr.data, Vayr.data
Uxyr[Uxyr>thresh] = np.nan
Vayr[Vayr>thresh] = np.nan
UUxyr, VVayr = UUxyr.data, VVayr.data
UUxyr[UUxyr>thresh] = np.nan
VVayr[VVayr>thresh] = np.nan
# Uxyr, Vayr = Uxyr*cm2m, Vayr*cm2m # [m/s].
savez(fname_out_uvxisob, ux=Uxyr, va=Vayr, Ux=UUxyr, Va=VVayr, lonu=xm, latu=ym, dm=dmm, xm=xmm, ym=ymm, angm=angm, Im=Im, Jm=Jm, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
##----
if CALC_U_zavg:
fzu = np.logical_and(z>=zslabavg_top, z<=zslabavg_bot)
dzu0 = Dataset(fname_dzu).variables['dzu'][fzu,...]*cm2m # [m].
h0 = dzu0.sum(axis=0) # [m].
    Uyr, Umo, U, u = None, None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
u = nci.variables['UVEL'][0,fzu,:fcap,:]
u = np.sum(u*dzu0, axis=0)/h0
u = u[np.newaxis,...]*cm2m # [m/s].
if U is not None:
U = np.vstack((U, u))
else:
U = u
nmo+=1
tmo.append(yeari)
if nmo==12:
if Umo is not None:
Umo = np.vstack((Umo, U[:, Im, Jm]))
else:
                Umo = U[:, Im, Jm]
U = U.mean(axis=0)[np.newaxis,...]
if Uyr is not None:
Uyr = np.vstack((Uyr, U))
else:
Uyr = U.copy()
t.append(int(yeari2))
U = None
nmo=0
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
Uyr = Uyr.data
Uyr[Uyr>thresh] = np.nan
Uyr[Uyr==0.] = np.nan
savez(fname_out_u, umonthly=Umo, u=Uyr, lon=lonu, lat=latu, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym, ztop=zslabavg_top, zbot=zslabavg_bot)
##---
if CALC_Tauxy: # Yearly wind stress.
Tauxyr, Tauxmo, Taux, taux = None, None, None, None
Tauyyr, Tauymo, Tauy, tauy = None, None, None, None
skel = np.zeros((ny, nx))
Tauxclm, Tauyclm = dict(), dict()
_ = [Tauxclm.update({mo:skel}) for mo in range(1, 13)]
_ = [Tauyclm.update({mo:skel}) for mo in range(1, 13)]
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
taux = nci.variables['TAUX'][0,:fcap,:]
tauy = nci.variables['TAUY'][0,:fcap,:]
taux, tauy = taux[np.newaxis,...], tauy[np.newaxis,...]
if Taux is not None:
Taux = np.vstack((Taux, taux))
Tauy = np.vstack((Tauy, tauy))
else:
Taux = taux.copy()
Tauy = tauy.copy()
nmo+=1
tmo.append(yeari)
# Update monthly climatological fields.
Tauxclm.update({nmo:Tauxclm[nmo] + taux})
Tauyclm.update({nmo:Tauyclm[nmo] + tauy})
if nmo==12:
if Tauxmo is not None:
Tauxmo = np.vstack((Tauxmo, Taux[:, Im, Jm]))
Tauymo = np.vstack((Tauymo, Tauy[:, Im, Jm]))
else:
Tauxmo, Tauymo = Taux[:, Im,Jm], Tauy[:, Im,Jm]
Taux = Taux.mean(axis=0)[np.newaxis,...]
Tauy = Tauy.mean(axis=0)[np.newaxis,...]
if Tauxyr is not None:
Tauxyr = np.vstack((Tauxyr, Taux))
Tauyyr = np.vstack((Tauyyr, Tauy))
else:
Tauxyr, Tauyyr = Taux.copy(), Tauy.copy()
t.append(int(yeari2))
Taux = None
Tauy = None
nmo=0
Tauxmom = 0.5*(Tauxmo[:, 1:] + Tauxmo[:, :-1])
Tauymom = 0.5*(Tauymo[:, 1:] + Tauymo[:, :-1])
Tauamo, _ = rot_vec(Tauxmom, Tauymom, angle=angm, degrees=True) # positive CLOCKWISE***
dynecm2toNm2 = 1e-1 # 1e-5*1e4
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
#
# Along-isobath wind stress, positive CLOCKWISE around the isobath.
Tauamo = Tauamo.data
Tauamo[Tauamo>thresh] = np.nan
Tauamo = Tauamo*dynecm2toNm2 # [N/m2].
#
#--- Climatological monthly fields.
nt = len(fnames)/12
for mo in range(1, 13):
auxx = Tauxclm[mo].squeeze()*dynecm2toNm2/nt
auxy = Tauyclm[mo].squeeze()*dynecm2toNm2/nt
Tauxclm.update({mo:auxx})
Tauyclm.update({mo:auxy})
#
Tauxyr, Tauyyr = Tauxyr.data, Tauyyr.data
Tauxmo, Tauymo = Tauxmo.data, Tauymo.data
Tauxyr[Tauxyr>thresh] = np.nan
Tauyyr[Tauyyr>thresh] = np.nan
Tauxyr = Tauxyr*dynecm2toNm2 # [N/m2].
Tauyyr = Tauyyr*dynecm2toNm2 # [N/m2].
Tauxmo[Tauxmo>thresh] = np.nan
Tauymo[Tauymo>thresh] = np.nan
Tauxmo = Tauxmo*dynecm2toNm2 # [N/m2].
Tauymo = Tauymo*dynecm2toNm2 # [N/m2].
savez(fname_out_Tauxy, tauxclm=Tauxclm, tauyclm=Tauyclm, tau_alongmo=Tauamo, tauxmo=Tauxmo, tauymo=Tauymo, taux=Tauxyr, tauy=Tauyyr, lon=lonu, lat=latu, dm=dmm, xm=xmm, ym=ymm, angm=angm, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
if CALC_Jb_shelf_integral_timeseries: # Monthly surface buoyancy flux integrated over the shelf.
JbINT = np.array([])
JqINT = np.array([])
JsINT = np.array([])
# Load in 1000 m mask.
finvol = np.bool8(np.load('volmsk1000m.npz')['volmsk'])
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
shf = nci.variables['SHF'][0,:fcap,:] # [W/m2].
if fnamen==fnames[0]:
rho0 = nci.variables['rho_sw'][0]*1e3 # [kg/m3].
rho_fw = nci.variables['rho_fw'][0]*1e3 # [kg/m3].
g = nci.variables['grav'][0]*1e-2 # [m/s2].
Cp = nci.variables['cp_sw'][0]*1e3*1e-7 # [J/kg/degC].
rhoCp = rho0*Cp
#
wetmsk = np.float32(~shf.mask) # Ones in valid (non-continent) cells.
tarea = nci.variables['TAREA'][:fcap,:]*wetmsk*cm2m*cm2m # [m2].
tareain = tarea[finvol] # [m2], zeros on the continent.
Tareain = tareain.sum() # [m2].
JB = shf*0
JQ = JB.copy()
JS = JB.copy()
sfwf = nci.variables['SFWF'][0,:fcap,:]/rho_fw # [(kg of freshwater)/m2/s] / [(kg of freshwater)/m3] = [m/s] = [m3/s/m2]. Volume flux density.
# positive SFWF = Ocean gains freshwater, so this is (P - E).
SSSp = nci.variables['SALT'][0,0,:fcap,:] # [g/kg].
SST = nci.variables['TEMP'][0,0,:fcap,:] # [degC].
SSSA = SA_from_SP(SSSp, 0, lont, latt) # [g/kg].
SSCT = CT_from_pt(SSSA, SST) # [degC].
alpha = falpha(SSSA, SSCT, 0)
beta = fbeta(SSSA, SSCT, 0)
coeffQ = g*alpha/rhoCp
coeffFW = g*beta*SSSA
qb = coeffQ*shf
sb = coeffFW*sfwf # Positive SFWF, ocean gains freshwater, hence buoyancy.
jb = qb + sb # Surface buoyancy flux [W/kg]. Hosegood et al. (2013).
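            # In symbols: Jb = (g*alpha/(rho0*Cp)) * Q_net + g*beta*S_A * F_fw  [W/kg],
            # with Q_net the net surface heat flux (SHF) and F_fw = SFWF/rho_fw the
            # freshwater volume flux; both terms are positive when the ocean gains buoyancy.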
# Accumulate time-averaged 2D fields [W/kg].
JB += jb
JQ += qb
JS += sb
# Integrate over the 1000 m-bounded control surface.
Jbint = np.sum(jb[finvol]*tareain)/Tareain
Jqint = np.sum(qb[finvol]*tareain)/Tareain
Jsint = np.sum(sb[finvol]*tareain)/Tareain
JbINT = np.append(JbINT, Jbint)
JqINT = np.append(JqINT, Jqint)
JsINT = np.append(JsINT, Jsint)
nmo+=1
tmo.append(yeari)
if nmo==12:
nmo=0
nt = len(tmo)
JB /= nt
JQ /= nt
JS /= nt
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
savez(fname_out_Jb_shelf_integral_timeseries, Jb=JbINT, Jq=JqINT, Js=JsINT, t=tmo, Jbxy=JB, Jqxy=JQ, Jsxy=JS, lon=lont, lat=latt)
##---
if CALC_Jb: # Yearly surface buoyancy flux.
thresh = 1e3
Jbyr, Jbmo, Jb, jb = None, None, None, None
finvol = np.bool8(np.load('volmsk1000m.npz')['volmsk'])
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
shf = nci.variables['SHF'][0,:fcap,:] # [W/m2].
if fnamen==fnames[0]:
rho0 = nci.variables['rho_sw'][0]*1e3 # [kg/m3].
rho_fw = nci.variables['rho_fw'][0]*1e3 # [kg/m3].
g = nci.variables['grav'][0]*1e-2 # [m/s2].
Cp = nci.variables['cp_sw'][0]*1e3*1e-7 # [J/kg/degC].
rhoCp = rho0*Cp
#
wetmsk = np.float32(~shf.mask) # Ones in valid (non-continent) cells.
tarea = nci.variables['TAREA'][:fcap,:]*wetmsk*cm2m*cm2m # [m2].
tareain = tarea[finvol] # [m2], zeros on the continent.
Tareain = tareain.sum() # [m2].
sfwf = nci.variables['SFWF'][0,:fcap,:]/rho_fw # [(kg of freshwater)/m2/s] / [(kg of freshwater)/m3] = [m/s] = [m3/s/m2]. Volume flux density.
# positive SFWF = Ocean gains freshwater, so this is (P - E).
SSSp = nci.variables['SALT'][0,0,:fcap,:] # [g/kg].
SST = nci.variables['TEMP'][0,0,:fcap,:] # [degC].
SSSA = SA_from_SP(SSSp, 0, lont, latt) # [g/kg].
SSCT = CT_from_pt(SSSA, SST) # [degC].
alpha = falpha(SSSA, SSCT, 0)
beta = fbeta(SSSA, SSCT, 0)
coeffQ = g*alpha/rhoCp
coeffFW = g*beta*SSSA
qb = coeffQ*shf
sb = coeffFW*sfwf # Positive SFWF, ocean gains freshwater, hence buoyancy.
jb = qb + sb # Surface buoyancy flux [W/kg]. Hosegood et al. (2013).
# Integrate over the 1000 m-bounded control surface.
Jbint = np.sum(jb[finvol]*tareain)/Tareain
jb = jb[np.newaxis,...]
if Jb is not None:
Jb = np.vstack((Jb, jb))
else:
Jb = jb.copy()
nmo+=1
tmo.append(yeari)
if nmo==12:
if Jbmo is not None:
Jbmo = np.vstack((Jbmo, Jb[:, Im, Jm]))
else:
Jbmo = Jb[:, Im, Jm]
Jb = Jb.mean(axis=0)[np.newaxis,...]
if Jbyr is not None:
Jbyr =
|
np.vstack((Jbyr, Jb))
|
numpy.vstack
|
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
# Mean and Std of train set
ECG_MEAN = np.array(
[0.618, 0.974, 0.080, 1.172, 1.415, 1.419,
1.187, 0.954, 0.356, -0.796, 0.131, 0.665]
).reshape((-1, 1))
ECG_STD = np.array(
[24.862, 33.086, 39.441, 62.491, 59.789, 64.328,
58.257, 50.321, 25.534, 26.332, 19.010, 26.810]
).reshape((-1, 1))
class ECGDataset(Dataset):
def __init__(self, dataset, is_train):
super(ECGDataset, self).__init__()
self.dataset = dataset
self.is_train = is_train
return
def __len__(self):
return len(self.dataset)
def __augment(self, ecg):
ecg_tmp = np.copy(ecg)
channels, length = ecg.shape
if np.random.randn() > 0.8:
scale = np.random.normal(loc=1.0, scale=0.1, size=(channels, 1))
scale = np.matmul(scale, np.ones((1, length)))
ecg_tmp = ecg_tmp * scale
if
|
np.random.randn()
|
numpy.random.randn
|
from moabb.datasets import BNCI2014001, Cho2017, PhysionetMI
from moabb.paradigms import MotorImagery
import numpy as np
from numpy.random import RandomState
import pickle
import time
import torch
import os
import pandas as pd
import mne
import scipy.signal as signal
import copy
from scipy.linalg import sqrtm, inv
from collections import defaultdict
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
cuda = torch.cuda.is_available()
print('gpu: ', cuda)
device = 'cuda' if cuda else 'cpu'
seed = 42
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
rng = RandomState(seed)
def print_info(source_data,dataset_name):
print("current dataset {}".format(dataset_name))
for subject_idx in range(len(source_data)):
print("source_data subject_idx {} has shape : {}, with range scale ({},{}) ".format(
subject_idx, source_data[subject_idx].shape,
np.max(source_data[subject_idx]), np.min(source_data[subject_idx])))
def print_dataset_info(data,dataset_name="train dataset A"):
print(dataset_name)
# for subject_idx in range(len(data)):
print("Train subject has shape : {}, with range scale ({},{}) ".format(
data.shape,
np.max(data), np.min(data)))
class LabelAlignment:
"""
Label Alignment technique
https://arxiv.org/pdf/1912.01166.pdf
"""
def __init__(self,target_dataset):
"""
assume target_data is (trials,channels,samples)
target_label is (trials)
"""
self.target_data,self.target_label = target_dataset
self.target_r_op = self.generate_class_cov(self.target_data,self.target_label,invert=False)
# for k,v in self.target_r_op.items():
# print("target label {} has r_op : {}".format(k,v))
def convert_source_data_with_LA(self, source_data,source_label):
"""
Args:
source_data: (n_subject,(trials,channels,samples))
source_label: (n_subject,(trials))
Returns:
"""
new_source_data = list()
for subject in range(len(source_data)):
subject_data = source_data[subject]
subject_label = source_label[subject]
category_A_m = dict()
new_subject_data = list()
subject_category_r_op = self.generate_class_cov(subject_data,subject_label,invert=True)
for label in sorted(list(subject_category_r_op.keys())):
if label not in list(self.target_r_op.keys()):
print("current label {} is not in target dataset ".format(label))
return
source_r_op = subject_category_r_op[label]
target_r_op = self.target_r_op[label]
A_m = np.matmul(target_r_op, source_r_op)
category_A_m[label] = A_m
for trial in range(len(subject_data)):
trial_data = subject_data[trial]
trial_label = subject_label[trial]
trial_A_m = category_A_m[trial_label]
convert_trial_data = np.matmul(trial_A_m, trial_data)
new_subject_data.append(convert_trial_data)
new_subject_data = np.array(new_subject_data)
new_source_data.append(new_subject_data)
return new_source_data,source_label
def generate_class_cov(self,target_data,target_label,invert=True):
"""
Use the target data to compute a per-class covariance operator: the inverse square root of the class-average covariance when invert=True, or its square root when invert=False.
Args:
target_data: (trials,channels,samples)
target_label: (trials)
Returns:
"""
category_data = defaultdict(list)
category_r_op = dict()
for data,label in zip(target_data,target_label):
# print("current label : ",label)
category_data[label].append(data)
for label,data in category_data.items():
data= np.array(data)
# print("data shape : ",data.shape)
if invert:
# print("calculate inv sqrt cov")
r_op = self.calculate_inv_sqrt_cov(data)
else:
# print("calculate sqrt cov")
r_op = self.calculate_sqrt_cov(data)
category_r_op[label] = r_op
return category_r_op
def calculate_inv_sqrt_cov(self,data):
assert len(data.shape) == 3
#r = np.matmul(data, data.transpose((0, 2, 1))).mean(0)
#calculate covariance matrix of each trial
r = 0
for trial in data:
cov = np.cov(trial, rowvar=True)
r += cov
r = r/data.shape[0]
# print("origin cov : ", r)
if np.iscomplexobj(r):
print("covariance matrix problem")
if np.iscomplexobj(sqrtm(r)):
print("covariance matrix problem sqrt")
r_op = inv(sqrtm(r))
if np.iscomplexobj(r_op):
print("WARNING! Covariance matrix was not SPD somehow. Can be caused by running ICA-EOG rejection, if "
"not, check data!!")
# print("r op : ",r_op)
r_op = np.real(r_op).astype(np.float32)
elif not np.any(np.isfinite(r_op)):
print("WARNING! Not finite values in R Matrix")
return r_op
def calculate_sqrt_cov(self,data):
assert len(data.shape) == 3
#r = np.matmul(data, data.transpose((0, 2, 1))).mean(0)
#calculate covariance matrix of each trial
r = 0
for trial in data:
cov = np.cov(trial, rowvar=True)
r += cov
r = r/data.shape[0]
if np.iscomplexobj(r):
print("covariance matrix problem")
if np.iscomplexobj(sqrtm(r)):
print("covariance matrix problem sqrt")
r_op = sqrtm(r)
return r_op
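# --- Minimal usage sketch for LabelAlignment (illustrative only; synthetic arrays, not data from this pipeline) ---
# Assumes target/source data shaped (trials, channels, samples) with integer class labels, as in the docstrings above.
def _demo_label_alignment():
    rng_demo = np.random.RandomState(0)
    target_X = rng_demo.randn(20, 8, 128)          # 20 trials, 8 channels, 128 samples
    target_y = np.array([0, 1] * 10)               # both classes present in the target set
    source_X = [rng_demo.randn(16, 8, 128)]        # a single source subject
    source_y = [np.array([0, 1] * 8)]
    la = LabelAlignment(target_dataset=(target_X, target_y))
    aligned_X, aligned_y = la.convert_source_data_with_LA(source_X, source_y)
    print(aligned_X[0].shape)                      # (16, 8, 128): each trial multiplied by its class-specific A_m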
def expand_data_dim(data):
if isinstance(data, list):
for idx in range(len(data)):
if len(data[idx].shape) == 3:
new_data = np.expand_dims(data[idx], axis=1)
else:
new_data = data[idx]
data[idx] = new_data
return data
elif isinstance(data, np.ndarray):
if len(data.shape) == 3:
return np.expand_dims(data, axis=1)
else:
return data
else:
raise ValueError("the data format during the process section is not correct")
def shuffle_data(subject_data,subject_label):
available_index = np.arange(subject_data.shape[0])
shuffle_index = np.random.permutation(available_index)
shuffle_subject_data = subject_data[shuffle_index,]
shuffle_subject_label = subject_label[shuffle_index,]
return [shuffle_subject_data,shuffle_subject_label]
def modify_data(data,time=256):
return data[:, :, :time]
def load_source_sleep(path=None):
if path is None:
path = "C:/Users/wduong/mne_data/SleepSource/SleepSource"
train_data_1 = []
train_label_1 = []
subject_ids = []
for subj in range(39):
with open(os.path.join(path, "training_s{}r1X.npy".format(subj)), 'rb') as f:
x = pickle.load(f)
train_data_1.append(x)
with open(os.path.join(path, "training_s{}r1y.npy".format(subj)), 'rb') as f:
y = pickle.load(f)
train_label_1.append(y)
subject_ids.extend([subj]*len(y))
for subj in range(39):
with open(os.path.join(path, "training_s{}r2X.npy".format(subj)), 'rb') as f:
x = pickle.load(f)
train_data_1.append(x)
with open(os.path.join(path, "training_s{}r2y.npy".format(subj)), 'rb') as f:
y = pickle.load(f)
train_label_1.append(y)
subject_ids.extend([subj+39]*len(y))
# with open(os.path.join(path, "training_s{}r2X.npy".format(subj)), 'rb') as f:
# train_data_2.append(pickle.load(f))
# with open(os.path.join(path, "training_s{}r2y.npy".format(subj)), 'rb') as f:
# train_label_2.append(pickle.load(f))
dataset_meta = pd.DataFrame({"subject":subject_ids,"session":["session_0"]*len(subject_ids),"run":["run_0"]*len(subject_ids)})
train_data = np.concatenate(train_data_1)
train_label = np.concatenate(train_label_1)
return train_data,train_label,dataset_meta
def load_full_target_sleep(path=None,file_format="leaderboard",start_id=0,end_id=6):
###old version
# if path is None:
# path = "C:/Users/wduong/mne_data/MNE-beetlsleepleaderboard-data/sleep_target"
# target_train_data = []
# target_train_label = []
# subject_ids = []
# for subj in range(start_id,end_id):
# subject_data = list()
# subject_label = list()
# for sess in range(1,3):
#
# data_format = file_format+"_s{}r{}X.npy"
# abel_format = file_format+"_s{}r{}y.npy"
# with open(os.path.join(path, data_format.format(subj,sess)), 'rb') as f:
# x = pickle.load(f)
# subject_data.append(x)
# with open(os.path.join(path, abel_format.format(subj,sess)), 'rb') as f:
# y = pickle.load(f)
# subject_label.append(y)
#
# subject_data = np.concatenate(subject_data)
# subject_label = np.concatenate(subject_label)
# target_train_data.append(subject_data)
# target_train_label.append(subject_label)
# subject_ids.extend([subj]*len(subject_label))
#
# dataset_meta = pd.DataFrame({"subject": subject_ids, "session": ["session_0"] * len(subject_ids),
# "run": ["run_0"] * len(subject_ids)})
# target_train_data = np.concatenate(target_train_data)
# target_train_label = np.concatenate(target_train_label)
#
# return target_train_data,target_train_label,dataset_meta
###new version
if path is None:
path = "C:/Users/wduong/mne_data/MNE-beetlsleepleaderboard-data/sleep_target"
target_train_data = []
target_train_label = []
subject_ids = []
subject_idx = 0
for subj in range(start_id,end_id):
# subject_data = list()
# subject_label = list()
for sess in range(1,3):
data_format = file_format+"_s{}r{}X.npy"
label_format = file_format+"_s{}r{}y.npy"
with open(os.path.join(path, data_format.format(subj,sess)), 'rb') as f:
x = pickle.load(f)
subject_data = x
# subject_data.append(x)
with open(os.path.join(path, label_format.format(subj,sess)), 'rb') as f:
y = pickle.load(f)
subject_label = y
# subject_label.append(y)
# subject_data = np.concatenate(subject_data)
# subject_label = np.concatenate(subject_label)
target_train_data.append(subject_data)
target_train_label.append(subject_label)
subject_ids.extend([subject_idx]*len(subject_label))
subject_idx +=1
dataset_meta = pd.DataFrame({"subject": subject_ids, "session": ["session_0"] * len(subject_ids),
"run": ["run_0"] * len(subject_ids)})
target_train_data = np.concatenate(target_train_data)
target_train_label = np.concatenate(target_train_label)
return target_train_data,target_train_label,dataset_meta
def load_target_sleep(path=None,file_format="leaderboard",start_id=0,end_id=6):
if path is None:
path = "C:/Users/wduong/mne_data/MNE-beetlsleepleaderboard-data/sleep_target"
target_train_data_1 = []
target_train_label_1 = []
subject_ids_1 = []
target_train_data_2 = []
target_train_label_2 = []
subject_ids_2 = []
total_subjects = end_id-start_id
for subj in range(start_id,end_id):
session_1_data_format = file_format+"_s{}r1X.npy"
session_1_label_format = file_format+"_s{}r1y.npy"
with open(os.path.join(path, session_1_data_format.format(subj)), 'rb') as f:
x = pickle.load(f)
target_train_data_1.append(x)
with open(os.path.join(path, session_1_label_format.format(subj)), 'rb') as f:
y = pickle.load(f)
target_train_label_1.append(y)
subject_ids_1.extend([subj]*len(y))
session_2_data_format = file_format+"_s{}r2X.npy"
session_2_label_format = file_format+"_s{}r2y.npy"
with open(os.path.join(path, session_2_data_format.format(subj)), 'rb') as f:
x = pickle.load(f)
target_train_data_2.append(x)
with open(os.path.join(path, session_2_label_format.format(subj)), 'rb') as f:
y = pickle.load(f)
target_train_label_2.append(y)
subject_ids_2.extend([subj+total_subjects]*len(y))
dataset_meta_1 = pd.DataFrame({"subject":subject_ids_1,"session":["session_0"]*len(subject_ids_1),"run":["run_0"]*len(subject_ids_1)})
target_train_data_1 = np.concatenate(target_train_data_1)
target_train_label_1 = np.concatenate(target_train_label_1)
dataset_meta_2 = pd.DataFrame({"subject":subject_ids_2,"session":["session_0"]*len(subject_ids_2),"run":["run_0"]*len(subject_ids_2)})
target_train_data_2 = np.concatenate(target_train_data_2)
target_train_label_2 = np.concatenate(target_train_label_2)
return target_train_data_1,target_train_label_1,dataset_meta_1,target_train_data_2,target_train_label_2,dataset_meta_2
def load_test_sleep(path=None,file_format="leaderboard",start_id=6,end_id=18):
if path is None:
path = "C:/Users/wduong/mne_data/MNE-beetlsleepleaderboard-data/testing"
target_test_data_1 = []
target_test_label_1 = []
subject_ids_1 = []
target_test_data_2 = []
target_test_label_2 = []
subject_ids_2 = []
start_idx = start_id
total_subjects = end_id-start_id
for subj in range(start_id,end_id):
session_1_data_format = file_format+"_s{}r1X.npy"
with open(os.path.join(path, session_1_data_format.format(subj)), 'rb') as f:
x = pickle.load(f)
target_test_data_1.append(x)
test_label = np.array([-1] * len(x))
target_test_label_1.append(test_label)
subject_ids_1.extend([start_idx]*len(x))
start_idx+=1
for subj in range(start_id,end_id):
session_2_data_format = file_format+"_s{}r2X.npy"
with open(os.path.join(path, session_2_data_format.format(subj)), 'rb') as f:
x = pickle.load(f)
target_test_data_2.append(x)
test_label = np.array([-1] * len(x))
target_test_label_2.append(test_label)
subject_ids_2.extend([start_idx] * len(x))
start_idx += 1
dataset_meta_1 = pd.DataFrame({"subject": subject_ids_1, "session": ["session_0"] * len(subject_ids_1),
"run": ["run_0"] * len(subject_ids_1)})
target_test_data_1 = np.concatenate(target_test_data_1)
target_test_label_1 = np.concatenate(target_test_label_1)
dataset_meta_2 = pd.DataFrame({"subject": subject_ids_2, "session": ["session_0"] * len(subject_ids_2),
"run": ["run_0"] * len(subject_ids_2)})
target_test_data_2 = np.concatenate(target_test_data_2)
target_test_label_2 = np.concatenate(target_test_label_2)
return target_test_data_1, target_test_label_1, dataset_meta_1, target_test_data_2, target_test_label_2, dataset_meta_2
def load_test_sleep_combine(path=None,file_format="leaderboard",start_id=6,end_id=18):
if path is None:
path = "C:/Users/wduong/mne_data/MNE-beetlsleepleaderboard-data/testing/"
target_test_data = []
target_test_label = []
subject_ids = []
start_idx = 0
for subj in range(start_id, end_id):
for session in range(1, 3):
session_data_format = file_format + "_s{}r{}X.npy"
with open(path + session_data_format.format(subj, session), 'rb') as f:
x = pickle.load(f)
target_test_data.append(x)
test_label = np.array([-1] * len(x))
target_test_label.append(test_label)
subject_ids.extend([start_idx] * len(x))
start_idx += 1
dataset_meta = pd.DataFrame({"subject": subject_ids, "session": ["session_0"] * len(subject_ids),
"run": ["run_0"] * len(subject_ids)})
target_test_data = np.concatenate(target_test_data)
target_test_label = np.concatenate(target_test_label)
return target_test_data,target_test_label,dataset_meta
def generate_data_file(list_dataset_info,folder_name='case_0',file_name = 'NeurIPS_TL'):
list_dataset = list()
for dataset in list_dataset_info:
list_dataset.append(dataset)
data_file = '{}.mat'.format(file_name)
if not os.path.isdir(folder_name):
os.makedirs(folder_name)
data_file = os.path.join(folder_name,data_file)
from scipy.io import savemat
savemat(data_file, {'datasets':list_dataset})
class EuclideanAlignment:
"""
convert trials of each subject to a new format with Euclidean Alignment technique
https://arxiv.org/pdf/1808.05464.pdf
"""
def __init__(self,list_r_op=None,subject_ids=None):
self.list_r_op = list_r_op
if subject_ids is not None:
update_list_r_op = [self.list_r_op[subject_id] for subject_id in subject_ids]
print("only use r-op for subjects {}".format(subject_ids))
self.list_r_op = update_list_r_op
def calculate_r_op(self,data):
assert len(data.shape) == 3
# r = np.matmul(data, data.transpose((0, 2, 1))).mean(0)
#calculate covariance matrix of each trial
# list_cov = list()
r = 0
for trial in data:
cov = np.cov(trial, rowvar=True)
r += cov
r = r/data.shape[0]
if np.iscomplexobj(r):
print("covariance matrix problem")
if np.iscomplexobj(sqrtm(r)):
print("covariance matrix problem sqrt")
r_op = inv(sqrtm(r))
# print("r_op shape : ", r_op.shape)
# print("data shape : ",x.shape)
# print("r_op : ", r_op)
if np.iscomplexobj(r_op):
print("WARNING! Covariance matrix was not SPD somehow. Can be caused by running ICA-EOG rejection, if "
"not, check data!!")
r_op = np.real(r_op).astype(np.float64)
elif not np.any(np.isfinite(r_op)):
print("WARNING! Not finite values in R Matrix")
return r_op
def convert_trials(self,data,r_op):
results = np.matmul(r_op, data)
return results
def generate_list_r_op(self,subjects_data):
list_r_op = list()
for subject_idx in range(len(subjects_data)):
subject_data = subjects_data[subject_idx]
r_op = self.calculate_r_op(subject_data)
list_r_op.append(r_op)
return list_r_op
def convert_subjects_data_with_EA(self,subjects_data):
#calculate r_op for each subject
if self.list_r_op is not None:
assert len(self.list_r_op) == len(subjects_data)
print("use exist r_op")
else:
print("generate new r_op")
self.list_r_op = self.generate_list_r_op(subjects_data)
new_data = list()
# print("size list r : ",len(self.list_r_op))
# print("subject dat size : ",len(subjects_data))
for subject_idx in range(len(subjects_data)):
subject_data = subjects_data[subject_idx]
r_op = self.list_r_op[subject_idx]
subject_data = self.convert_trials(subject_data,r_op)
new_data.append(subject_data)
return new_data
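# --- Minimal usage sketch for EuclideanAlignment (illustrative only; synthetic arrays) ---
# Assumes a list of per-subject arrays shaped (trials, channels, samples), as convert_subjects_data_with_EA expects.
def _demo_euclidean_alignment():
    rng_demo = np.random.RandomState(0)
    subjects_data = [rng_demo.randn(12, 8, 128) for _ in range(2)]   # two synthetic subjects
    ea = EuclideanAlignment()
    aligned = ea.convert_subjects_data_with_EA(subjects_data)
    print(len(aligned), aligned[0].shape)                            # 2 (12, 8, 128): trials whitened by each subject's R^(-1/2)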
def load_source_data(target_channels,dataset_name="cho2017",montage=None,subject_ids=None,events=None,relabel_func=None):
if relabel_func is None:
print("use default relabel function")
print("left_hand ->0 , right_hand ->1, all other -> 2")
def relabel(l):
if l == 'left_hand':
return 0
elif l == 'right_hand':
return 1
else:
return 2
relabel_func = relabel
print("common target chans : ",target_channels)
print("target size : ",len(target_channels))
fmin=4
fmax=36
tmax=3
tmin=0
sfreq=128
max_time_length = int((tmax - tmin) * sfreq)
if dataset_name == "cho2017":
# epoch_X_src, label_src, m_src = load_Cho2017(fmin=fmin,fmax=fmax,selected_chans=target_channels)
epoch_X_src, label_src, m_src = load_Cho2017(fmin=fmin,fmax=fmax,selected_chans=target_channels,subjects=subject_ids)
print("cho2017 current chans : ",epoch_X_src.ch_names)
reorder_epoch_X_src = epoch_X_src.copy().reorder_channels(target_channels)
print("reorder cho2017 chans : ",reorder_epoch_X_src.ch_names)
elif dataset_name == "physionet":
if events is None:
events=dict(left_hand=2, right_hand=3, feet=5, rest=1)
epoch_X_src, label_src, m_src = load_Physionet(fmin=fmin,fmax=fmax,selected_chans=target_channels,subjects=subject_ids,events=events)
print("physionet current chans : ",epoch_X_src.ch_names)
reorder_epoch_X_src = epoch_X_src.copy().reorder_channels(target_channels)
print("reorder physionet chans : ",reorder_epoch_X_src.ch_names)
print("total chans : ",len(epoch_X_src.ch_names))
elif dataset_name == "BCI_IV":
epoch_X_src, label_src, m_src = load_BCI_IV(fmin=fmin, fmax=fmax, selected_chans=target_channels,montage=montage,subjects=subject_ids)
print("BCI_IV current chans : ",epoch_X_src.ch_names)
reorder_epoch_X_src = epoch_X_src.copy().reorder_channels(target_channels)
print("reorder BCI_IV chans : ",reorder_epoch_X_src.ch_names)
src = reorder_epoch_X_src.get_data()
X_src = modify_data(src, time=max_time_length)
X_src = convert_volt_to_micro(X_src)
y_src = np.array([relabel_func(l) for l in label_src])
return X_src,y_src,m_src
def load_target_data(target_channels,dataset_name="dataset_B"):
if dataset_name == "dataset_A":
X_train_data,X_train_label,train_meta = load_dataset_A(train=True,selected_chans=target_channels)
X_test_data,X_test_label,test_meta = load_dataset_A(train=False, norm=False, selected_chans=target_channels)
else:
X_train_data,X_train_label,train_meta = load_dataset_B(train=True,selected_chans=target_channels)
X_test_data,X_test_label,test_meta = load_dataset_B(train=False, norm=False, selected_chans=target_channels)
X_train_data = convert_volt_to_micro(X_train_data)
X_test_data = convert_volt_to_micro(X_test_data)
return X_train_data,X_train_label,train_meta,X_test_data,X_test_label,test_meta
def create_epoch_array(data,label,channel_name,sampling_freq = 128,event_id=None):
total_trials = len(label)
ch_types = ['eeg'] * len(channel_name)
info = mne.create_info(channel_name, ch_types=ch_types, sfreq=sampling_freq)
if event_id is None:
event_id = dict(left_hand=0, right_hand=1, feet=2, rest=3)
events = np.column_stack((np.arange(0, sampling_freq * total_trials, sampling_freq),
np.zeros(total_trials, dtype=int),
label))
mne_data = mne.EpochsArray(data, info, event_id=event_id, events=events, tmin=0)
return mne_data
def process_epoch_array_with_mne(data,sampling_freq = 100,fmin=0,fmax=30):
n_channel = data.shape[1]
# sampling_freq = 500 # in Hertz
ch_types = ['eeg'] * n_channel
info = mne.create_info(ch_types=ch_types, sfreq=sampling_freq)
mne_data = mne.EpochsArray(data, info)
epoch_f = mne_data.copy().filter(
fmin, fmax, method="iir")
subject_train_data = epoch_f.get_data()
return subject_train_data
def reformat(data,label,meta_data):
"""
assume meta_data['subject'] is a list of ordered ids, e.g. 1,1,1,2,2,3,3,3,3,6,6,6
convert data from (total_trials,channels,samples) -> (subjects,trials,channels,samples)
Args:
data:
label:
meta_data:
Returns:
"""
n_subjects = len(np.unique(meta_data['subject']))
new_data = []
new_label = []
new_meta_data = []
start=0
unique_subject_ids =
|
np.unique(meta_data['subject'])
|
numpy.unique
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import pytest
import numpy as np
from ... import units as u
from .. import (PhysicsSphericalRepresentation, CartesianRepresentation,
CylindricalRepresentation, SphericalRepresentation,
UnitSphericalRepresentation, SphericalDifferential,
CartesianDifferential, UnitSphericalDifferential,
SphericalCosLatDifferential, UnitSphericalCosLatDifferential,
PhysicsSphericalDifferential, CylindricalDifferential,
RadialRepresentation, RadialDifferential, Longitude, Latitude)
from ..representation import DIFFERENTIAL_CLASSES
from ..angle_utilities import angular_separation
from ...tests.helper import assert_quantity_allclose, quantity_allclose
def assert_representation_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
actual_xyz = actual.to_cartesian().get_xyz(xyz_axis=-1)
desired_xyz = desired.to_cartesian().get_xyz(xyz_axis=-1)
actual_xyz, desired_xyz = np.broadcast_arrays(actual_xyz, desired_xyz,
subok=True)
assert_quantity_allclose(actual_xyz, desired_xyz, rtol, atol, **kwargs)
def assert_differential_allclose(actual, desired, rtol=1.e-7, **kwargs):
assert actual.components == desired.components
for component in actual.components:
actual_c = getattr(actual, component)
atol = 1.e-10 * actual_c.unit
assert_quantity_allclose(actual_c, getattr(desired, component),
rtol, atol, **kwargs)
def representation_equal(first, second):
return functools.reduce(np.logical_and,
(getattr(first, component) ==
getattr(second, component)
for component in first.components))
class TestArithmetic():
def setup(self):
# Choose some specific coordinates, for which ``sum`` and ``dot``
# works out nicely.
self.lon = Longitude(np.arange(0, 12.1, 2), u.hourangle)
self.lat = Latitude(np.arange(-90, 91, 30), u.deg)
self.distance = [5., 12., 4., 2., 4., 12., 5.] * u.kpc
self.spherical = SphericalRepresentation(self.lon, self.lat,
self.distance)
self.unit_spherical = self.spherical.represent_as(
UnitSphericalRepresentation)
self.cartesian = self.spherical.to_cartesian()
def test_norm_spherical(self):
norm_s = self.spherical.norm()
assert isinstance(norm_s, u.Quantity)
# Just to be sure, test against getting object arrays.
assert norm_s.dtype.kind == 'f'
assert np.all(norm_s == self.distance)
@pytest.mark.parametrize('representation',
(PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation))
def test_norm(self, representation):
in_rep = self.spherical.represent_as(representation)
norm_rep = in_rep.norm()
assert isinstance(norm_rep, u.Quantity)
assert_quantity_allclose(norm_rep, self.distance)
def test_norm_unitspherical(self):
norm_rep = self.unit_spherical.norm()
assert norm_rep.unit == u.dimensionless_unscaled
assert np.all(norm_rep == 1. * u.dimensionless_unscaled)
@pytest.mark.parametrize('representation',
(SphericalRepresentation,
PhysicsSphericalRepresentation,
CartesianRepresentation,
CylindricalRepresentation,
UnitSphericalRepresentation))
def test_neg_pos(self, representation):
in_rep = self.cartesian.represent_as(representation)
pos_rep = +in_rep
assert type(pos_rep) is type(in_rep)
assert pos_rep is not in_rep
assert np.all(representation_equal(pos_rep, in_rep))
neg_rep = -in_rep
assert type(neg_rep) is type(in_rep)
assert np.all(neg_rep.norm() == in_rep.norm())
in_rep_xyz = in_rep.to_cartesian().xyz
assert_quantity_allclose(neg_rep.to_cartesian().xyz,
-in_rep_xyz, atol=1.e-10*in_rep_xyz.unit)
def test_mul_div_spherical(self):
s0 = self.spherical / (1. * u.Myr)
assert isinstance(s0, SphericalRepresentation)
assert s0.distance.dtype.kind == 'f'
assert np.all(s0.lon == self.spherical.lon)
assert np.all(s0.lat == self.spherical.lat)
assert np.all(s0.distance == self.distance / (1. * u.Myr))
s1 = (1./u.Myr) * self.spherical
assert isinstance(s1, SphericalRepresentation)
assert np.all(representation_equal(s1, s0))
s2 = self.spherical * np.array([[1.], [2.]])
assert isinstance(s2, SphericalRepresentation)
assert s2.shape == (2, self.spherical.shape[0])
assert np.all(s2.lon == self.spherical.lon)
assert
|
np.all(s2.lat == self.spherical.lat)
|
numpy.all
|
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import classification_report
import argparse
import datetime
import math
import os
import csv
import numpy as np
import logging
import sys
import random
from char_lstm_tagger import CharLSTMTagger
from lstm_tagger import LSTMTagger
from char_cnn_tagger import CharCNNTagger
from mtl_wrapper import MTLWrapper
try:
import load_data
from utils import split_train_val
from data_classes import write_sentences_to_excel
except:
pass
# Module-level logger used throughout this file (standard setup assumed; the original definition is not shown in this excerpt).
logger = logging.getLogger(__name__)
UNKNOWN = 'UNKNOWN'
FLAT = "flat"
MTL = "multitask"
HIERARCHICAL = "hierarchical"
THRESHOLD = 2
def get_index_of_max(input):
index = 0
for i in range(1, len(input)):
if input[i] > input[index]:
index = i
return index
def get_max_prob_result(input, ix_to_tag):
return ix_to_tag[get_index_of_max(input)]
def prepare_char_sequence(word, to_ix):
idxs = []
for i in range(len(word) - 1):
curr_char = word[i]
next_char = word[i+1]
if curr_char == "'": # treat letters followed by ' as single character
continue
if next_char == "'":
char = curr_char+next_char
else:
char = curr_char
idxs.append(get_index(char, to_ix))
if word[-1] != "'":
idxs.append(get_index(word[-1], to_ix))
return idxs
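# --- Hedged example of prepare_char_sequence (illustrative only; toy character dictionary) ---
# Shows how a letter followed by ' is indexed as a single two-character symbol.
# (get_index, defined further below, supplies the UNKNOWN fallback at call time.)
def _demo_prepare_char_sequence():
    toy_char_to_ix = {"a": 0, "b'": 1, "c": 2, UNKNOWN: 3}
    print(prepare_char_sequence("ab'c", toy_char_to_ix))   # -> [0, 1, 2]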
def prepare_sequence_for_chars(seq, to_ix, char_to_ix, poses=None, pos_to_ix=None):
res = []
if pos_to_ix and poses:
for (w, _), pos in zip(seq, poses):
res.append((get_index(w, to_ix),
prepare_char_sequence(w, char_to_ix),
get_index(pos, pos_to_ix)))
else:
for w, _ in seq:
res.append((get_index(w, to_ix), prepare_char_sequence(w, char_to_ix)))
return res
def prepare_sequence_for_bpes(seq, to_ix, bpe_to_ix, poses=None, pos_to_ix=None):
res = []
if pos_to_ix and poses:
for (w, bpes), pos in zip(seq, poses):
res.append((get_index(w, to_ix), [get_index(bpe, bpe_to_ix) for bpe in bpes],
get_index(pos, pos_to_ix)))
else:
for w, bpes in seq:
res.append((get_index(w, to_ix),
[get_index(bpe, bpe_to_ix) for bpe in bpes]))
return res
def prepare_sequence_for_words(seq, to_ix, poses=None, pos_to_ix=None):
idxs = []
if pos_to_ix and poses:
for (w, _), pos in zip(seq, poses):
idxs.append((get_index(w, to_ix),
get_index(pos, pos_to_ix)))
else:
for w, _ in seq:
ix = get_index(w, to_ix)
idxs.append((ix,))
return idxs
def prepare_target(seq, to_ix, field_idx):
idxs = []
for w in seq:
ix = get_index(w[field_idx], to_ix)
idxs.append(ix)
return torch.LongTensor(idxs)
def reverse_dict(to_ix):
# receives a dictionary with words/tags mapped to indices
# and returns a dictionary mapping indices to words/tags
ix_to = {}
for k, v in to_ix.items():
ix_to[v] = k
return ix_to
def get_index(w, to_ix):
return to_ix.get(w, to_ix[UNKNOWN])
def train(training_data, val_data, model_path, word_dict_path, char_dict_path,
bpe_dict_path, tag_dict_path, frequencies, word_emb_dim, char_emb_dim,
hidden_dim, dropout, num_kernels=1000, kernel_width=6, by_char=False,
by_bpe=False, with_smoothing=False, cnn=False, directions=1, device='cpu',
save_all_models=False, save_best_model=True, epochs=300, lr=0.1, batch_size=8,
morph=None, weight_decay=0, loss_weights=(1,1,1,1,1), seed=42):
# training data of shape: [(sent, tags), (sent, tags)]
# where sent is of shape: [(word, bpe), (word, bpe)], len(sent) == number of words
# and tags is of shape: [(pos, an1, an2, an3, enc),...], len(tags) == len(sent) == number of words
field_names = ["pos", "an1", "an2", "an3", "enc"]
model_path_parts = model_path.split(".")
dict_path_parts = tag_dict_path.split(".")
pos_training_data = [(sent, [tag_set[0] for tag_set in tags]) for sent, tags in training_data]
logger.info(f"Number of sentences in training data: {len(pos_training_data)}")
pos_model_path = model_path_parts[0] + "-pos." + model_path_parts[1]
pos_dict_path = dict_path_parts[0] + "-pos." + dict_path_parts[1]
word_to_ix, char_to_ix, bpe_to_ix, pos_to_ix = prepare_dictionaries(pos_training_data, with_smoothing, frequencies)
torch.save(pos_to_ix, pos_dict_path)
torch.save(word_to_ix, word_dict_path)
torch.save(char_to_ix, char_dict_path)
torch.save(bpe_to_ix, bpe_dict_path)
val_sents = None
if by_char:
if val_data:
val_sents = [prepare_sequence_for_chars(val_sent[0], word_to_ix,
char_to_ix)
for i, val_sent in enumerate(val_data)]
train_sents = [prepare_sequence_for_chars(training_sent[0], word_to_ix,
char_to_ix)
for i, training_sent in enumerate(training_data)]
elif by_bpe:
if val_data:
val_sents = [prepare_sequence_for_bpes(val_sent[0], word_to_ix, bpe_to_ix)
for i, val_sent in enumerate(val_data)]
train_sents = [prepare_sequence_for_bpes(training_sent[0], word_to_ix,
bpe_to_ix)
for i, training_sent in enumerate(training_data)]
else:
if val_data:
val_sents = [prepare_sequence_for_words(val_sent[0], word_to_ix)
for i, val_sent in enumerate(val_data)]
train_sents = [prepare_sequence_for_words(training_sent[0], word_to_ix)
for i, training_sent in enumerate(training_data)]
if val_data:
logger.info(f"Number of sentences in val data: {len(val_sents)}")
val_poses = [[prepare_target(tag_sets, pos_to_ix, field_idx=0).to(device=device)]
# inside a list for MTL wrapper purposes
for (val_sent, tag_sets) in val_data]
else:
val_poses = None
train_poses = [[prepare_target(tag_sets, pos_to_ix, field_idx=0).to(device=device)]
# inside a list for MTL wrapper purposes
for (train_sent, tag_sets) in training_data]
logger.info("Finished preparing POS data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
if morph == MTL:
model_path = model_path
all_train_field_tags, all_val_field_tags, all_field_dicts = prepare_data_for_mtl(field_names, training_data,
val_data, device,
dict_path_parts)
logger.info(f"Finished preparing MTL data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
mtl_model = train_tag(train_sents, val_sents, all_train_field_tags, all_val_field_tags, model_path,
word_to_ix, char_to_ix, bpe_to_ix, all_field_dicts, word_emb_dim, char_emb_dim,
hidden_dim, dropout, num_kernels, kernel_width, by_char, by_bpe, cnn, directions,
device, save_all_models, save_best_model, epochs, lr, batch_size,
weight_decay=weight_decay, loss_weights=loss_weights, seed=seed)
return mtl_model
logger.info("Preparing POS training data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
pos_model = train_tag(train_sents, val_sents, train_poses, val_poses, pos_model_path, word_to_ix,
char_to_ix, bpe_to_ix, [pos_to_ix],
word_emb_dim, char_emb_dim, hidden_dim, dropout, num_kernels,
kernel_width, by_char, by_bpe, cnn, directions,
device, save_all_models, save_best_model, epochs, lr,
batch_size, weight_decay=weight_decay, seed=seed)
if morph == FLAT or morph == HIERARCHICAL:
pos_dict_size = 0
if morph == HIERARCHICAL:
pos_dict_size = len(pos_to_ix)
if by_bpe or by_char:
train_sents = [[(idxs[0], idxs[1], tag_idx)
for idxs, tag_idx in zip(sent, sent_tags[0])]
for sent, sent_tags in zip(train_sents, train_poses)]
if val_data:
val_sents = [[(word_idx, char_idx, tag_idx)
for (word_idx, char_idx), tag_idx in zip(sent, sent_tags[0])]
for sent, sent_tags in zip(val_sents, val_poses)]
else:
train_sents = [[(word_idx, tag_idx)
for word_idx, tag_idx in zip(sent, sent_tags[0])]
for sent, sent_tags in zip(train_sents, train_poses)]
if val_data:
val_sents = [[(word_idx,tag_idx)
for word_idx, tag_idx in zip(sent, sent_tags[0])]
for sent, sent_tags in zip(val_sents, val_poses)]
field_models = [pos_model]
for field_idx, field_name in enumerate(field_names[1:]):
logger.info(f"Preparing {field_name} training data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
field_training_data = [(sent, [tag_set[field_idx+1] for tag_set in tags]) for sent, tags in training_data]
field_tag_to_ix = prepare_tag_dict(field_training_data)
field_dict_path = dict_path_parts[0] + f"-{field_name}." + dict_path_parts[1]
torch.save(field_tag_to_ix, field_dict_path)
field_model_path = model_path_parts[0] + f"-{field_name}." + model_path_parts[1]
if val_data:
val_field_tags = [[prepare_target(tag_sets, field_tag_to_ix, field_idx=(field_idx+1)).to(device=device)]
for (val_sent, tag_sets) in val_data]
else:
val_field_tags = None
train_field_tags = [[prepare_target(tag_sets, field_tag_to_ix, field_idx=field_idx+1).to(device=device)]
for (train_sent, tag_sets) in training_data]
logger.info(f"Finished preparing {field_name} data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
field_model = train_tag(train_sents, val_sents, train_field_tags, val_field_tags, field_model_path,
word_to_ix, char_to_ix, bpe_to_ix, [field_tag_to_ix], word_emb_dim, char_emb_dim,
hidden_dim, dropout, num_kernels, kernel_width, by_char, by_bpe, cnn, directions,
device, save_all_models, save_best_model, epochs, lr, batch_size,
weight_decay=weight_decay, pos_dict_size=pos_dict_size, seed=seed)
field_models.append(field_model)
return field_models[0], field_models[1], field_models[2], field_models[3], field_models[4]
else:
return pos_model
def train_tag(train_sents, val_sents, train_tags, val_tags, model_path, word_to_ix, char_to_ix,
bpe_to_ix, tag_to_ix_list, word_emb_dim, char_emb_dim,
hidden_dim, dropout, num_kernels=1000, kernel_width=6, by_char=False,
by_bpe=False, cnn=False, directions=1, device='cpu',
save_all_models=False, save_best_model=True, epochs=300, lr=0.1,
batch_size=8, weight_decay=0, pos_dict_size=0, loss_weights=None, seed=42):
"""
This is the central function that runs the training process; it trains a model on a given tag (or set of tags),
with or without early stopping, based on the hyperparameters provided to the function call. It saves and returns
the best model.
"""
random.seed(seed)
torch.manual_seed(seed)
torch.autograd.set_detect_anomaly(True)
base_model = base_model_factory(by_char or by_bpe, cnn)
model = MTLWrapper(word_emb_dim, char_emb_dim, hidden_dim, dropout, len(word_to_ix),
len(char_to_ix) if by_char else len(bpe_to_ix), [len(tag_to_ix) for tag_to_ix in tag_to_ix_list],
num_kernels, kernel_width, directions=directions, device=device, model_type=base_model,
pos_dict_size=pos_dict_size)
# move to gpu if supported
model = model.to(device=device)
loss_function = nn.NLLLoss().to(device=device)
optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
logger.info("Begin training:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
best_score = math.inf
best_model = None
best_model_path = None
patience = 0
val_loss = None
for epoch in range(epochs):
model.train()
running_loss = 0.0
if epoch % 10 == 0:
logger.info("Beginning epoch {}:".format(epoch))
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
sys.stdout.flush()
count = 0
r = list(range(len(train_sents)))
random.shuffle(r)
for i in range(math.ceil(len(train_sents)/batch_size)):
batch = r[i*batch_size:(i+1)*batch_size]
losses = []
for j in batch:
sentence = train_sents[j]
tags = train_tags[j]
# skip sentences with zero words or zero tags (though it should be equivalent)
if (not len(sentence)) or (not len(tags)):
continue
# Step 1. Remember that Pytorch accumulates gradients.
# We need to clear them out before each instance
model.zero_grad()
# Also, we need to clear out the hidden state of the LSTM,
# detaching it from its history on the last instance.
model.hidden = model.init_hidden(hidden_dim)
sentence_in = sentence
targets = tags
# Step 3. Run our forward pass.
tag_scores = model(sentence_in)
loss = [loss_function(tag_scores[i], targets[i]) for i in range(len(tag_scores))]
loss = torch.stack(loss)
if loss_weights:
if len(loss_weights) != len(loss):
logger.info(f"Received {len(loss_weights)} weights, for {len(loss)} tasks. Using equal weights.")
avg_loss = sum(loss)/len(loss)
else:
weighted_loss_sum = 0
for task_loss, weight in zip(loss, loss_weights):
weighted_loss_sum += task_loss*weight
avg_loss = weighted_loss_sum/sum(loss_weights)
else:
avg_loss = sum(loss)/len(loss)
losses.append(avg_loss)
# Step 4. Compute the loss, gradients, and update the parameters by
# calling optimizer.step()
losses = torch.stack(losses)
total_loss = sum(losses)/len(losses) # average over all sentences in batch
total_loss.backward()
running_loss += total_loss.item()
optimizer.step()
count += 1
if patience == 5 and best_model:
if save_best_model:
logger.info("Saving best model at {}".format(model_path))
torch.save(best_model.state_dict(), model_path)
logger.info("Best validation loss: {}".format(best_score))
sys.stdout.flush()
break
predicted_train = predict_tags(model, train_sents)
logger.info("Loss and accuracy at epoch {}:".format(epoch))
logger.info("Loss on training data: {}".format(running_loss/count))
if val_sents:
predicted_val = predict_tags(model, val_sents)
val_loss = get_loss_on_val(val_sents, val_tags, predicted_val, loss_weights)
logger.info("Loss on validation data: {}".format(val_loss))
val_accuracy = calculate_accuracy(predicted_val, val_tags)
logger.info("Accuracy on validation data: {}".format(val_accuracy))
train_accuracy = calculate_accuracy(predicted_train, train_tags)
logger.info("Accuracy on training data: {}".format(train_accuracy))
save_path = model_path.split(".")
save_path = save_path[0] + "_epoch_" + str(epoch + 1) + "." + save_path[1]
if val_sents:
if val_loss < best_score:
base_model = base_model_factory(by_char or by_bpe, cnn)
best_model = MTLWrapper(word_emb_dim, char_emb_dim, hidden_dim, dropout,
len(word_to_ix), len(char_to_ix) if by_char else len(bpe_to_ix),
[len(tag_to_ix) for tag_to_ix in tag_to_ix_list], num_kernels,
kernel_width, directions=directions, device=device, model_type=base_model,
pos_dict_size=pos_dict_size)
best_model.load_state_dict(model.state_dict())
best_model_path = save_path
best_score = val_loss
best_accuracy = val_accuracy
patience = 0
else:
patience += 1
logger.info("Patience: {}".format(patience))
if save_all_models:
logger.info("Saving model at checkpoint.")
torch.save({
'epoch': epoch + 1,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': running_loss/count,
'val_loss': val_loss
}, save_path)
if epoch == epochs - 1 and best_model and best_model_path and save_best_model:
logger.info("Reached max epochs")
logger.info("Saving best model at {}".format(model_path))
torch.save(best_model.state_dict(), model_path)
if val_sents:
logger.info("Best validation loss: {}".format(best_score))
logger.info("Best validation accuracy: {}".format(best_accuracy))
break
sys.stdout.flush()
logger.info("Finished training:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
if not best_model_path:
logger.info("We never found a best model, saving final model")
torch.save(model.state_dict(), model_path)
return best_model
def prepare_data_for_mtl(field_names, training_data, val_data, device, dict_path_parts, test=False):
all_field_dicts = []
all_train_field_tags_misordered = []
all_val_field_tags_misordered = []
for field_idx, field_name in enumerate(field_names):
logger.info(f"Preparing {field_name} data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
field_training_data = [(sent, [tag_set[field_idx] for tag_set in tags]) for sent, tags in training_data]
if not test:
field_tag_to_ix = prepare_tag_dict(field_training_data)
field_dict_path = dict_path_parts[0] + f"-{field_name}." + dict_path_parts[1]
torch.save(field_tag_to_ix, field_dict_path)
all_field_dicts.append(field_tag_to_ix)
else:
field_dict_path = dict_path_parts[0] + f"-{field_name}." + dict_path_parts[1]
field_tag_to_ix = torch.load(field_dict_path)
if val_data:
val_field_tags = [[prepare_target(tag_sets, field_tag_to_ix, field_idx=field_idx).to(device=device)]
for (val_sent, tag_sets) in val_data]
else:
val_field_tags = None
train_field_tags = [[prepare_target(tag_sets, field_tag_to_ix, field_idx=field_idx).to(device=device)]
for (train_sent, tag_sets) in training_data]
all_train_field_tags_misordered.append(train_field_tags)
all_val_field_tags_misordered.append(val_field_tags)
all_train_field_tags = reorder_sent_tags(all_train_field_tags_misordered, device)
if val_data:
all_val_field_tags = reorder_sent_tags(all_val_field_tags_misordered, device)
else:
all_val_field_tags = None
return all_train_field_tags, all_val_field_tags, all_field_dicts
def reorder_sent_tags(misordered_tags, device):
"""
This function receives a list of lists of tags of the following shape:
[[field_tags], [field_tags], [field_tags], [field_tags], [field_tags]] (of length num_fields)
where each list [field_tags] = [[sent], [sent], [sent]...]
and each [sent] = [word_tag, word_tag, word_tag...]
It outputs a list of length num_sentences, where each sentence is:
sent = [(w1_t1, w2_t1, w3_t1), (w1_t2, w2_t2, w3_t2)....] so len(sent) == num_fields
:param misordered_tags:
:return:
"""
num_fields = len(misordered_tags)
ordered_sents = []
for sent_idx, sent in enumerate(misordered_tags[0]):
new_sent = []
for field_idx in range(num_fields):
new_sent.append([misordered_tags[field_idx][sent_idx][0][word_idx] for word_idx in range(len(sent[0]))])
ordered_sents.append(torch.LongTensor(new_sent).to(device=device))
return ordered_sents
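# --- Tiny worked example of reorder_sent_tags (illustrative only; hand-built tensors) ---
# Two fields and one three-word sentence, laid out as [[field_tags], [field_tags]] per the docstring above.
def _demo_reorder_sent_tags():
    field_0 = [[torch.LongTensor([1, 2, 3])]]   # field 0 tags for the single sentence
    field_1 = [[torch.LongTensor([4, 5, 6])]]   # field 1 tags for the same sentence
    ordered = reorder_sent_tags([field_0, field_1], device='cpu')
    print(ordered[0])                           # tensor([[1, 2, 3], [4, 5, 6]]): one row per field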
def prepare_tag_dict(training_data):
tag_to_ix = {}
for sent, tags in training_data:
for word_bpe, tag in zip(sent, tags):
if tag not in tag_to_ix:
tag_to_ix[tag] = len(tag_to_ix)
if UNKNOWN not in tag_to_ix:
tag_to_ix[UNKNOWN] = len(tag_to_ix)
return tag_to_ix
def prepare_dictionaries(training_data, with_smoothing, frequencies):
word_to_ix = {}
char_to_ix = {}
bpe_to_ix = {}
tag_to_ix = {}
for sent, tags in training_data:
for word_bpe, tag in zip(sent, tags):
word = word_bpe[0]
bpes = word_bpe[1]
if word not in word_to_ix:
if not (with_smoothing and frequencies[word] <= THRESHOLD):
word_to_ix[word] = len(word_to_ix)
for i in range(len(word) - 1):
curr_char = word[i]
next_char = word[i + 1]
if curr_char == "'":
continue
if next_char == "'": # treat letters followed by ' as single character
char = curr_char + next_char
else:
char = curr_char
if char not in char_to_ix:
char_to_ix[char] = len(char_to_ix)
if word[-1] != "'":
if word[-1] not in char_to_ix:
char_to_ix[word[-1]] = len(char_to_ix)
if bpes:
for bpe in bpes:
if bpe not in bpe_to_ix:
bpe_to_ix[bpe] = len(bpe_to_ix)
if tag not in tag_to_ix:
tag_to_ix[tag] = len(tag_to_ix)
if UNKNOWN not in char_to_ix:
char_to_ix[UNKNOWN] = len(char_to_ix)
if UNKNOWN not in word_to_ix:
word_to_ix[UNKNOWN] = len(word_to_ix)
if UNKNOWN not in bpe_to_ix:
bpe_to_ix[UNKNOWN] = len(bpe_to_ix)
if UNKNOWN not in tag_to_ix:
tag_to_ix[UNKNOWN] = len(tag_to_ix)
return word_to_ix, char_to_ix, bpe_to_ix, tag_to_ix
def base_model_factory(by_char, cnn):
if not by_char:
return LSTMTagger
else:
if cnn:
return CharCNNTagger
else:
return CharLSTMTagger
def get_loss_on_val(val_sents, val_tags, predicted_tags, loss_weights):
"""
For giving different tasks different weights (such as giving POS a higher weight than enc)
:param val_sents:
:param val_tags:
:param predicted_tags:
:param loss_weights:
:return:
"""
loss_function = nn.NLLLoss()
loss = 0.0
divisor = len(val_sents)
for tags, predicted in zip(val_tags, predicted_tags):
if len(predicted) == 0:
divisor -= 1
continue
sent_loss = 0.0
if not loss_weights:
loss_weights = [1]*len(predicted)
for task_pred, task_tags, weight in zip(predicted, tags, loss_weights):
sent_loss += weight*loss_function(task_pred, task_tags)
avg_sent_loss = sent_loss/sum(loss_weights)
loss += avg_sent_loss
return loss/divisor
def predict_tags(model, sents):
model = model.eval()
predicted = []
for sent in sents:
if len(sent) == 0:
predicted.append([])
else:
predicted_tags = model(sent)
predicted.append(predicted_tags)
return predicted
def calculate_accuracy(predicted_tag_scores, true_tags_2d):
"""
Shape of predicted_tag_scores:
len(predicted_tag_scores) = len(sentences)
len(predicted_tag_scores[i]) = num_fields (a list of per-field score tensors)
shape(predicted_tag_scores[i][j]) = torch.Size([num_words_in_sentence, num_tags_in_field_j])
Therefore shape of results (flattened over sentences and fields):
len(results) = len(sentences)*num_fields, with [[tags of sent_1,field_1], [tags of sent_1,field_2], ...]
And true_tags is flattened the same way.
:param predicted_tag_scores:
:param true_tags_2d:
:return:
"""
score = 0
results = [[np.argmax(word_scores.cpu().detach().numpy()) for word_scores in field_scores]
for sent_scores in predicted_tag_scores for field_scores in sent_scores]
true_tags = [tag for tags in true_tags_2d for tag in tags]
num_tags = 0
for sent_result, sent_true in zip(results, true_tags):
sent_true = sent_true.cpu()
for result, true in zip(np.array(sent_result).flatten(), np.array(sent_true).flatten()):
if result == true:
score += 1
num_tags += 1
return score/num_tags
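# --- Tiny worked example of calculate_accuracy (illustrative only; one sentence, one field, three words) ---
# Scores are (num_words, num_tags); the third word is mispredicted, so accuracy is 2/3.
def _demo_calculate_accuracy():
    scores = [[torch.log_softmax(torch.tensor([[2.0, 0.1], [0.1, 2.0], [2.0, 0.1]]), dim=1)]]
    truth = [[torch.LongTensor([0, 1, 1])]]
    print(calculate_accuracy(scores, truth))   # -> 0.666...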
def get_pos_from_idxs_path(pos_idxs, pos_dict_path):
pos_dict = torch.load(pos_dict_path)
ix_to_tag = reverse_dict(pos_dict)
literal_pos_tags = [[ix_to_tag.get(tag, 'OOV') for tag in sentence] for sentence in pos_idxs]
return literal_pos_tags
def test(test_data, model_path, word_dict_path, char_dict_path, bpe_dict_path,
tag_dict_path, word_emb_dim, char_emb_dim, hidden_dim, dropout,
num_kernels, kernel_width, by_char=False, by_bpe=False, out_path=None,
cnn=False, directions=1, device='cpu', morph=False, use_true_pos=False,
test_sent_sources=None):
"""
Prepares all the data, and then calls the function that actually runs testing
"""
if not out_path:
out_path = str(datetime.date.today())
field_names = ["pos", "an1", "an2", "an3", "enc"]
model_path_parts = model_path.split(".")
dict_path_parts = tag_dict_path.split(".")
word_to_ix = torch.load(word_dict_path)
char_to_ix = torch.load(char_dict_path)
bpe_to_ix = torch.load(bpe_dict_path)
test_words = [[word[0] for word in test_sent[0]] for test_sent in test_data]
if by_char:
test_sents = [prepare_sequence_for_chars(test_sent[0], word_to_ix, char_to_ix)
for test_sent in test_data]
elif by_bpe:
test_sents = [prepare_sequence_for_bpes(test_sent[0], word_to_ix, bpe_to_ix)
for test_sent in test_data]
else:
test_sents = [prepare_sequence_for_words(test_sent[0], word_to_ix)
for test_sent in test_data]
if morph == MTL:
model_path = model_path
all_test_field_tags, _, _ = prepare_data_for_mtl(field_names, test_data, None, device, dict_path_parts, test=True)
all_field_dict_paths = [dict_path_parts[0] + f"-{field_name}." + dict_path_parts[1]
for field_name in field_names]
logger.info(f"Finished preparing MTL data:")
logger.info(datetime.datetime.now().strftime("%H:%M:%S"))
mtl_results = test_morph_tag(test_sents, all_test_field_tags, test_words, model_path,
word_to_ix, char_to_ix, bpe_to_ix, all_field_dict_paths,
word_emb_dim, char_emb_dim, hidden_dim, dropout, num_kernels, kernel_width,
by_char, by_bpe, out_path, cnn, directions, device, field_names=field_names,
test_sent_sources=test_sent_sources)
return mtl_results
results = []
pos_model_path = model_path_parts[0] + f"-pos." + model_path_parts[1]
pos_dict_path = dict_path_parts[0] + f"-pos." + dict_path_parts[1]
pos_out_path = out_path + f"-pos"
pos_tag_to_ix = torch.load(pos_dict_path)
test_pos_tags = [[prepare_target(tag_sets, pos_tag_to_ix, field_idx=0).to(device=device)]
for (train_sent, tag_sets) in test_data]
pos_results = test_morph_tag(test_sents, test_pos_tags, test_words, pos_model_path, word_to_ix, char_to_ix,
bpe_to_ix, [pos_dict_path], word_emb_dim, char_emb_dim, hidden_dim, dropout,
num_kernels, kernel_width, by_char, by_bpe, pos_out_path, cnn, directions, device,
field_names=["pos"], return_shaped_results=(morph==HIERARCHICAL),
test_sent_sources=test_sent_sources)
results.append(pos_results)
if morph == FLAT or morph == HIERARCHICAL:
pos_dict_size = 0
if morph == HIERARCHICAL:
if use_true_pos:
test_pos = [tags[0] for tags in test_pos_tags]
else:
test_pos = [tags[0] for tags in pos_results[1]]
if by_bpe or by_char:
test_sents = [[(idxs[0], idxs[1], tag_idx) for idxs, tag_idx in zip(sent, sent_tags)]
for sent, sent_tags in zip(test_sents, test_pos)]
else:
test_sents = [[(word_idx, tag_idx) for word_idx, tag_idx in zip(sent, sent_tags)]
for sent, sent_tags in zip(test_sents, test_pos)]
pos_dict_size = len(pos_tag_to_ix)
results[0] = pos_results[0][0]
else:
results[0] = pos_results[0]
for field_idx, field in enumerate(field_names[1:]):
field_idx += 1
field_model_path = model_path_parts[0] + f"-{field}." + model_path_parts[1]
field_dict_path = dict_path_parts[0] + f"-{field}." + dict_path_parts[1]
field_out_path = out_path + f"-{field}"
field_tag_to_ix = torch.load(field_dict_path)
test_field_tags = [[prepare_target(tag_sets, field_tag_to_ix, field_idx=field_idx).to(device=device)]
for (train_sent, tag_sets) in test_data]
field_results = test_morph_tag(test_sents, test_field_tags, test_words, field_model_path, word_to_ix,
char_to_ix, bpe_to_ix, [field_dict_path], word_emb_dim, char_emb_dim,
hidden_dim, dropout, num_kernels, kernel_width, by_char, by_bpe,
field_out_path, cnn, directions, device, pos_dict_size=pos_dict_size,
field_names=[field], test_sent_sources=test_sent_sources)
results.append(field_results[0])
return results
else:
return pos_results[0]
def test_morph_tag(test_sents, test_tags, test_words, model_path, word_dict, char_dict, bpe_dict, tag_dict_path_list,
word_emb_dim, char_emb_dim, hidden_dim, dropout, num_kernels, kernel_width, by_char=False,
by_bpe=False, out_path=None, cnn=False, directions=1, device='cpu',
pos_dict_size=0, return_shaped_results=False, field_names=None, test_sent_sources=None):
if not out_path:
out_path = str(datetime.date.today())
if torch.cuda.is_available():
map_location = lambda storage, loc: storage.cuda()
else:
map_location = 'cpu'
# checkpoint = torch.load(load_path, map_location=map_location)
# tag_dicts are dictionaries mapping tag to index
tag_dict_list = [torch.load(tag_dict_path) for tag_dict_path in tag_dict_path_list]
base_model = base_model_factory(by_char or by_bpe, cnn)
model = MTLWrapper(word_emb_dim, char_emb_dim, hidden_dim, dropout, len(word_dict),
len(char_dict) if by_char else len(bpe_dict), [len(tag_dict) for tag_dict in tag_dict_list],
num_kernels, kernel_width, directions=directions, device=device, pos_dict_size=pos_dict_size,
model_type=base_model)
model.load_state_dict(torch.load(model_path, map_location=map_location))
model = model.to(device=device)
tag_scores = predict_tags(model, test_sents)
if return_shaped_results:
# shape of tag_scores: [first sentence:[[(all tag scores for w1_f1 - max is the score you want),
# (w2_f1)], [(w1_f2), (w2_f2)...]...],
# [second sentence: [[(w1_f1), (w2_f1), (w3_f1)], [(w1_f2), (w2_f2), (w3_f2)]]]
shaped_results = [[[np.argmax(word_scores.cpu().detach().numpy()) for word_scores in field_scores]
for field_scores in sentence_scores]
for sentence_scores in tag_scores]
ix_to_tag_list = [reverse_dict(tag_dict) for tag_dict in tag_dict_list]
literal_test_tags = []
for sent in test_tags:
sent_literal = []
for field_idx, field_tags in enumerate(sent):
field_literal = [ix_to_tag_list[field_idx].get(tag.item(), 'OOV') for tag in field_tags]
sent_literal.append(field_literal)
literal_test_tags.append(sent_literal)
write_predictions_to_file(test_sents, test_words, tag_scores, out_path+"-tagged.tsv", ix_to_tag_list, word_dict,
ground_truth=literal_test_tags, field_names=field_names,
test_sent_sources=test_sent_sources)
results = [[[np.argmax(word_scores.cpu().detach().numpy()) for word_scores in field_scores]
for field_scores in sentence_scores] for sentence_scores in tag_scores]
literal_test_predicted = []
for sent in results:
sent_literal = []
for field_idx, field_tags in enumerate(sent):
field_literal = [ix_to_tag_list[field_idx].get(tag.item(), 'OOV') for tag in field_tags]
sent_literal.append(field_literal)
literal_test_predicted.append(sent_literal)
report_dicts = get_classification_report(literal_test_tags, literal_test_predicted, out_path, model_path,
len(tag_dict_list), field_names=field_names)
if not field_names:
field_names = [f"{i}" for i in range(len(report_dicts))]
for field_name, report_dict in zip(field_names, report_dicts):
logger.info(f"Result {field_name} precision: {report_dict['accuracy']}")
logger.info(f"Result {field_name} (weighted) recall: {report_dict['weighted avg']['recall']}")
logger.info(f"Result {field_name} (weighted) f1: {report_dict['weighted avg']['f1-score']}")
if return_shaped_results:
return report_dicts, shaped_results
return report_dicts
def get_classification_report(test_tags, test_predicted, out_path, model_path, num_fields, field_names=None):
if not field_names:
field_names = [f"{i}" for i in range(num_fields)]
outpaths = [out_path+f"-{field_name}.report" for field_name in field_names]
report_dicts = []
for field_idx, out_path in enumerate(outpaths):
field_true = [tag for sent in test_tags for tag in sent[field_idx]]
field_predicted = [tag for sent in test_predicted for tag in sent[field_idx]]
report = classification_report(field_true, field_predicted)
report_dict = classification_report(field_true, field_predicted, output_dict=True)
report_dicts.append(report_dict)
with open(out_path, 'w+', encoding='utf8') as report_file:
report_file.write("Classification report:\n")
report_file.write("Model: {}\n".format(model_path))
report_file.write("-------------------------------------\n")
report_file.write(report)
return report_dicts
def write_predictions_to_file(sentences, test_words, tag_scores, out_path, tag_dict_list, word_dict, ground_truth=None,
field_names=None, test_sent_sources=None):
results = [[[np.argmax(word_scores.cpu().detach().numpy()) for word_scores in field_scores]
for field_scores in sentence_scores] for sentence_scores in tag_scores]
if not test_sent_sources:
test_sent_sources = [("", "") for _ in sentences]
num_fields = len(tag_dict_list)
with open(out_path, 'w+', encoding='utf8', newline="") as out_f:
tsv_writer = csv.writer(out_f, delimiter='\t')
if ground_truth:
column_names = ['source_file', 'source_sheet', 'sentence_id', 'word']
if field_names:
if len(field_names) == num_fields:
for name in field_names:
column_names.extend([f"true_{name}", f"predicted_{name}"])
else:
logger.info("Please provide field names according to number of fields")
else:
for i in range(num_fields):
column_names.extend([f"true_{i}", f"predicted_{i}"])
tsv_writer.writerow(column_names)
for i, (sent_words, true_fields, pred_fields, source) in enumerate(zip(test_words, ground_truth,
results, test_sent_sources)):
for j, word in enumerate(sent_words):
row = [source[0], source[1], i, word]
for field_idx, (field_true_tags, field_pred_tags) in enumerate(zip(true_fields, pred_fields)):
row.extend([field_true_tags[j], tag_dict_list[field_idx][field_pred_tags[j]]])
tsv_writer.writerow(row)
else:
column_names = ['source_file', 'source_sheet', 'sentence_id', 'word']
if field_names:
if len(field_names) == num_fields:
for name in field_names:
column_names.extend([f"predicted_{name}"])
else:
logger.info("Please provide field names according to number of fields")
else:
for i in range(num_fields):
column_names.extend([f"predicted_{i}"])
tsv_writer.writerow(column_names)
for i, (sentence, pred_fields, source) in enumerate(zip(sentences, results, test_sent_sources)):
for j, word in enumerate(sentence):
row = [source[0], source[1], i, word[0]]
for field_idx, field_pred_tags in enumerate(pred_fields):
row.extend([tag_dict_list[field_idx][field_pred_tags[j]]])
tsv_writer.writerow(row)
def tag(data_path, model_path, word_dict_path, char_dict_path,
bpe_dict_path, tag_dict_path, word_emb_dim, char_emb_dim, hidden_dim, dropout,
num_kernels, kernel_width, by_char=False, by_bpe=False,
out_path=None, cnn=False, directions=1, device='cpu', morph=None, use_true_pos=False):
"""
Tag an untagged data file with a trained model and write the tagged sentences to out_path.
:param data_path: path to the file with untagged sentences
:param model_path: path to the trained model; for non-MTL morphological tagging, per-field models are expected at <model base>-<field>.<extension>
:param word_dict_path: path to the saved word-to-index dictionary
:param char_dict_path: path to the saved character-to-index dictionary
:param bpe_dict_path: path to the saved BPE-to-index dictionary
:param tag_dict_path: path to the saved tag-to-index dictionaries; per-field dictionaries are expected at <dict base>-<field>.<extension>
:param word_emb_dim: word embedding dimension
:param char_emb_dim: character (or BPE) embedding dimension
:param hidden_dim: hidden dimension of the tagger
:param dropout: dropout probability
:param num_kernels: number of convolutional kernels (CNN model only)
:param kernel_width: width of the convolutional kernels (CNN model only)
:param by_char: use character-level representations
:param by_bpe: use BPE-level representations
:param out_path: path where the tagged sentences are written
:param cnn: use the CNN-based base model
:param directions: number of encoder directions (1 or 2)
:param device: torch device to run on ('cpu' or a CUDA device)
:param morph: morphological tagging mode (None for POS only, or MTL / FLAT / HIERARCHICAL)
:param use_true_pos: in HIERARCHICAL mode, feed gold POS tags to the per-field models instead of predicted POS
:return: None; the tagged sentences are written to out_path
"""
# This is the function for actually just tagging
untagged_data, untagged_sent_objects = load_data.prepare_untagged_data(data_path)
untagged_sents = [sent for sent in untagged_data if len(sent) > 0]
if len(untagged_sents) != len(untagged_sent_objects):
logger.warning("Number of untagged sentences does not match the number of sentence objects")
model_path_parts = model_path.split(".")
dict_path_parts = tag_dict_path.split(".")
if morph:
field_names = ["pos", "an1", "an2", "an3", "enc"]
else:
field_names = ["pos"]
if device == torch.device('cpu'):
map_location = device
else:
map_location = None
word_dict = torch.load(word_dict_path)
char_dict = torch.load(char_dict_path)
bpe_dict = torch.load(bpe_dict_path)
# tag_dict is a dictionary mapping tag to index!
tag_dict_path_list = [dict_path_parts[0] + f"-{field_name}." + dict_path_parts[1]
for field_name in field_names]
tag_dict_list = [torch.load(tag_dict_path) for tag_dict_path in tag_dict_path_list]
ix_to_tag_list = [reverse_dict(tag_dict) for tag_dict in tag_dict_list]
base_model = base_model_factory(by_char or by_bpe, cnn)
if by_char:
test_words = [prepare_sequence_for_chars(sent, word_dict, char_dict) for sent in untagged_sents]
elif by_bpe:
test_words = [prepare_sequence_for_bpes(sent, word_dict, bpe_dict) for sent in untagged_sents]
else:
test_words = [prepare_sequence_for_words(sent, word_dict).to(device=device) for sent in untagged_sents]
if morph == MTL:
model = MTLWrapper(word_emb_dim, char_emb_dim, hidden_dim, dropout, len(word_dict),
len(char_dict) if by_char else len(bpe_dict), [len(tag_dict) for tag_dict in tag_dict_list],
num_kernels, kernel_width, directions=directions, device=device,
model_type=base_model)
model.load_state_dict(torch.load(model_path, map_location=map_location))
model = model.to(device=device)
tag_scores = predict_tags(model, test_words)
results = [[[np.argmax(word_scores.cpu().detach().numpy()) for word_scores in field_scores]
for field_scores in sentence_scores] for sentence_scores in tag_scores]
else:
results_by_field = []
pos_model_path = model_path_parts[0] + f"-pos." + model_path_parts[1]
pos_dict = tag_dict_list[0]
model = MTLWrapper(word_emb_dim, char_emb_dim, hidden_dim, dropout, len(word_dict),
len(char_dict) if by_char else len(bpe_dict), [len(pos_dict)],
num_kernels, kernel_width, directions=directions, device=device,
model_type=base_model)
model.load_state_dict(torch.load(pos_model_path, map_location=map_location))
model = model.to(device=device)
tag_scores = predict_tags(model, test_words)
results_by_field.append([[[np.argmax(word_scores.cpu().detach().numpy()) for word_scores in field_scores]
for field_scores in sentence_scores] for sentence_scores in tag_scores])
if morph == FLAT or morph == HIERARCHICAL:
pos_dict_size = 0
if morph == HIERARCHICAL:
if use_true_pos:
# Be very sure the words have POS tags;
# otherwise you'll be using a whole lot of Nones for prediction
test_pos = [[word_an.pos for word_an in sent_obj.word_analyses]
for sent_obj in untagged_sent_objects]
test_pos = [torch.LongTensor([get_index(pos, pos_dict) for pos in sent_poses]).to(device=device) for sent_poses in test_pos]
else:
test_pos = [sent[0] for sent in results_by_field[0]]
if by_bpe or by_char:
test_words = [[(idxs[0], idxs[1], tag_idx) for idxs, tag_idx in zip(sent, sent_tags)]
for sent, sent_tags in zip(test_words, test_pos)]
else:
test_words = [[(word_idx, tag_idx) for word_idx, tag_idx in zip(sent, sent_tags)]
for sent, sent_tags in zip(test_words, test_pos)]
pos_dict_size = len(tag_dict_list[0])
for field_idx, field in enumerate(field_names[1:]):
field_idx += 1
field_model_path = model_path_parts[0] + f"-{field}." + model_path_parts[1]
model = MTLWrapper(word_emb_dim, char_emb_dim, hidden_dim, dropout, len(word_dict),
len(char_dict) if by_char else len(bpe_dict), [len(tag_dict_list[field_idx])],
num_kernels, kernel_width, directions=directions, device=device,
model_type=base_model, pos_dict_size=pos_dict_size)
model.load_state_dict(torch.load(field_model_path, map_location=map_location))
model = model.to(device=device)
tag_scores = predict_tags(model, test_words)
# TODO make sure shape is appropriate.
results_by_field.append([[[np.argmax(word_scores.cpu().detach().numpy())
for word_scores in field_scores]
for field_scores in sentence_scores]
for sentence_scores in tag_scores])
results = reshape_by_field_to_by_sent(results_by_field)
else:
# this is POS only tagging
results = reshape_by_field_to_by_sent(results_by_field, num_fields=1) # TODO make sure this is correct
updated_sentences = add_tags_to_sent_objs(untagged_sent_objects, results, ix_to_tag_list, field_names)
write_tagged_sents(updated_sentences, out_path)
def reshape_by_field_to_by_sent(results_by_field, num_fields=5):
"""
Input is a list of length num_fields, where each inner list holds one entry per sentence
(each entry wrapping the predicted tag indices for the words of that sentence).
Output is a list of length num_sentences, where len(output[i]) == num_fields.
:param results_by_field: predictions grouped by field
:param num_fields: number of predicted fields per sentence
:return: predictions regrouped by sentence
"""
sentences = []
for sentence_idx in range(len(results_by_field[0])):
sentence_words = []
for field_idx in range(num_fields):
sentence_words.append(results_by_field[field_idx][sentence_idx][0])
sentences.append(sentence_words)
return sentences
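# Illustrative example (hypothetical tag indices) of the reshaping performed above, with
# num_fields=2 and two sentences of 3 and 2 words respectively:
#   results_by_field = [
#       [[[1, 2, 3]], [[4, 5]]],   # field 0, one wrapped prediction list per sentence
#       [[[7, 8, 9]], [[6, 0]]],   # field 1
#   ]
#   reshape_by_field_to_by_sent(results_by_field, num_fields=2)
#   # -> [[[1, 2, 3], [7, 8, 9]], [[4, 5], [6, 0]]]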
def add_tags_to_sent_objs(sentences, tags, ix_to_tag_list, field_names):
"""
Shape of tags:
len(tags) == len(sentences)
len(tags[i]) == len(field_names) == len(ix_to_tag_list)
len(tags[i][j]) == number of words in sentences[i]
:param sentences: sentence objects to update in place
:param tags: predicted tag indices, grouped per sentence and then per field
:param ix_to_tag_list: list of index-to-tag dictionaries, one per field
:param field_names: names of the tagged fields
:return: the updated sentence objects
"""
field_to_column_dict = {"pos": "pos", "an1": "analysis1", "an2": "analysis2",
"an3": "analysis3", "enc": "enclitic_pronoun"}
for sentence, sent_tags in zip(sentences, tags):
for field_name, field_tags, ix_to_tag in zip(field_names, sent_tags, ix_to_tag_list):
for word_an, tag in zip(sentence.word_analyses, field_tags):
word_an.set_val(field_to_column_dict[field_name], ix_to_tag[tag])
return sentences
def write_tagged_sents(sentences, dir):
write_sentences_to_excel(sentences, dir)
def kfold_val(data_paths, model_path, word_dict_path, char_dict_path, bpe_path,
tag_dict_path, result_path, k, word_emb, char_emb, hidden_dim,
dropout, num_kernels=1000, kernel_width=6, by_char=False, by_bpe=False,
with_smoothing=False, cnn=False, directions=1, device='cpu',
epochs=300, morph=None, weight_decay=0, use_true_pos=False, loss_weights=(1,1,1,1,1)):
logger.info("Beginning k-fold validation")
results = []
fold = 0
for train_sentences, val_sentences, test_sentences, train_word_count in \
load_data.prepare_kfold_data(data_paths, k=k, seed=0):
logger.info("Beginning fold #{}".format(fold+1))
new_model_path = add_fold_dir_to_path(model_path, fold)
new_word_path = add_fold_dir_to_path(word_dict_path, fold)
new_char_path = add_fold_dir_to_path(char_dict_path, fold)
new_tag_path = add_fold_dir_to_path(tag_dict_path, fold)
new_bpe_path = add_fold_dir_to_path(bpe_path, fold)
train(train_sentences, val_sentences, new_model_path, new_word_path,
new_char_path, new_bpe_path, new_tag_path, train_word_count, word_emb,
char_emb, hidden_dim, dropout, num_kernels, kernel_width, by_char, by_bpe,
with_smoothing, cnn, directions, device, epochs=epochs, morph=morph,
weight_decay=weight_decay, loss_weights=loss_weights)
new_result_path = add_fold_dir_to_path(result_path, fold)
results.append(test(test_sentences, new_model_path, new_word_path, new_char_path, new_bpe_path,
new_tag_path, word_emb, char_emb, hidden_dim, dropout, num_kernels,
kernel_width, by_char, by_bpe, out_path=new_result_path,
cnn=cnn, directions=directions, device=device, morph=morph, use_true_pos=use_true_pos))
fold += 1
agg_result_path = result_path + ".agg_res"
mic_prec = []
mac_prec = []
weight_prec = []
with open(agg_result_path, 'w+') as f:
if not morph:
for i, res in enumerate(results):
micro = res.get("micro avg", res["accuracy"])
f.write("Fold {}:\nmicro avg: {}\nmacro avg: {}\nweighted avg: {}\n".format(
i, micro, res["macro avg"], res["weighted avg"]))
if "micro avg" in res:
mic_prec.append(res["micro avg"]["precision"])
else:
mic_prec.append(res["accuracy"]) # TODO you need to check how this appears in the dict!
mac_prec.append(res["macro avg"]["precision"])
weight_prec.append(res["weighted avg"]["precision"])
avg = np.mean(mic_prec)
std = np.std(mic_prec)
f.write("------------\nMicro:\nAverage precision: {}\nStandard deviation:{}\n".format(avg, std))
avg = np.mean(mac_prec)
std = np.std(mac_prec)
f.write("------------\nMacro:\nAverage precision: {}\nStandard deviation:{}\n".format(avg, std))
avg = np.mean(weight_prec)
import numpy as np
from lenstronomy.LensModel.Profiles.cnfw import CNFW
from pyHalo.Rendering.SpatialDistributions.compute_nfw_fast import FastNFW
import inspect
local_path = inspect.getfile(inspect.currentframe())[0:-11] + 'nfw_tables/'
class ProjectedNFW(object):
"""
This class approximates sampling from a full 3D NFW profile by
sampling the projected mass of a cored NFW profile in 2D, and then sampling
the z coordinate from a cored isothermal profile. This is MUCH faster than sampling from the
3D NFW profile, and is accurate to within a few percent.
"""
def __init__(self, rendering_radius, Rs, r_core_host, r200):
"""
:param rendering_radius: the maximum projected 2D radius where halos are rendered [kpc]
:param Rs: the scale radius of the host dark matter halo [kpc]
:param r_core_host: the core radius of the host dark matter halo [kpc]
:param r200: the virial radius of the host dark matter halo [kpc]
"""
self._cnfw_profile = CNFW()
self.rmax2d_kpc = rendering_radius
self._rs_kpc = Rs
self._xmin = 1e-4
self.xmax_2d = rendering_radius / Rs
self.xtidal = r_core_host / Rs
self.zmax_units_rs = r200 / Rs
self._xmin = rendering_radius / 30 / self._rs_kpc
self._norm = self._cnfw_profile._F(self._xmin, self.xtidal)
@classmethod
def from_keywords_master(self, keywords_master, lens_cosmo, geometry):
keywords = self.keywords(keywords_master, lens_cosmo, geometry)
rendering_radius, Rs, r_core_host, r200 = keywords['rendering_radius'], \
keywords['Rs'], \
keywords['r_core'], \
keywords['host_r200']
return ProjectedNFW(rendering_radius, Rs, r_core_host, r200)
@staticmethod
def keywords(keywords_master, lenscosmo, geometry):
args_spatial = {}
kpc_per_arcsec_zlens = geometry.kpc_per_arcsec_zlens
zlens = lenscosmo.z_lens
# EVERYTHING EXPRESSED IN KPC
args_spatial['rendering_radius'] = 0.5 * keywords_master['cone_opening_angle'] * kpc_per_arcsec_zlens
if 'log_m_host' in keywords_master.keys():
keywords_master['host_m200'] = 10 ** keywords_master['log_m_host']
if 'host_m200' in keywords_master.keys():
# EVERYTHING EXPRESSED IN KPC
if 'host_c' not in keywords_master.keys():
keywords_master['host_c'] = lenscosmo.NFW_concentration(keywords_master['host_m200'], zlens,
model='diemer19', mdef='200c', logmhm=keywords_master['log_mc'],
scatter=True,
scatter_amplitude=keywords_master['c_scatter_dex'],
suppression_model=keywords_master['suppression_model'],
kwargs_suppresion=keywords_master['kwargs_suppression'])
if 'host_Rs' not in keywords_master.keys():
host_Rs = lenscosmo.NFW_params_physical(keywords_master['host_m200'],
keywords_master['host_c'], zlens)[1]
host_r200 = host_Rs * keywords_master['host_c']
else:
host_Rs = keywords_master['host_Rs']
host_r200 = keywords_master['host_Rs'] * keywords_master['host_c']
args_spatial['Rs'] = host_Rs
args_spatial['rmax3d'] = host_r200
args_spatial['host_r200'] = host_Rs * keywords_master['host_c']
else:
raise Exception('Must specify the host halo mass when rendering subhalos')
if 'r_tidal' in keywords_master.keys():
if isinstance(keywords_master['r_tidal'], str):
if keywords_master['r_tidal'] == 'Rs':
args_spatial['r_core'] = args_spatial['Rs']
else:
if keywords_master['r_tidal'][-2:] != 'Rs':
raise ValueError('if specifying the tidal core radius as number*Rs, the last two '
'letters in the string must be "Rs".')
scale = float(keywords_master['r_tidal'][:-2])
args_spatial['r_core'] = scale * args_spatial['Rs']
else:
args_spatial['r_core'] = keywords_master['r_tidal']
return args_spatial
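# Example of the 'r_tidal' conventions handled above (values are hypothetical):
#   keywords_master['r_tidal'] = 'Rs'    -> r_core = Rs
#   keywords_master['r_tidal'] = '0.5Rs' -> r_core = 0.5 * Rs
#   keywords_master['r_tidal'] = 2.0     -> r_core = 2.0 (interpreted in kpc)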
def cdf(self, u):
arg = u * np.arctan(self.zmax_units_rs/self.xtidal)
return self.xtidal * np.tan(arg)
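# Note: cdf() above is the inverse CDF used to sample the line-of-sight coordinate. For a
# cored isothermal profile p(z) ~ 1 / (z^2 + x_t^2) truncated at z_max (in units of Rs,
# with x_t = self.xtidal), the CDF is F(z) = arctan(z / x_t) / arctan(z_max / x_t), so
# inverting F(z) = u gives z = x_t * tan(u * arctan(z_max / x_t)), which is what cdf(u) returns.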
def _projected_pdf(self, r2d_kpc):
x = r2d_kpc / self._rs_kpc
if isinstance(x, float) or isinstance(x, int):
x = max(x, self._xmin)
else:
x[np.where(x < self._xmin)] = self._xmin
p = self._cnfw_profile._F(x, self.xtidal) / self._norm
return p
def draw(self, N, rescale=1.0, center_x=0., center_y=0.):
if N == 0:
return [], [], []
n = 0
while True:
_x_kpc, _y_kpc, _r2d, _r3d = self._draw_uniform(N, rescale, center_x, center_y)
prob = self._projected_pdf(_r2d)
u = np.random.uniform(size=len(prob))
keep = np.where(u < prob)[0]
if n == 0:
x_kpc = _x_kpc[keep]
y_kpc = _y_kpc[keep]
r3d = _r3d[keep]
else:
x_kpc = np.append(x_kpc, _x_kpc[keep])
y_kpc = np.append(y_kpc, _y_kpc[keep])
r3d = np.append(r3d, _r3d[keep])
n += len(keep)
if n >= N:
break
return x_kpc[0:N], y_kpc[0:N], r3d[0:N]
def _draw_uniform(self, N, rescale=1.0, center_x=0., center_y=0.):
if N == 0:
return [], [], [], []
angle = np.random.uniform(0, 2 * np.pi, int(N))
rmax = self.xmax_2d * rescale
r = np.random.uniform(0, rmax ** 2, int(N))
x_arcsec = r ** .5 * np.cos(angle)
y_arcsec = r ** .5 * np.sin(angle)
x_arcsec += center_x
y_arcsec += center_y
x_kpc, y_kpc = x_arcsec * self._rs_kpc, y_arcsec * self._rs_kpc
u = np.random.uniform(self._xmin, 0.999999, len(x_kpc))
z_units_rs = self.cdf(u)
z_kpc = z_units_rs * self._rs_kpc
return np.array(x_kpc), np.array(y_kpc), np.hypot(x_kpc, y_kpc), np.sqrt(x_kpc ** 2 + y_kpc**2 + z_kpc ** 2)
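# Minimal usage sketch for ProjectedNFW (the numbers are arbitrary placeholders, not fiducial
# values): draw 100 rendering positions and keep the projected coordinates and 3D radii [kpc].
#   projected_nfw = ProjectedNFW(rendering_radius=50.0, Rs=60.0, r_core_host=30.0, r200=250.0)
#   x_kpc, y_kpc, r3d_kpc = projected_nfw.draw(100)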
class NFW3DFast(object):
"""
Same as NFW3D, but uses pre-computed CDFs to sample much faster (though still slower than UniformNFW)
"""
def __init__(self, Rs, rmax2d, rmax3d):
self._Rs = Rs
self._rmax2d = rmax2d
self._rmax3d = rmax3d
self._xc = 0.001 * Rs
self._xmin = 0.01 * rmax2d / self._Rs
self._c = rmax3d/Rs
self.sampler = FastNFW(local_path)
def _draw(self, N, zlens):
x, y, z = self.sampler.sample(self._c, N)
r2 = np.sqrt(x**2 + y**2)
keep = np.where(r2 <= self._rmax2d/self._Rs)[0]
return x[keep], y[keep], z[keep]
def draw(self, N, zlens):
x, y, z = self._draw(N, zlens)
while len(x) < N:
_x, _y, _z = self._draw(N, zlens)
x = np.append(x, _x)
y = np.append(y, _y)
z = np.append(z, _z)
x_kpc = x[0:N] * self._Rs
y_kpc = y[0:N] * self._Rs
z_kpc = z[0:N] * self._Rs
r2_kpc = np.sqrt(x_kpc ** 2 + y_kpc**2)
r3_kpc = np.sqrt(r2_kpc ** 2 + z_kpc**2)
return x_kpc, y_kpc, r3_kpc
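# Usage sketch for NFW3DFast (assumes the pre-computed CDF tables under 'nfw_tables/' are
# available to FastNFW; the numbers are arbitrary placeholders):
#   nfw = NFW3DFast(Rs=60.0, rmax2d=50.0, rmax3d=250.0)
#   x_kpc, y_kpc, r3d_kpc = nfw.draw(100, zlens=0.5)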
class NFW3DCoreRejectionSampling(object):
"""
Samples from a cored NFW profile using rejection sampling; depending on the input parameters this can be slow.
The probability of rendering a halo at 3D position r is proportional to
p(r) ~ 1 / [ (r + r_c) * (r + r_s)^2 ]
"""
def __init__(self, Rs, rmax2d, rmax3d, r_core_parent):
"""
:param Rs: the scale radius of the host dark matter halo [kpc]
:param rmax2d: the maximum projected 2D radius where halos are rendered [kpc]
:param rmax3d: the virial radius of the host dark matter halo [kpc]
:param r_core_parent: the core radius of the host dark matter halo [kpc]
"""
self._Rs = Rs
self._rmax2d = rmax2d
self._rmax3d = rmax3d
self._x3dmax = rmax3d / Rs
self._xcore = r_core_parent / Rs
self.nfw = NFW3DFast(Rs, rmax2d, rmax3d)
self._xmin = self.nfw._xmin
self._norm = ((self._xmin + self._xcore) * (1 + self._xmin) ** 2) ** -1
def _eval_rho_core(self, x, xcore):
if isinstance(x, int) or isinstance(x, float):
x = max(self._xmin, x)
else:
x[np.where(x < self._xmin)] = self._xmin
import numpy as np
import pandas as pd
import itertools
import argparse
import json
import os
import sys
import time
from tqdm import tqdm
import scipy
from aced_hmm.simulator import run_forecast__python
from aced_hmm.print_parameters import pprint_params
def run_simulation(random_seed, output_file, config_dict, states, func_name, approximate=None):
prng = np.random.RandomState(random_seed)
import numpy as np
import soundfile as sf
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os
sz=1024
offsets = np.array([0])
training_fn_pairs = [
["../data/raw_waves/gt_notes.wav", "../data/raw_waves/gt_notes_dist.wav"],
["../data/raw_waves/gt_many_notes.wav", "../data/raw_waves/gt_many_notes_dist.wav"]
]
evaluation_fn_pairs = [
["../data/raw_waves/vivaldi_spring.wav", "../data/raw_waves/vivaldi_spring_dist.wav"]
]
def readWave(path_src, path_cnv):
data_src, samplerate_src = sf.read(path_src)
data_cnv, samplerate_cnv = sf.read(path_cnv)
data_len = min(data_src.shape[0], data_cnv.shape[0])
print("read ", path_src, " and ", path_cnv)
print("len=", data_len)
data_src = np.array(data_src)
data_cnv = np.array(data_cnv)
data_src_trim = data_src[:data_len]
data_cnv_trim = data_cnv[:data_len]
return data_src_trim, data_cnv_trim
def outputWave(input_fn_pairs, output_path, is_training):
file_counter = 0
for fnp in input_fn_pairs:
data_src, data_cnv = readWave(fnp[0], fnp[1])
loops = int(len(data_src)/sz) - 1
for i in np.arange(loops):
for j in np.arange(offsets.shape[0]):
data = np.zeros([2,sz], dtype=np.float32)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
from collections import OrderedDict
import numpy as np
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import poly_map_domain, comb
from ..utils import indent, check_broadcast
from ..units import Quantity
__all__ = [
'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D',
'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D',
'Polynomial2D', 'SIP', 'OrthoPolynomialBase',
'PolynomialModel'
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
def __getattr__(self, attr):
if self._param_names and attr in self._param_names:
return Parameter(attr, default=0.0, model=self)
raise AttributeError(attr)
def __setattr__(self, attr, value):
# TODO: Support a means of specifying default values for coefficients
# Check for self._ndim first--if it hasn't been defined then the
# instance hasn't been initialized yet and self.param_names probably
# won't work.
# This has to vaguely duplicate the functionality of
# Parameter.__set__.
# TODO: I wonder if there might be a way around that though...
if attr[0] != '_' and self._param_names and attr in self._param_names:
param = Parameter(attr, default=0.0, model=self)
# This is a little hackish, but we can actually reuse the
# Parameter.__set__ method here
param.__set__(self, value)
else:
super().__setattr__(attr, value)
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(self, degree, n_models=None, model_set_axis=None,
name=None, meta=None, **params):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name,
meta=meta, **params)
def __repr__(self):
return self._format_repr([self.degree])
def __str__(self):
return self._format_str([('Degree', self.degree)])
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
return numc
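# Worked example of the count above: for a 2D polynomial of degree 2 (ndim=2),
# nmixed = comb(2, 2) = 1 and numc = 2*2 + 1 + 1 = 6, matching the six coefficients
# c0_0, c1_0, c2_0, c0_1, c0_2, c1_1 produced by _generate_coeff_names below.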
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append('c{0}'.format(n))
else:
for i in range(self.degree + 1):
names.append('c{0}_{1}'.format(i, 0))
for i in range(1, self.degree + 1):
names.append('c{0}_{1}'.format(0, i))
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append('c{0}_{1}'.format(i, j))
return tuple(names)
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
x_window : list or None, optional
range of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
inputs = ('x', 'y')
outputs = ('z',)
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
# TODO: Perhaps some of these other parameters should be properties?
# TODO: An awful lot of the functionality in this method is still
# shared by PolynomialModel; perhaps some of it can be generalized in
# PolynomialBase
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
self.x_domain = x_domain
self.y_domain = y_domain
self.x_window = x_window
self.y_window = y_window
self._param_names = self._generate_coeff_names()
super().__init__(
n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def __repr__(self):
return self._format_repr([self.x_degree, self.y_degree])
def __str__(self):
return self._format_str(
[('X-Degree', self.x_degree),
('Y-Degree', self.y_degree)])
def get_num_coeff(self):
"""
Determine how many coefficients are needed
Returns
-------
numc : int
number of coefficients
"""
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = 'c{0}_{1}'.format(i, j)
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, 'r' + str(n), 0.)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, 'r' + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, 'r' + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, 'r' + str(i), 0.)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, 'r' + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append('c{0}_{1}'.format(i, j))
return tuple(names)
def _fcache(self, x, y):
# TODO: Write a docstring explaining the actual purpose of this method
"""To be implemented by subclasses"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, format_info = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), format_info
class Chebyshev1D(PolynomialModel):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = \
super(PolynomialModel, self).prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
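# Usage sketch (illustrative coefficients): the series 1*T_0(x) + 2*T_1(x) + 3*T_2(x).
# Since T_0(x) = 1, T_1(x) = x and T_2(x) = 2x^2 - 1, evaluating at x = 0.5 gives
# 1 + 2*0.5 + 3*(2*0.25 - 1) = 0.5.
#   cheb = Chebyshev1D(2, c0=1, c1=2, c2=3)
#   cheb(0.5)   # -> 0.5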
class Hermite1D(PolynomialModel):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = 2 * x
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = \
super(PolynomialModel, self).prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
from multiprocessing.sharedctypes import Value
from re import L
import numpy as np
import os
from tqdm import tqdm
from collections import deque as dq
from pkg_resources import resource_filename
from ptsnet.arrays import ObjArray, Table2D
from ptsnet.simulation.constants import COEFF_TOL, STEP_JOBS, INIT_JOBS, COMM_JOBS
from ptsnet.epanet.util import EN
from ptsnet.utils.data import define_curve, is_array
from ptsnet.utils.io import run_shell, get_root_path
from ptsnet.simulation.init import Initializator
from ptsnet.parallel.comm import CommManager
from ptsnet.parallel.worker import Worker
from ptsnet.results.storage import StorageManager
from ptsnet.results.workspaces import new_workspace_name, list_workspaces, num_workspaces
from ptsnet.simulation.constants import NODE_RESULTS, PIPE_END_RESULTS, PIPE_START_RESULTS, SURGE_PROTECTION_TYPES
from ptsnet.profiler.profiler import Profiler
class PTSNETSettings:
def __init__(self,
time_step : float = 0.01,
duration: float = 20,
warnings_on: bool = False,
parallel : bool = False,
gpu : bool = False,
skip_compatibility_check : bool = False,
show_progress = False,
save_results = True,
profiler_on = False,
period = 0,
default_wave_speed = 1000,
wave_speed_file_path = None,
delimiter = ',',
wave_speed_method = 'optimal',
_super = None):
self._super = _super
self.settingsOK = False
self.time_step = time_step
self.duration = duration
self.time_steps = int(round(duration/time_step))
self.warnings_on = warnings_on
self.parallel = parallel
self.gpu = gpu
self.skip_compatibility_check = skip_compatibility_check
self.show_progress = show_progress
self.save_results = save_results
self.profiler_on = profiler_on
self.defined_wave_speeds = False
self.active_persistance = False
self.blocked = False
self.period = period
self.default_wave_speed = default_wave_speed
self.wave_speed_file_path = wave_speed_file_path
self.delimiter = delimiter
self.wave_speed_method = wave_speed_method
self.set_default()
self.settingsOK = True
self.num_points = None
def __repr__(self):
rep = "\nSimulation settings:\n\n"
for setting, val in self.__dict__.items():
if setting == '_super':
continue
rep += '%s: %s\n' % (setting, str(val))
return rep
def __setattr__(self, name, value):
try:
if self.__getattribute__(name) != value:
if name != 'settingsOK':
if self.warnings_on:
print("Warning: '%s' value has been changed to %s" % (name, str(value)))
except AttributeError:  # attribute not set yet during initialization
pass
if 'settingsOK' in self.__dict__:
if self.settingsOK:
if name == 'duration':
self.time_steps = int(round(value/self.time_step))
elif name == 'time_step':
if self.defined_wave_speeds:
raise ValueError("'%s' can not be modified since wave speeds have been defined" % name)
if self._super != None:
lens = sum([len(self._super.element_settings[stype]) for stype in self._super.SETTING_TYPES])
if lens > 0:
raise ValueError("'%s' can not be modified since settings have been defined" % name)
self.time_steps = int(round(self.duration/value))
object.__setattr__(self, name, value)
def set_default(self):
self.is_initialized = False
self.updated_settings = False
def to_dict(self):
l = {}
for setting, val in self.__dict__.items():
if setting == '_super':
continue
l[setting] = val
return l
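# Behavioral sketch of the settings class above (values are arbitrary): changing 'duration'
# recomputes 'time_steps', while 'time_step' is locked once wave speeds have been defined.
#   settings = PTSNETSettings(time_step=0.01, duration=20)
#   settings.time_steps    # -> 2000
#   settings.duration = 10
#   settings.time_steps    # -> 1000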
class PTSNETCurve:
CURVE_TYPES = ('valve', 'pump',)
def __init__(self, X, Y, type_):
self.elements = []
if type_ not in self.CURVE_TYPES:
raise ValueError("type '%s' is not valid, use ('" % type_ + "', '".join(self.CURVE_TYPES) + "')")
self.type = type_
self.X = np.array(X)
self.Y = np.array(Y)
order = np.argsort(self.X)
self.X = self.X[order]
self.Y = self.Y[order]
self.fun = define_curve(self.X, self.Y)
def _add_element(self, element):
if is_array(element):
for e in element:
if e not in self.elements:
self.elements.append(e)
else:
if element not in self.elements:
self.elements.append(element)
def __call__(self, value):
return self.fun(value)
def __len__(self):
return len(self.elements)
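# Minimal usage sketch for PTSNETCurve (hypothetical valve curve; the interpolation itself is
# delegated to define_curve, so exact outputs depend on that helper):
#   curve = PTSNETCurve(X=[0.0, 0.5, 1.0], Y=[0.0, 0.3, 1.0], type_='valve')
#   curve(0.25)   # interpolated Y value at X = 0.25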
class ElementSettings:
ERROR_MSG = "the simulation has started, settings can not be added/modified"
def __init__(self, _super):
self.values = []
self.elements = []
self.updated = False
self.activation_times = None
self.activation_indices = None
self.is_sorted = False
self._super = _super
def __len__(self):
return len(self.elements)
def _dump_settings(self, element_index, X, Y):
X = np.array(X)
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.augmenters import blend
from imgaug.testutils import keypoints_equal, reseed
def main():
time_start = time.time()
test_blend_alpha()
test_Alpha()
test_AlphaElementwise()
# TODO SimplexNoiseAlpha
# TODO FrequencyNoiseAlpha
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_blend_alpha():
img_fg = np.full((3, 3, 1), 0, dtype=bool)
img_bg = np.full((3, 3, 1), 1, dtype=bool)
img_blend = blend.blend_alpha(img_fg, img_bg, 1.0, eps=0)
assert img_blend.dtype.name == np.dtype(np.bool_)
assert img_blend.shape == (3, 3, 1)
assert np.all(img_blend == 0)
img_fg = np.full((3, 3, 1), 0, dtype=bool)
img_bg = np.full((3, 3, 1), 1, dtype=bool)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.0, eps=0)
assert img_blend.dtype.name == np.dtype(np.bool_)
assert img_blend.shape == (3, 3, 1)
assert np.all(img_blend == 1)
img_fg = np.full((3, 3, 1), 0, dtype=bool)
img_bg = np.full((3, 3, 1), 1, dtype=bool)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.3, eps=0)
assert img_blend.dtype.name == np.dtype(np.bool_)
assert img_blend.shape == (3, 3, 1)
assert np.all(img_blend == 1)
img_fg = np.full((3, 3, 2), 0, dtype=bool)
img_bg = np.full((3, 3, 2), 1, dtype=bool)
img_blend = blend.blend_alpha(img_fg, img_bg, [1.0, 0.0], eps=0)
assert img_blend.dtype.name == np.dtype(np.bool_)
assert img_blend.shape == (3, 3, 2)
assert np.all(img_blend[:, :, 0] == 0)
assert np.all(img_blend[:, :, 1] == 1)
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
values = [
(0, 0),
(0, 10),
(10, 20),
(min_value, min_value),
(max_value, max_value),
(min_value, max_value),
(min_value, int(center_value)),
(int(center_value), max_value),
(int(center_value + 0.20 * max_value), max_value),
(int(center_value + 0.27 * max_value), max_value),
(int(center_value + 0.40 * max_value), max_value),
(min_value, 0),
(0, max_value)
]
values = values + [(v2, v1) for v1, v2 in values]
for v1, v2 in values:
img_fg = np.full((3, 3, 1), v1, dtype=dtype)
img_bg = np.full((3, 3, 1), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 1.0, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 1)
assert np.all(img_blend == dtype(v1))
img_fg = np.full((3, 3, 1), v1, dtype=dtype)
img_bg = np.full((3, 3, 1), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.99, eps=0.1)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 1)
assert np.all(img_blend == dtype(v1))
img_fg = np.full((3, 3, 1), v1, dtype=dtype)
img_bg = np.full((3, 3, 1), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.0, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 1)
assert np.all(img_blend == dtype(v2))
# TODO this test breaks for numpy <1.15 -- why?
for c in sm.xrange(3):
img_fg = np.full((3, 3, c), v1, dtype=dtype)
img_bg = np.full((3, 3, c), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.75, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, c)
for ci in sm.xrange(c):
v_blend = min(max(int(0.75*np.float128(v1) + 0.25*np.float128(v2)), min_value), max_value)
diff = v_blend - img_blend if v_blend > img_blend[0, 0, ci] else img_blend - v_blend
assert np.all(diff < 1.01)
img_fg = np.full((3, 3, 2), v1, dtype=dtype)
img_bg = np.full((3, 3, 2), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.75, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 2)
v_blend = min(max(int(0.75 * np.float128(v1) + 0.25 * np.float128(v2)), min_value), max_value)
diff = v_blend - img_blend if v_blend > img_blend[0, 0, 0] else img_blend - v_blend
assert np.all(diff < 1.01)
img_fg = np.full((3, 3, 2), v1, dtype=dtype)
img_bg = np.full((3, 3, 2), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, [1.0, 0.0], eps=0.1)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 2)
assert np.all(img_blend[:, :, 0] == dtype(v1))
assert np.all(img_blend[:, :, 1] == dtype(v2))
# elementwise, alphas.shape = (1, 2)
img_fg = np.full((1, 2, 3), v1, dtype=dtype)
img_bg = np.full((1, 2, 3), v2, dtype=dtype)
alphas = np.zeros((1, 2), dtype=np.float64)
alphas[:, :] = [1.0, 0.0]
img_blend = blend.blend_alpha(img_fg, img_bg, alphas, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (1, 2, 3)
assert np.all(img_blend[0, 0, :] == dtype(v1))
assert np.all(img_blend[0, 1, :] == dtype(v2))
# elementwise, alphas.shape = (1, 2, 1)
img_fg = np.full((1, 2, 3), v1, dtype=dtype)
img_bg = np.full((1, 2, 3), v2, dtype=dtype)
alphas = np.zeros((1, 2, 1), dtype=np.float64)
alphas[:, :, 0] = [1.0, 0.0]
img_blend = blend.blend_alpha(img_fg, img_bg, alphas, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (1, 2, 3)
assert np.all(img_blend[0, 0, :] == dtype(v1))
assert np.all(img_blend[0, 1, :] == dtype(v2))
# elementwise, alphas.shape = (1, 2, 3)
img_fg = np.full((1, 2, 3), v1, dtype=dtype)
img_bg = np.full((1, 2, 3), v2, dtype=dtype)
alphas = np.zeros((1, 2, 3), dtype=np.float64)
alphas[:, :, 0] = [1.0, 0.0]
alphas[:, :, 1] = [0.0, 1.0]
alphas[:, :, 2] = [1.0, 0.0]
img_blend = blend.blend_alpha(img_fg, img_bg, alphas, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (1, 2, 3)
assert np.all(img_blend[0, 0, [0, 2]] == dtype(v1))
assert np.all(img_blend[0, 1, [0, 2]] == dtype(v2))
assert np.all(img_blend[0, 0, 1] == dtype(v2))
assert np.all(img_blend[0, 1, 1] == dtype(v1))
for dtype in [np.float16, np.float32, np.float64]:
def _allclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.allclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
max_value = 1000 ** (isize - 1)
min_value = -max_value
center_value = 0
values = [
(0, 0),
(0, 10),
(10, 20),
(min_value, min_value),
(max_value, max_value),
(min_value, max_value),
(min_value, center_value),
(center_value, max_value),
(center_value + 0.20 * max_value, max_value),
(center_value + 0.27 * max_value, max_value),
(center_value + 0.40 * max_value, max_value),
(min_value, 0),
(0, max_value)
]
values = values + [(v2, v1) for v1, v2 in values]
max_float_dt = np.float128
for v1, v2 in values:
img_fg = np.full((3, 3, 1), v1, dtype=dtype)
img_bg = np.full((3, 3, 1), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 1.0, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 1)
assert _allclose(img_blend, max_float_dt(v1))
img_fg = np.full((3, 3, 1), v1, dtype=dtype)
img_bg = np.full((3, 3, 1), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.99, eps=0.1)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 1)
assert _allclose(img_blend, max_float_dt(v1))
img_fg = np.full((3, 3, 1), v1, dtype=dtype)
img_bg = np.full((3, 3, 1), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.0, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 1)
assert _allclose(img_blend, max_float_dt(v2))
for c in sm.xrange(3):
img_fg = np.full((3, 3, c), v1, dtype=dtype)
img_bg = np.full((3, 3, c), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, 0.75, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, c)
assert _allclose(img_blend, 0.75*max_float_dt(v1) + 0.25*max_float_dt(v2))
img_fg = np.full((3, 3, 2), v1, dtype=dtype)
img_bg = np.full((3, 3, 2), v2, dtype=dtype)
img_blend = blend.blend_alpha(img_fg, img_bg, [1.0, 0.0], eps=0.1)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (3, 3, 2)
assert _allclose(img_blend[:, :, 0], max_float_dt(v1))
assert _allclose(img_blend[:, :, 1], max_float_dt(v2))
# elementwise, alphas.shape = (1, 2)
img_fg = np.full((1, 2, 3), v1, dtype=dtype)
img_bg = np.full((1, 2, 3), v2, dtype=dtype)
alphas = np.zeros((1, 2), dtype=np.float64)
alphas[:, :] = [1.0, 0.0]
img_blend = blend.blend_alpha(img_fg, img_bg, alphas, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (1, 2, 3)
assert _allclose(img_blend[0, 0, :], dtype(v1))
assert _allclose(img_blend[0, 1, :], dtype(v2))
# elementwise, alphas.shape = (1, 2, 1)
img_fg = np.full((1, 2, 3), v1, dtype=dtype)
img_bg = np.full((1, 2, 3), v2, dtype=dtype)
alphas = np.zeros((1, 2, 1), dtype=np.float64)
alphas[:, :, 0] = [1.0, 0.0]
img_blend = blend.blend_alpha(img_fg, img_bg, alphas, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (1, 2, 3)
assert _allclose(img_blend[0, 0, :], dtype(v1))
assert _allclose(img_blend[0, 1, :], dtype(v2))
# elementwise, alphas.shape = (1, 2, 3)
img_fg = np.full((1, 2, 3), v1, dtype=dtype)
img_bg = np.full((1, 2, 3), v2, dtype=dtype)
alphas = np.zeros((1, 2, 3), dtype=np.float64)
alphas[:, :, 0] = [1.0, 0.0]
alphas[:, :, 1] = [0.0, 1.0]
alphas[:, :, 2] = [1.0, 0.0]
img_blend = blend.blend_alpha(img_fg, img_bg, alphas, eps=0)
assert img_blend.dtype.name == np.dtype(dtype)
assert img_blend.shape == (1, 2, 3)
assert _allclose(img_blend[0, 0, [0, 2]], dtype(v1))
assert _allclose(img_blend[0, 1, [0, 2]], dtype(v2))
assert _allclose(img_blend[0, 0, 1], dtype(v2))
assert _allclose(img_blend[0, 1, 1], dtype(v1))
def test_Alpha():
reseed()
base_img = np.zeros((3, 3, 1), dtype=np.uint8)
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps_arr_r1 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0]])
heatmaps_arr_l1 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 0.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
aug = iaa.Alpha(1, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = np.round(base_img + 10).astype(np.uint8)
assert np.allclose(observed, expected)
for per_channel in [False, True]:
aug = iaa.Alpha(1, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"x": -1}),
per_channel=per_channel)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert 0 - 1e-6 < heatmaps.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_r1)
aug = iaa.Alpha(0, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = np.round(base_img + 20).astype(np.uint8)
assert np.allclose(observed, expected)
for per_channel in [False, True]:
aug = iaa.Alpha(0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"x": -1}),
per_channel=per_channel)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert 0 - 1e-6 < heatmaps.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_l1)
aug = iaa.Alpha(0.75, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = np.round(base_img + 0.75 * 10 + 0.25 * 20).astype(np.uint8)
assert np.allclose(observed, expected)
aug = iaa.Alpha(0.75, None, iaa.Add(20))
observed = aug.augment_image(base_img + 10)
expected = np.round(base_img + 0.75 * 10 + 0.25 * (10 + 20)).astype(np.uint8)
assert np.allclose(observed, expected)
aug = iaa.Alpha(0.75, iaa.Add(10), None)
observed = aug.augment_image(base_img + 10)
expected = np.round(base_img + 0.75 * (10 + 10) + 0.25 * 10).astype(np.uint8)
assert np.allclose(observed, expected)
base_img = np.zeros((1, 2, 1), dtype=np.uint8)
nb_iterations = 1000
aug = iaa.Alpha((0.0, 1.0), iaa.Add(10), iaa.Add(110))
values = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
observed_val = np.round(np.average(observed)) - 10
values.append(observed_val / 100)
nb_bins = 5
hist, _ = np.histogram(values, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / nb_iterations
assert density_expected - density_tolerance < density < density_expected + density_tolerance
# bad datatype for factor
got_exception = False
try:
_ = iaa.Alpha(False, iaa.Add(10), None)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# per_channel
aug = iaa.Alpha(1.0, iaa.Add((0, 100), per_channel=True), None, per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 1000), dtype=np.uint8))
uq = np.unique(observed)
assert len(uq) > 1
assert np.max(observed) > 80
assert np.min(observed) < 20
aug = iaa.Alpha((0.0, 1.0), iaa.Add(100), None, per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 1000), dtype=np.uint8))
uq = np.unique(observed)
assert len(uq) > 1
assert np.max(observed) > 80
assert np.min(observed) < 20
aug = iaa.Alpha((0.0, 1.0), iaa.Add(100), iaa.Add(0), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) > 1:
seen[1] += 1
else:
assert False
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
# bad datatype for per_channel
got_exception = False
try:
_ = iaa.Alpha(0.5, iaa.Add(10), None, per_channel="test")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# propagating
aug = iaa.Alpha(0.5, iaa.Add(100), iaa.Add(50), name="AlphaTest")
def propagator(images, augmenter, parents, default):
if "Alpha" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(propagator=propagator)
image = np.zeros((10, 10, 3), dtype=np.uint8)
import numpy as np
from collections import OrderedDict
from robolearn_envs.pybullet.core.bullet_env import BulletEnv
from gym.spaces import Box
from robolearn_envs.pybullet.centauro.centauro \
import Centauro
from robolearn_envs.pybullet.common import GoalArrow
from robolearn_envs.pybullet.common import Cylinder
from robolearn_envs.pybullet.common import Drill
from robolearn_envs.pybullet.common import Plane
from robolearn_envs.pybullet.common import MetalTable1
from pybullet import getEulerFromQuaternion
from robolearn_envs.utils.transformations import pose_transform
from robolearn_envs.utils.transformations import compute_cartesian_error
from robolearn_envs.utils.transformations import create_quat_pose
from robolearn_envs.utils.transformations import euler_to_quat
class CentauroObstacleEnv(BulletEnv):
def __init__(self,
is_render=False,
active_joints='RA',
control_mode='joint_tasktorque',
obs_distances=True,
goal_tolerance=0.02,
obstacle='cylinder',
max_time=None,
sim_timestep=1/240.,
frame_skip=1,
seed=None,
):
"""
Centauro robot seeking to move the right hand to a target position
while avoiding an obstacle.
"""
super(CentauroObstacleEnv,
self).__init__(sim_timestep=sim_timestep, frameskip=frame_skip,
is_render=is_render, seed=seed)
self._use_obs_distances = obs_distances
# Environment/Scene
self._pose_dof = 7 # [x, y, z, ow, ox, oy, oz]
self._diff_dof = 6 # [dx, dy, dz, dR, dP, dY]
# Plane
# self._plane = PlaneObject(plane_type='stone',
self._plane = Plane(plane_type='plane_simple',
color=None,
)
self.add_to_sim(self._plane)
# Table
self._table = MetalTable1(
init_pos=(1.0, 0., 0.),
)
self.add_to_sim(self._table)
# Obstacle
if obstacle == 'cylinder':
self._obstacle_fixed_height = 0.75 + 0.10 + 0.001
self._obstacle = Cylinder(
init_pos=(1.0, 0.0, self._obstacle_fixed_height),
cylinder_type='T',
color='red',
fixed_base=False,
)
else:
self._obstacle_fixed_height = 0.8815
self._obstacle = Drill(
init_pos=(1.0, 0.0, self._obstacle_fixed_height),
color='red',
fixed_base=False,
)
self.add_to_sim(self._obstacle)
obstacle_pos_offset = [0.08, 0.25, 0.0]
obstacle_ori_offset = [0, 0.7071068, 0, 0.7071068]
self.obstacle_offset = np.concatenate((obstacle_pos_offset,
obstacle_ori_offset))
self._init_obstacle_pose = np.zeros(7)
self._prev_obst_hand_ori_diff = None
# Target
self.goal = GoalArrow(
init_pos=(1., 0., 0.8),
tolerance=0.025,
color='green',
)
self.add_to_sim(self.goal)
self._target_offset_mean = np.array([0.10, 0.4, 0.0,
0.0, 0.0, 0.0])
target_ori_offset = euler_to_quat(self._target_offset_mean[3:])
self.target_offset = np.concatenate((self._target_offset_mean[:3],
target_ori_offset))
self._init_goal_pose = np.zeros(self._pose_dof)
self._prev_tgt_hand_ori_diff = None
# Robot
init_pos = [0, 0, 0.7975]
collision = True
fixed_base = True
# fixed_base = False
self._robot = Centauro(
init_config=None,
init_pos=init_pos,
control_mode=control_mode,
self_collision=collision,
active_joints=active_joints,
robot_model=None,
fixed_base=fixed_base,
)
self.add_to_sim(self._robot)
init_config = self._robot.initial_configuration
init_config[9] = np.deg2rad(45)
# init_config[12] = np.deg2rad(15)
# init_config[13] = np.deg2rad(32)
# init_config[14] = np.deg2rad(90)
self._init_robot_config_mean = init_config
self._init_robot_config_std = np.zeros_like(init_config)
self._init_robot_config = self._init_robot_config_mean
# Reset environment so we can get info from pybullet
self.set_rendering(False)
self._is_env_instantiation_complete = False
self.reset()
self._is_render = is_render
# ACTUATION
self.action_space = Box(
self._robot.low_action_bounds,
self._robot.high_action_bounds,
dtype=np.float32
)
# OBSERVATION
robot_state_dim = self._robot.observation_dim # joint pos/vel
# Optitrack
if obs_distances:
optitrack_dim = self._diff_dof*2
else:
optitrack_dim = self._pose_dof*3
obs_dim = robot_state_dim + optitrack_dim
self.observation_space = Box(
low=-np.inf, high=np.inf, shape=(obs_dim,),
dtype=np.float32
)
self._observation = OrderedDict(
robot_state=np.zeros(robot_state_dim),
)
if obs_distances:
self._observation['goal_ee_diff'] = np.zeros(self._diff_dof)
self._observation['obstacle_ee_diff'] = np.zeros(self._diff_dof)
else:
self._observation['goal_pose'] = np.zeros(self._pose_dof)
self._observation['obstacle_pose'] = np.zeros(self._pose_dof)
self._observation['ee_pose'] = np.zeros(self._pose_dof)
# STATE
state_dim = robot_state_dim + self._pose_dof*3
self.state_space = Box(
low=-np.inf, high=np.inf, shape=(state_dim,),
dtype=np.float32
)
self._state = OrderedDict(
robot_state=np.zeros(robot_state_dim),
)
self._state['goal_pose'] = np.zeros(self._pose_dof)
self._state['obstacle_pose'] = np.zeros(self._pose_dof)
import numpy as np
import math
from robot.robot_core import Robot, Robotworld, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
# define scenario properties
num_agents = 1
num_objects = 0
num_joints = 1
arm_length = 0.35
# create world
world = Robotworld()
# add agents
world.agents = [Robot() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
# add objects
world.objects = [Landmark() for i in range(num_objects)]
for i, object in enumerate(world.objects):
object.name = 'object %d' % i
# add goals
world.goals = [Robot() for i in range(1)]
for i, goal in enumerate(world.goals):
goal.name = 'end_pos'
# add world specifications
world.num_joints = num_joints
world.arm_length = arm_length
# reset world
self.reset_world(world)
return world
def reset_world(self, world):
# set agent properties
origins = world.robot_position(len(world.agents))
for i, agent in enumerate(world.agents):
agent.color = np.array([0.25,0.25,0.25])
agent.state.lengths = world.arm_length * np.ones(world.num_joints)
agent.state.angles = (2 * np.random.rand(world.num_joints) - 1) * math.pi
agent.state.p_pos = np.array(origins[i][:])
# set properties for goal
world.goals[0].color = np.array([0.5, 0.1, 0.1])
world.goals[0].state.p_pos = np.array(origins[i][:])
world.goals[0].state.lengths = world.arm_length * np.ones(world.num_joints)
world.goals[0].state.angles = (2 * np.random.rand(world.num_joints) - 1) * math.pi
def reward(self, agent, world):
# reward is based on polar difference between the agent and goal position in domain [0, 3.14]
reward = np.absolute(world.goals[0].state.angles[0] - agent.state.angles[0])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import locale
import os
import socket
from datetime import datetime
import pytest
import numpy as np
from astropy.utils import data, misc
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
# AttributeError (it used ".iteritems()" which was removed in Python3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
try:
strurl = misc.find_api_page('astropy.utils.misc', 'dev', False, timeout=3)
objurl = misc.find_api_page(misc, 'dev', False, timeout=3)
except socket.timeout:
if os.environ.get('CI', False):
pytest.xfail('Timed out in CI')
else:
raise
assert strurl == objurl
assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc' # noqa
# Try a non-dev version
objurl = misc.find_api_page(misc, 'v3.2.1', False, timeout=3)
assert objurl == 'https://docs.astropy.org/en/v3.2.1/utils/index.html#module-astropy.utils.misc' # noqa
def test_skip_hidden():
path = data._find_pkg_data_path('data')
for root, dirs, files in os.walk(path):
assert '.hidden_file.txt' in files
assert 'local.dat' in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert '.hidden_file.txt' not in files
assert 'local.dat' in files
break
def test_JsonCustomEncoder():
from astropy import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'
assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'
assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]'
assert json.dumps(b'hello world \xc3\x85',
cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"'
assert json.dumps({1: 2},
cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0., 1.]}}
assert newd == tmpd
tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}}
assert newd == tmpd
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, 'en_US')
locale.setlocale(locale.LC_ALL, 'de_DE')
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime('%a, %b')
with misc._set_locale('en_US'):
assert date.strftime('%a, %b') == 'Sun, Oct'
with misc._set_locale('de_DE'):
assert date.strftime('%a, %b') == 'So, Okt'
# Back to original
assert date.strftime('%a, %b') == day_mon
with misc._set_locale(current):
assert date.strftime('%a, %b') == day_mon
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(
|
np.dtype(np.float64)
|
numpy.dtype
|
from keras.models import Sequential
from keras.layers import Dense
from keras import utils
from keras import optimizers
import scipy.io as sio
import numpy as np
from sklearn import preprocessing
import csv
from keras import backend as K
import matplotlib.pyplot as plt
x_temp = []
y_temp = []
list_size = 0
with open('/Users/jiazhengsun/Desktop/nn-contact/data/NN-contact-force/train_data/rect_c2_sym.csv') as csvfile:
raw_data = csv.reader(csvfile, delimiter='\n')
for row in raw_data:
data_string = row[0]
data_digit_str = data_string.split(',')
list_size = len(data_digit_str)
#print(data_digit_str[list_size - 2])
x_temp.append(data_digit_str[:list_size - 2])
y_temp.append(data_digit_str[list_size - 2])
x_total = []
y_total = []
for i in range(len(x_temp)):
y_total.append(int(y_temp[i]))
indi_data_set = []
for item in x_temp[i]:
#x_total.append(float(item))
indi_data_set.append(float(item))
x_total.append(indi_data_set)
y_total = np.transpose(y_total)
one_hot_labels = utils.to_categorical(y_total, num_classes=3)
length = y_total.shape[0]
#x_total: NUMSAM arrays of 5 elements each, so the feature dimension is 5.
#y_total: a single array of NUMSAM labels; transpose it so it lines up with x_total.
x_train = x_total[:int(length * 0.8)]
x_test = x_total[int(length*0.8):]
y_train = one_hot_labels[:int(length* 0.8)]
y_test = one_hot_labels[int(length*0.8):]
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test =
|
np.array(y_test)
|
numpy.array
|
import utils as u
import QUtils as qu
import numpy as np
import multiprocessing as mp
import scipy.fftpack as sp
import time
import matplotlib.pyplot as plt
import pickle
from numpy import linalg as LA
import scipy.stats as st
import sys
import yt; yt.enable_parallelism(); is_root = yt.is_root();
end = lambda start, id: print(f"Done with {id} in {time.time()-start:.4f} seconds")
simName = "FNS_r1"
simName = "test_r15_(0,30,30,15,0)"
decimate = 2
label = ""
PLOT = True
class figObj(object):
'''
This class stores all simulation metadata for figures
'''
def __init__(self):
self.meta = None
self.tags = None
self.N = None
self.dt = None
self.framesteps = None
self.IC = None
self.phi = None
self.name = None
self.fileNames_psi = None
self.indToTuple = None
self.tupleToInd = None
self.decimate = None
fo = figObj()
def str2sig(string):
'''
This function takes in a string with format '(A, B)' and returns a tuple
with (A, B)
Parameters
---------------------------------------------------------------------------
string: string
A string with format '(A, B)'
Returns
---------------------------------------------------------------------------
tuple: tuple
A tuple with (A, B)
'''
string = string.replace('(','')
string = string.replace(')','')
ints = string.split(",")
return (int(ints[0]), int(ints[1]) )
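# Illustrative check (not part of the original script): the tags used throughout
# this module are built with str() on a tuple, and str2sig simply inverts that
# rendering.
assert str2sig(str((3, 12))) == (3, 12)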
def setFigObj(name, decimate):
'''
This function populates the attributes of the instance of the figObj class
with values.
'''
# read in simulation parameters
meta = u.getMetaKno(name, dir = 'Data/', N = "N", dt = "dt", frames = "frames",
framesteps = "framesteps", IC = "IC", omega0 = "omega0", Lambda0 = "Lambda0")
fo.meta = meta
# sets the figure object with these parameters
    # this is basically just so I can access them in the global scope
fo.name = name
fo.N = fo.meta["N"]
fo.dt = fo.meta["dt"]
    fo.framesteps = fo.meta["framesteps"]
fo.IC = fo.meta["IC"]
fo.decimate = decimate
np.random.seed(1)
fo.phi = np.random.uniform(0, 2 * np.pi, fo.N)
# this is basically just to see how many time drops there were
fo.fileNames_psi = u.getNamesInds('Data/' + name + "/" + "psi" + fo.tags[0])
def offDiag(sig, psi_j, indToTuple_j, i):
'''
This function constructs the offdiagonal term of the (ANDREW TODO: CONFIRM)
special Hilbert space hamiltonian.
Parameters
---------------------------------------------------------------------------
psi_j: array-like
The (ANDREW TODO: CONFIRM) special hilbert space wavefunction
indToTuple_j: (ANDREW TODO)
Converts index of state to its tuple key
i: int
Index to state
'''
M = np.zeros((fo.N, fo.N)) + 0j
# creation op on mode b
for b in range(fo.N):
# annihilation op on mode a
for a in range(b+1, fo.N):
new_p = sig[1] + b - a
newsig = (sig[0], new_p)
tag_ = str(newsig)
if tag_ in fo.tags:
tupleToInd = None
with open("../Data/" + fo.name + "/" + "tupleToInd" + tag_ + ".pkl", 'rb') as f:
tupleToInd_ = pickle.load(f)
# load in the psi in a given special Hilbert space
fileNames_psi = u.getNamesInds("Data/" + fo.name + "/" + "psi" + tag_)
psi_ = np.load(fileNames_psi[i])
for j in range(len(psi_j)):
c_j = psi_j[j]
state_j = np.array(indToTuple_j[j])
state_i = state_j.copy()
state_i[a] = state_j[a] - 1
state_i[b] = state_j[b] + 1
if tuple(state_i) in tupleToInd_:
i_ = tupleToInd_[ tuple(state_i) ]
val_ = c_j
val_ *= np.sqrt(state_j[b] + 1)
val_ *= np.sqrt(state_j[a])
val_ *= np.conj(psi_[i_])
M[b,a] += val_
M[a,b] += np.conj(val_)
return M
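# Minimal sketch of the ladder-operator bookkeeping offDiag relies on (illustrative
# only; the mode indices and occupations below are made up): moving one particle
# from mode a to mode b turns n_a -> n_a - 1 and n_b -> n_b + 1 with weight
# sqrt(n_b + 1) * sqrt(n_a).
_demo_state_j = np.array([2, 1, 0])   # occupation numbers of a 3-mode Fock state
_demo_a, _demo_b = 0, 2               # annihilate in mode 0, create in mode 2
_demo_state_i = _demo_state_j.copy()
_demo_state_i[_demo_a] -= 1
_demo_state_i[_demo_b] += 1
_demo_weight = np.sqrt(_demo_state_j[_demo_b] + 1) * np.sqrt(_demo_state_j[_demo_a])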
def get_aa(sig, psi_j, indToTuple_j, i):
'''
This function constructs the aa operator for (ANDREW TODO)
Parameters
---------------------------------------------------------------------------
psi_j: array-like
The (ANDREW TODO: CONFIRM) special hilbert space wavefunction
indToTuple_j: (ANDREW TODO: what kind of thing is this?)
Converts index of state to its tuple key
i: int
Index to state
Returns
---------------------------------------------------------------------------
aa: array-like
The aa operator
'''
aa = np.zeros((fo.N, fo.N)) + 0j
for a1 in range(fo.N):
for a2 in range(a1,fo.N):
new_p = sig[1] - a1 - a2
newsig = (sig[0] - 2, new_p)
newTag = str(newsig)
if newTag in fo.tags:
tupleToInd = None
with open("../Data/" + fo.name + "/" + "tupleToInd" + newTag + ".pkl", 'rb') as f:
tupleToInd_ = pickle.load(f)
# load in the psi in a given special Hilbert space
fileNames_psi = u.getNamesInds("Data/" + fo.name + "/" + "psi" + newTag)
psi_ = np.load(fileNames_psi[i])
for j in range(len(psi_j)):
c_j = psi_j[j]
state_j = np.array(indToTuple_j[j])
state_i = state_j.copy()
state_i[a1] = state_i[a1] - 1
state_i[a2] = state_i[a2] - 1
if tuple(state_i) in tupleToInd_:
i_ = tupleToInd_[ tuple(state_i) ]
val_ = c_j
if a1 != a2:
val_ *= np.sqrt(state_j[a1])
val_ *= np.sqrt(state_j[a2])
val_ *= np.conj(psi_[i_])
aa[a1,a2] += val_
aa[a2,a1] += val_
else:
val_ *= np.sqrt(state_j[a1])
val_ *= np.sqrt(state_j[a2]-1)
val_ *= np.conj(psi_[i_])
aa[a1,a2] += val_
return aa
def get_a(sig, psi_j, indToTuple_j, i):
'''
This function constructs the a operator for (ANDREW TODO)
Parameters
---------------------------------------------------------------------------
psi_j: array-like
The (ANDREW TODO: CONFIRM) special hilbert space wavefunction
indToTuple_j: (ANDREW TODO: what kind of thing is this?)
Converts index of state to its tuple key
i: int
Index to state
Returns
---------------------------------------------------------------------------
a: array-like
The a operator
'''
a = np.zeros(fo.N) + 0j
for a1 in range(fo.N):
new_p = sig[1] - a1
newsig = (sig[0] - 1, new_p)
newTag = str(newsig)
if newTag in fo.tags:
tupleToInd = None
with open("../Data/" + fo.name + "/" + "tupleToInd" + newTag + ".pkl", 'rb') as f:
tupleToInd_ = pickle.load(f)
# load in the psi in a given special Hilbert space
fileNames_psi = u.getNamesInds("Data/" + fo.name + "/" + "psi" + newTag)
psi_ = np.load(fileNames_psi[i])
for j in range(len(psi_j)):
c_j = psi_j[j]
state_j = np.array(indToTuple_j[j])
state_i = state_j.copy()
state_i[a1] = state_i[a1] - 1
if tuple(state_i) in tupleToInd_:
i_ = tupleToInd_[ tuple(state_i) ]
val_ = c_j
val_ *= np.sqrt(state_j[a1])
val_ *= np.conj(psi_[i_])
a[a1] += val_
return a
def getN(psi_, indToTuple_):
'''
This function constructs the number operator for (ANDREW TODO)
Parameters
---------------------------------------------------------------------------
psi_: array-like
The (ANDREW TODO: CONFIRM) special hilbert space wavefunction
indToTuple_j: (ANDREW TODO: what kind of thing is this?)
Converts index of state to its tuple key
Returns
---------------------------------------------------------------------------
N: array-like
The N operator
'''
N = np.zeros(fo.N)
for j in range(len(indToTuple_)):
subState = np.array(indToTuple_[j])
c_j = psi_[j]
if np.abs(c_j)>0:
N += subState*np.abs(c_j)**2
return N
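# Minimal sketch (illustrative; the two-mode superposition below is made up):
# getN weights each Fock occupation tuple by |c_j|^2, so for the state
# sqrt(0.75)|2,0> + sqrt(0.25)|1,1> the expected occupations are [1.75, 0.25].
_demo_indToTuple = {0: (2, 0), 1: (1, 1)}
_demo_psi = np.array([np.sqrt(0.75), np.sqrt(0.25)])
# getN(_demo_psi, _demo_indToTuple) would return [1.75, 0.25] once fo.N is set to 2.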
def analyzeTimeStep(i):
'''
This function finds all of the relevant summarizing quantities for each
timestep, e.g. number, eigenvalues, squeezing
Parameters
---------------------------------------------------------------------------
i: integer
Timestep to analyze
Returns
---------------------------------------------------------------------------
t: float
Current simulation time
N: (ANDREW TODO)
(ANDREW TODO)
M: array-like
(ANDREW TODO)
eigs: array-like
Eigenvalues of M matrix
aa: (ANDREW TODO)
(ANDREW TODO)
a: (ANDREW TODO)
(ANDREW TODO)
Q: float
        Classical approximation error tracker
'''
    t = fo.dt*fo.framesteps*(i+1)
outputs = {}
for sto, tag_ in yt.parallel_objects( fo.tags , 0, storage=outputs, dynamic = True):
sys.stdout.flush()
sto.result_id = tag_
sig = str2sig(tag_)
#start = time.time()
# load in the psi in a given special Hilbert space
fileNames_psi = u.getNamesInds("Data/" + fo.name + "/" + "psi" + tag_)
psi_ = np.load(fileNames_psi[i])
#end(start, "Loading Psi")
#start = time.time()
indToTuple_ = None
with open("../Data/" + fo.name + "/" + "indToTuple" + tag_ + ".pkl", 'rb') as f:
indToTuple_ = pickle.load(f)
#end(start, "Loading indToTuple")
N_ = getN(psi_, indToTuple_)
M_ = np.zeros((fo.N, fo.N)) + 0j
aa_ = np.zeros((fo.N, fo.N)) + 0j
a_ = np.zeros(fo.N) + 0j
#start = time.time()
M_ += np.diag(N_)
#end(start, "adding diag")
#start = time.time()
M_ += offDiag(sig, psi_, indToTuple_, i)
#end(start, "adding offdiag")
#start = time.time()
aa_ += get_aa(sig, psi_, indToTuple_, i)
#end(start, "adding aa")
#start = time.time()
a_ += get_a(sig, psi_, indToTuple_, i)
        #end(start, "adding a")
sto.result = (N_, M_, aa_, a_)
N = np.zeros(fo.N)
M = np.zeros((fo.N, fo.N)) + 0j
aa = np.zeros((fo.N, fo.N)) + 0j
a = np.zeros(fo.N) + 0j
for i, key_ in enumerate(outputs.keys()):
#key_ = outputs.keys()[i]
N_, M_, aa_, a_ = outputs[key_]
N += N_
M += M_
aa += aa_
a += a_
eigs, _ = LA.eig(M)
eigs = qu.sortE(np.abs(eigs),eigs)
Q = np.sum( N - a*np.conj(a) ) / np.sum(fo.IC)
return t, N, M, eigs, aa, a, Q
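# Interpretation sketch (an assumption, not stated in the source): Q compares the
# total occupation E[a^dag a] with its mean-field value |E[a]|^2, so for an exactly
# coherent (classical) field the numerator vanishes and Q stays near 0.
_demo_alpha = np.array([0.8 + 0.1j, -0.3j])        # hypothetical coherent amplitudes
_demo_N = np.abs(_demo_alpha) ** 2                 # E[a^dag a] for a coherent state
_demo_Q_num = np.sum(_demo_N - _demo_alpha * np.conj(_demo_alpha))  # ~ 0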
def analyze():
'''
main() for analysis
'''
print("Starting di_analysis...")
time0 = time.time()
t, N, M, eigs, aa, a, Q = [], [], [], [], [], [], []
steps = len(range(0,len(fo.fileNames_psi), fo.decimate))
for i in range(0,len(fo.fileNames_psi), fo.decimate):
if is_root:
start = time.time()
t_, N_, M_, eigs_, aa_, a_, Q_ = analyzeTimeStep(i)
if is_root:
end(start, f"analyzing timestep {i}")
t.append(t_)
N.append(N_)
M.append(M_)
eigs.append(eigs_)
aa.append(aa_)
a.append(a_)
Q.append(Q_)
if is_root:
u.repeat_print(('%i hrs, %i mins, %i s remaining.\n' %u.remaining(i + 1, steps, time0)))
t = np.array(t)
N = np.array(N)
M = np.array(M)
eigs = np.array(eigs)
aa = np.array(aa)
a = np.array(a)
Q = np.array(Q)
return t, N, M, eigs, aa, a, Q
def makeNFig(t, N):
'''
Make number operator figure.
Parameters
---------------------------------------------------------------------------
t: float
Simulation time of a given timestep
N: array-like
Number operator
'''
fig, ax = plt.subplots(figsize = (6,6))
ax.set_xlabel(r'$t$')
for i in range(fo.N):
ax.plot(t, N[:,i], label = r'$E[\hat N_%i]$' %i)
ax.set_xlim(0, np.max(t) )
ax.set_ylim(0, np.max(N)*1.05 )
ax.legend()
fig.savefig("../Figs/" + fo.name + "_Num.pdf",bbox_inches = 'tight')
def makeMFig(t, lams):
'''
Make M operator figure.
Parameters
---------------------------------------------------------------------------
t: float
Simulation time of a given timestep
    lams: array-like
        Eigenvalues of the M operator at each timestep
'''
fig, ax = plt.subplots(figsize = (6,6))
ax.set_xlabel(r'$t$')
for i in range(fo.N):
ax.plot(t, lams[:,i], label = r'$\lambda_%i$' %i)
ax.set_xlim(0, np.max(t) )
ax.set_ylim(0, np.max(lams)*1.05 )
ax.legend()
fig.savefig("../Figs/" + fo.name + "_lams.pdf",bbox_inches = 'tight')
def makePOQFig(t, eigs, Q):
'''
Make classical approximation error tracker figure.
Parameters
---------------------------------------------------------------------------
t: float
Simulation time of a given timestep
eigs: array-like
Eigenvalues of M operator
Q: array-like
Error tracking matrix
'''
fig, ax = plt.subplots(figsize = (6,6))
n = np.sum(fo.IC)
ax.set_xlabel(r'$t$')
PO = 1. - (eigs[:,-1] / n)
ax.plot(t, PO, label = r'$1 - \lambda_p/n_{tot}$')
ax.plot(t, Q, label = r'$Q$')
ax.set_xlim(0, np.max(t) )
ax.set_ylim(0, 1.05)
ax.legend()
fig.savefig("../Figs/" + fo.name + "_POQ.pdf",bbox_inches = 'tight')
def constructSq(a,aa,M):
'''
Construct squeezing operator
Parameters
---------------------------------------------------------------------------
a: array-like
The a operator
aa: array-like
The aa operator
M: array-like
The M operator
'''
N = len(a[0])
n = np.sum(np.diag(M[0]))
xi_p = np.zeros( (len(a), N) ) + 0j
aaS = np.zeros( len(a) ) + 0j
baS = np.zeros( len(a) ) + 0j
aS = np.zeros( len(a) ) + 0j
for i in range(len(a)):
M_ = M[i]
eigs, psis = LA.eig(M_)
psis = qu.sortVects(np.abs(eigs),psis)
eigs = qu.sortE(np.abs(eigs),eigs)
principle = psis[:,-1]
xi_p[i,:] = principle#*np.sqrt(eigs[-1])
for k in range(N):
k_ = (-1*k -1)%N
#xi_k = np.conj(xi_p[i,k_])
xi_k = xi_p[i,k]
aS[i] += xi_k*a[i,k]
for j in range(N):
j_ = (-1*j -1)%N
#xi_j = np.conj(xi_p[i,j_])
xi_j = xi_p[i,j]
aaS[i] += xi_k*xi_j*aa[i,k,j]
baS[i] += np.conj(xi_k)*xi_j*M[i,k,j]
dbaS = baS - np.conj(aS)*aS
daaS = aaS - aS*aS
return 1 + 2*dbaS - 2*np.abs(daaS)
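# Sanity sketch (illustrative; the amplitudes are made up): for an ideal coherent
# state with mode amplitudes alpha, E[a_k] = alpha_k, E[a_k a_j] = alpha_k alpha_j
# and E[a_k^dag a_j] = conj(alpha_k) alpha_j, so both variance terms cancel and the
# squeezing measure returned above should come out as 1 (no squeezing).
_demo_alpha_sq = np.array([1.0 + 0.5j, 0.3 - 0.2j, 0.1j])
_demo_a_sq = _demo_alpha_sq[None, :]                                      # E[a_k]
_demo_aa_sq = (_demo_alpha_sq[:, None] * _demo_alpha_sq[None, :])[None, :, :]
_demo_M_sq = (np.conj(_demo_alpha_sq)[:, None] * _demo_alpha_sq[None, :])[None, :, :]
# np.allclose(constructSq(_demo_a_sq, _demo_aa_sq, _demo_M_sq), 1.0) should hold.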
def makeSqueezeFig(t, aa, M, a):
    '''
    Make the squeezing-parameter figure from the a, aa, and M operators.
    '''
sq = constructSq(a, aa, M)
fig, ax = plt.subplots(figsize = (6,6))
ax.set_xlabel(r'$t$')
ax.plot(t, sq)
ax.text(.5,.9,r'$1 + 2 E[\delta \hat a_S^\dagger \delta \hat a_S ] - 2 |Var[\hat a_S]|$', ha='center', va='center', transform= ax.transAxes,
bbox = {'facecolor': 'white', 'pad': 5})
ax.plot([0, np.max(t)], [1,1], "r:")
r_pred = np.log(fo.n**(1/6.))
t_pred = .6/(5*.1)
ax.plot([t_pred], [np.exp(-2*r_pred)], 'ko')
index =
|
np.argmin(sq)
|
numpy.argmin
|
# Author: <NAME>, 2019
# License: BSD
import numpy as np
from polytopes import UnitCube
from polytopes import ProbabilitySimplex
from polytopes import Knapsack
from polytopes import OrderSimplex
from polytopes import Permutahedron
from polytopes import Birkhoff
from polytopes import CartesianProduct
from fw import project_fw
from fista import KL_project_fista
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
def test_Euclidean_projection():
rng =
|
np.random.RandomState(0)
|
numpy.random.RandomState
|
#
# Created by: <NAME>, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of desired data type of shape
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
        # 'lower' argument of dpotrf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs(object):
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is of shape n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
        # Argument checks occur independently of the datatype used,
        # so there is no need to parameterize over all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
    # our test here will be to do one step of reducing a Hermitian matrix to
    # tridiagonal form using Householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
    # Check whether an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read().decode())
class TestSytrd(object):
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_sytrd_with_zero_dim_array(self, dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd = get_lapack_funcs('sytrd', (A,))
assert_raises(ValueError, sytrd, A)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('n', (1, 3))
def test_sytrd(self, dtype, n):
A = np.zeros((n, n), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
        # disable rtol here since some values in QHAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
        # For <c,z>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
        # The condition-number estimate 1/rcond should agree with np.linalg.cond to within a relative error of 1
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
seed(1234)
for ind, dtype in enumerate(REAL_DTYPES):
# DTYPES = <s,d> sygst
n = 10
potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
'syevd', 'sygvd'),
dtype=dtype)
A = rand(n, n).astype(dtype)
A = (A + A.T)/2
# B must be positive definite
B = rand(n, n).astype(dtype)
B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (sygvd)
eig_gvd, _, info = sygvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = sygst(A, b)
assert_(info == 0)
eig, _, info = syevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_hegst():
seed(1234)
for ind, dtype in enumerate(COMPLEX_DTYPES):
# DTYPES = <c,z> hegst
n = 10
potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
'heevd', 'hegvd'),
dtype=dtype)
A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
A = (A + A.conj().T)/2
# B must be positive definite
B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (hegvd)
eig_gvd, _, info = hegvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = hegst(A, b)
assert_(info == 0)
eig, _, info = heevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
"""
This test performs an RZ decomposition in which an m x n upper trapezoidal
array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
and Z is unitary.
"""
seed(1234)
m, n = 10, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork = _compute_lwork(tzrzf_lw, m, n)
if ind < 2:
A = triu(rand(m, n).astype(dtype))
else:
A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
# assert wrong shape arg, f2py returns generic error
assert_raises(Exception, tzrzf, A.T)
rz, tau, info = tzrzf(A, lwork=lwork)
# Check success
assert_(info == 0)
# Get Z manually for comparison
R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
Id = np.eye(n, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
Z = reduce(np.dot, ref)
assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
"""
    Test solving a linear system whose coefficient matrix is a triangular
    array stored in Rectangular Full Packed (RFP) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
trans = 'C'
else:
A = triu(rand(n, n) + eye(n)).astype(dtype)
trans = 'T'
trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
dtype=dtype)
Afp, _ = trttf(A)
B = rand(n, 2).astype(dtype)
soln = tfsm(-1, Afp, B)
assert_array_almost_equal(soln, solve(-A, B),
decimal=4 if ind % 2 == 0 else 6)
soln = tfsm(-1, Afp, B, trans=trans)
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
        # Make A unit diagonal
A[np.arange(n), np.arange(n)] = dtype(1.)
soln = tfsm(-1, Afp, B, trans=trans, diag='U')
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Change side
B2 = rand(3, n).astype(dtype)
soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
"""
    This test performs a matrix multiplication with an arbitrary m x n matrix C
    and a unitary matrix Q without explicitly forming the array. The array data
    is encoded in the rectangular part of A, which is obtained from ?TZRZF. The
    size of Q is inferred from the m, n, and side keywords.
"""
seed(1234)
qm, qn, cn = 10, 15, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
if ind < 2:
A = triu(rand(qm, qn).astype(dtype))
C = rand(cn, cn).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
dtype=dtype)
else:
A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
dtype=dtype)
lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
rz, tau, info = tzrzf(A, lwork=lwork_rz)
# Get Q manually for comparison
V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
Id = np.eye(qn, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
Q = reduce(np.dot, ref)
# Now that we have Q, we can test whether lapack results agree with
# each case of CQ, CQ^H, QC, and QC^H
trans = 'T' if ind < 2 else 'C'
tol = 10*np.spacing(dtype(1.0).real)
cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
rtol=0.)
def test_tfttr_trttf():
"""
    Test conversion routines between the Rectangular Full Packed (RFP) format
    and the Standard Triangular Array (TR) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transr = 'C'
else:
A_full = (rand(n, n)).astype(dtype)
transr = 'T'
trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
A_tf_U, info = trttf(A_full)
assert_(info == 0)
A_tf_L, info = trttf(A_full, uplo='L')
assert_(info == 0)
A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
assert_(info == 0)
A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
assert_(info == 0)
# Create the RFP array manually (n is even!)
A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_U_T,
A_tf_U_m.conj().T.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L_T,
A_tf_L_m.conj().T.reshape(-1, order='F'))
# Get the original array from RFP
A_tr_U, info = tfttr(n, A_tf_U)
assert_(info == 0)
A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
assert_(info == 0)
A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
assert_(info == 0)
A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_U_T, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
assert_array_almost_equal(A_tr_L_T, tril(A_full))
def test_tpttr_trttp():
"""
    Test conversion routines between the Triangular Packed (TP) format
    and the Standard Triangular Array (TR) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A_full = (rand(n, n)).astype(dtype)
trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
A_tp_U, info = trttp(A_full)
assert_(info == 0)
A_tp_L, info = trttp(A_full, uplo='L')
assert_(info == 0)
# Create the TP array manually
inds = tril_indices(n)
A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_U_m[:] = (triu(A_full).T)[inds]
inds = triu_indices(n)
A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_L_m[:] = (tril(A_full).T)[inds]
assert_array_almost_equal(A_tp_U, A_tp_U_m)
assert_array_almost_equal(A_tp_L, A_tp_L_m)
# Get the original array from TP
A_tr_U, info = tpttr(n, A_tp_U)
assert_(info == 0)
A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
def test_pftrf():
"""
    Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
Achol_rfp, info = pftrf(n, Afp)
assert_(info == 0)
A_chol_r, _ = tfttr(n, Achol_rfp)
Achol = cholesky(A)
assert_array_almost_equal(A_chol_r, Achol)
def test_pftri():
"""
    Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array to find its inverse
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
A_inv_rfp, info = pftri(n, A_chol_rfp)
assert_(info == 0)
A_inv_r, _ = tfttr(n, A_inv_rfp)
Ainv = inv(A)
assert_array_almost_equal(A_inv_r, triu(Ainv),
decimal=4 if ind % 2 == 0 else 6)
def test_pftrs():
"""
    Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array and solve a linear system
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
B = ones((n, 3), dtype=dtype)
Bf1 = ones((n+2, 3), dtype=dtype)
Bf2 = ones((n-2, 3), dtype=dtype)
pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
# larger B arrays shouldn't segfault
soln, info = pftrs(n, A_chol_rfp, Bf1)
assert_(info == 0)
assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
soln, info = pftrs(n, A_chol_rfp, B)
assert_(info == 0)
assert_array_almost_equal(solve(A, B), soln,
decimal=4 if ind % 2 == 0 else 6)
def test_sfrk_hfrk():
"""
    Test performing a symmetric rank-k operation on a matrix in RFP format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
        prefix = 's' if ind < 2 else 'h'
trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk'
''.format(prefix)),
dtype=dtype)
Afp, _ = trttf(A)
C = np.random.rand(n, 2).astype(dtype)
Afp_out = shfrk(n, 2, -1, C, 2, Afp)
A_out, _ = tfttr(n, Afp_out)
assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
decimal=4 if ind % 2 == 0 else 6)
def test_syconv():
"""
    Test converting back and forth between the factorization returned by
    he/sytrf and the L and D factors/permutations.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 10
if ind > 1:
A = (randint(-30, 30, (n, n)) +
randint(-30, 30, (n, n))*1j).astype(dtype)
A = A + A.conj().T
else:
A = randint(-30, 30, (n, n)).astype(dtype)
A = A + A.T + n*eye(n)
tol = 100*np.spacing(dtype(1.0).real)
syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
'sytrf_lwork'), dtype=dtype)
lw = _compute_lwork(trf_lwork, n, lower=1)
L, D, perm = ldl(A, lower=1, hermitian=False)
lw = _compute_lwork(trf_lwork, n, lower=1)
ldu, ipiv, info = trf(A, lower=1, lwork=lw)
a, e, info = syconv(ldu, ipiv, lower=1)
assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.)
# Test also upper
U, D, perm = ldl(A, lower=0, hermitian=False)
ldu, ipiv, info = trf(A, lower=0)
a, e, info = syconv(ldu, ipiv, lower=0)
assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
class TestBlockedQR(object):
"""
Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt
    and tpmqrt.
"""
def test_geqrt_gemqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
a, t, info = geqrt(n, A)
assert(info == 0)
# Extract elementary reflectors from lower triangle, adding the
# main diagonal of ones.
v = np.tril(a, -1) + np.eye(n, dtype=dtype)
# Generate the block Householder transform I - VTV^H
Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
R = np.triu(a)
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
rtol=0.)
assert_allclose(Q @ R, A, atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, info = gemqrt(a, t, C, side=side, trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
qC = q @ C
else:
qC = C @ q
assert_allclose(c, qC, atol=tol, rtol=0.)
# Test default arguments
if (side, trans) == ('L', 'N'):
c_default, info = gemqrt(a, t, C)
assert(info == 0)
assert_equal(c_default, c)
# Test invalid side/trans
assert_raises(Exception, gemqrt, a, t, C, side='A')
assert_raises(Exception, gemqrt, a, t, C, trans='A')
def test_tpqrt_tpmqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
B = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
# Test for the range of pentagonal B, from square to upper
# triangular
for l in (0, n // 2, n):
a, b, t, info = tpqrt(l, n, A, B)
assert(info == 0)
# Check that lower triangular part of A has not been modified
assert_equal(np.tril(a, -1), np.tril(A, -1))
# Check that elements not part of the pentagonal portion of B
# have not been modified.
assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
# Extract pentagonal portion of B
B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
# Generate elementary reflectors
v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
# Generate the block Householder transform I - VTV^H
Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
R = np.concatenate((np.triu(a), np.zeros_like(a)))
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
atol=tol, rtol=0.)
assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
D = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, d, info = tpmqrt(l, b, t, C, D, side=side,
trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
cd = np.concatenate((c, d), axis=0)
CD = np.concatenate((C, D), axis=0)
qCD = q @ CD
else:
cd = np.concatenate((c, d), axis=1)
CD = np.concatenate((C, D), axis=1)
qCD = CD @ q
assert_allclose(cd, qCD, atol=tol, rtol=0.)
if (side, trans) == ('L', 'N'):
c_default, d_default, info = tpmqrt(l, b, t, C, D)
assert(info == 0)
assert_equal(c_default, c)
assert_equal(d_default, d)
# Test invalid side/trans
assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
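# Editor's sketch (illustrative, not part of the original suite): reconstructing
# the compact-WY factor from ?geqrt output as Q = I - V T V^T and checking that
# Q R reproduces A, the same identity the blocked-QR tests above rely on.
def _example_blocked_qr():
    import numpy as np
    from scipy.linalg import get_lapack_funcs
    rng = np.random.default_rng(0)
    n = 6
    A = rng.standard_normal((n, n))
    geqrt = get_lapack_funcs('geqrt', dtype=A.dtype)
    a, t, info = geqrt(n, A)                  # block size nb = n, i.e. one block
    V = np.tril(a, -1) + np.eye(n)            # unit-lower elementary reflectors
    Q = np.eye(n) - V @ t @ V.T               # block Householder transform
    R = np.triu(a)
    assert info == 0
    assert np.allclose(Q @ R, A)
    assert np.allclose(Q.T @ Q, np.eye(n))    # Q has orthonormal columns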
def test_pstrf():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstrf
n = 10
r = 2
pstrf = get_lapack_funcs('pstrf', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstrf(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# python-dbg 3.5.2 runs cause trouble with the following assertion.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstrf(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_pstf2():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstf2
n = 10
r = 2
pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstf2(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# python-dbg 3.5.2 runs cause trouble with the commented assertions.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstf2(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
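# Editor's sketch (illustrative, not part of the original suite): pivoted
# Cholesky of an exactly rank-deficient PSD matrix with ?pstrf; piv is 1-based
# and r_c is the numerical rank, so the trailing block of the factor is
# discarded before recombining, as in the tests above.
def _example_pivoted_cholesky():
    import numpy as np
    from scipy.linalg import get_lapack_funcs
    rng = np.random.default_rng(0)
    n, k = 6, 4
    B = rng.standard_normal((n, k))
    A = B @ B.T                           # positive semidefinite, rank k
    pstrf = get_lapack_funcs('pstrf', dtype=A.dtype)
    c, piv, r_c, info = pstrf(A)
    U = np.triu(c)
    U[r_c:, :] = 0.0                      # drop the unreliable trailing rows
    P = A[piv - 1][:, piv - 1]            # symmetrically permuted A
    assert np.allclose(P, U.T @ U, atol=1e-10)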
def test_geequ():
desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
[1.0000, -0.5619, -1.0000, -1.0000],
[0.5874, -1.0000, -0.0596, -0.5341],
[-1.0000, -0.5946, -0.0294, 0.9957]])
desired_cplx = np.array([[-0.2816+0.5359*1j,
0.0812+0.9188*1j,
-0.7439-0.2561*1j],
[-0.3562-0.2954*1j,
0.9566-0.0434*1j,
-0.0174+0.1555*1j],
[0.8607+0.1393*1j,
-0.2759+0.7241*1j,
-0.1642-0.1365*1j]])
for ind, dtype in enumerate(DTYPES):
if ind < 2:
# Use examples from the NAG documentation
A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
[5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
[1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
[-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
A = A.astype(dtype)
else:
A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
[-1.70e+00, 3.31e+10, -0.15e+00],
[2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
[-1.41e+00, -0.15e+10, 1.34e+00],
[0.39e-10, 1.47e+00, -0.69e-10]])*1j
A = A.astype(dtype)
geequ = get_lapack_funcs('geequ', dtype=dtype)
r, c, rowcnd, colcnd, amax, info = geequ(A)
if ind < 2:
assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
else:
assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
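# Editor's sketch (illustrative, not part of the original suite): applying the
# ?geequ scalings; the equilibrated matrix r[:, None] * A * c has entries of
# magnitude at most (roughly) one, which is what makes the reference values
# above comparable across rows and columns.
def _example_geequ_scaling():
    import numpy as np
    from scipy.linalg import get_lapack_funcs
    A = np.array([[1e8, 2.0], [3.0, 4e-8]])    # badly scaled matrix
    geequ = get_lapack_funcs('geequ', dtype=A.dtype)
    r, c, rowcnd, colcnd, amax, info = geequ(A)
    B = r[:, None] * A * c                     # equilibrated matrix
    assert info == 0
    assert np.abs(B).max() <= 1.0 + 1e-12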
def test_syequb():
desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
for ind, dtype in enumerate(DTYPES):
A = np.eye(10, dtype=dtype)
alpha = dtype(1. if ind < 2 else 1.j)
d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
A += np.rot90(np.diag(d))
syequb = get_lapack_funcs('syequb', dtype=dtype)
s, scond, amax, info = syequb(A)
assert_equal(np.log2(s).astype(int), desired_log2s)
@pytest.mark.skipif(True,
reason="Failing on some OpenBLAS version, see gh-12276")
def test_heequb():
# zheequb has a bug for versions =< LAPACK 3.9.0
# See Reference-LAPACK gh-61 and gh-408
# Hence the zheequb test is customized accordingly to avoid
# work scaling.
A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
s, scond, amax, info = lapack.zheequb(A)
assert_equal(info, 0)
assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
A[5, 5] = 1024
A[5, 0] = 16j
s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
assert_equal(info, 0)
assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
def test_getc2_gesc2():
np.random.seed(42)
n = 10
desired_real = np.random.rand(n)
desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
for ind, dtype in enumerate(DTYPES):
if ind < 2:
A = np.random.rand(n, n)
A = A.astype(dtype)
b = A @ desired_real
b = b.astype(dtype)
else:
A = np.random.rand(n, n) + np.random.rand(n, n)*1j
A = A.astype(dtype)
b = A @ desired_cplx
b = b.astype(dtype)
getc2 = get_lapack_funcs('getc2', dtype=dtype)
gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
if ind < 2:
assert_array_almost_equal(desired_real.astype(dtype),
x/scale, decimal=4)
else:
assert_array_almost_equal(desired_cplx.astype(dtype),
x/scale, decimal=4)
@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R'
@pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N'
@pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N'
@pytest.mark.parametrize('jobr', [0, 1])
@pytest.mark.parametrize('jobp', [0, 1])
def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
"""Test the lapack routine ?gejsv.
This function tests that a singular value decomposition can be performed
on the random M-by-N matrix A. The test performs the SVD using ?gejsv
then performs the following checks:
* ?gejsv exits successfully (info == 0)
* The returned singular values are correct
* `A` can be reconstructed from `u`, `SIGMA`, `v`
* Ensure that u.T @ u is the identity matrix
* Ensure that v.T @ v is the identity matrix
* The reported matrix rank
* The reported number of singular values
* If denormalized floats are required
Notes
-----
joba specifies several choices affecting the calculation's accuracy
Although all arguments are tested, the tests only check that the correct
solution is returned - NOT that the prescribed actions are performed
internally.
jobt is, as of v3.9.0, still experimental and is removed here to cut down the
number of test cases. However, the keyword itself is tested externally.
"""
seed(42)
# Define some constants for later use:
m, n = size
atol = 100 * np.finfo(dtype).eps
A = generate_random_dtype_array(size, dtype)
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# Set up checks for invalid job? combinations
# if an invalid combination occurs we set the appropriate
# exit status.
lsvec = jobu < 2 # Calculate left singular vectors
rsvec = jobv < 2 # Calculate right singular vectors
l2tran = (jobt == 1) and (m == n)
is_complex = np.iscomplexobj(A)
invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
# Set the exit status to the expected value.
# Here we only check for invalid combinations, not individual
# parameters.
if invalid_cplx_jobu:
exit_status = -2
elif invalid_real_jobv or invalid_cplx_jobv:
exit_status = -3
else:
exit_status = 0
if (jobu > 1) and (jobv == 1):
assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
else:
sva, u, v, work, iwork, info = gejsv(A,
joba=joba,
jobu=jobu,
jobv=jobv,
jobr=jobr,
jobt=jobt,
jobp=jobp)
# Check that ?gejsv exited successfully/as expected
assert_equal(info, exit_status)
# If exit_status is non-zero the combination of jobs is invalid.
# We test this above but no calculations are performed.
if not exit_status:
# Check the returned singular values
sigma = (work[0] / work[1]) * sva[:n]
assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
if jobu == 1:
# If JOBU = 'F', then u contains the M-by-M matrix of
# the left singular vectors, including an ONB of the orthogonal
# complement of the Range(A)
# However, to recalculate A we are concerned about the
# first n singular values and so can ignore the latter.
# TODO: Add a test for ONB?
u = u[:, :n]
if lsvec and rsvec:
assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
if lsvec:
assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
if rsvec:
assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
assert_equal(iwork[0], np.linalg.matrix_rank(A))
assert_equal(iwork[1], np.count_nonzero(sigma))
# iwork[2] is non-zero if requested accuracy is not warranted for
# the data. This should never occur for these tests.
assert_equal(iwork[2], 0)
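# Editor's sketch (illustrative, not part of the original suite): a plain ?gejsv
# call with the default job flags, recovering the singular values through the
# work[0]/work[1] scaling used in the checks above and comparing them with the
# standard SVD.
def _example_gejsv():
    import numpy as np
    from scipy.linalg import get_lapack_funcs, svd
    rng = np.random.default_rng(0)
    A = rng.standard_normal((6, 4))
    gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
    sva, u, v, work, iwork, info = gejsv(A)
    sigma = (work[0] / work[1]) * sva[:A.shape[1]]
    assert info == 0
    assert np.allclose(sigma, svd(A, compute_uv=False))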
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_gejsv_edge_arguments(dtype):
"""Test edge arguments return expected status"""
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# scalar A
sva, u, v, work, iwork, info = gejsv(1.)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 1d A
A = np.ones((1,), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 2d empty A
A = np.ones((1, 0), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 0))
assert_equal(v.shape, (1, 0))
assert_equal(sva, np.array([], dtype=dtype))
# make sure "overwrite_a" is respected - user reported in gh-13191
A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
A = np.asfortranarray(A + A.T) # make it symmetric and column major
Ac = A.copy('A')
_ = gejsv(A)
assert_allclose(A, Ac)
@pytest.mark.parametrize(('kwargs'),
({'joba': 9},
{'jobu': 9},
{'jobv': 9},
{'jobr': 9},
{'jobt': 9},
{'jobp': 9})
)
def test_gejsv_invalid_job_arguments(kwargs):
"""Test invalid job arguments raise an Exception"""
A = np.ones((2, 2), dtype=float)
gejsv = get_lapack_funcs('gejsv', dtype=float)
assert_raises(Exception, gejsv, A, **kwargs)
@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
[(np.array([[2.27, -1.54, 1.15, -1.94],
[0.28, -1.67, 0.94, -0.78],
[-0.48, -3.09, 0.99, -0.21],
[1.07, 1.22, 0.79, 0.63],
[-2.35, 2.93, -1.45, 2.30],
[0.62, -7.39, 1.03, -2.57]]),
np.array([9.9966, 3.6831, 1.3569, 0.5000]),
np.array([[0.2774, -0.6003, -0.1277, 0.1323],
[0.2020, -0.0301, 0.2805, 0.7034],
[0.2918, 0.3348, 0.6453, 0.1906],
[-0.0938, -0.3699, 0.6781, -0.5399],
[-0.4213, 0.5266, 0.0413, -0.0575],
[0.7816, 0.3353, -0.1645, -0.3957]]),
np.array([[0.1921, -0.8030, 0.0041, -0.5642],
[-0.8794, -0.3926, -0.0752, 0.2587],
[0.2140, -0.2980, 0.7827, 0.5027],
[-0.3795, 0.3351, 0.6178, -0.6017]]))])
def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
"""
This test implements the example found in the NAG manual, f08khf.
An example was not found for the complex case.
"""
# NAG manual provides accuracy up to 4 decimals
atol = 1e-4
gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_allclose(sva_expect, sva, atol=atol)
assert_allclose(u_expect, u, atol=atol)
assert_allclose(v_expect, v, atol=atol)
@pytest.mark.parametrize("dtype", DTYPES)
def test_gttrf_gttrs(dtype):
# The test uses ?gttrf and ?gttrs to solve a random system for each dtype,
# tests that the outputs of ?gttrf define LU matrices, that input
# parameters are unmodified, that transposal options function correctly, that
# incompatible matrix shapes raise an error, and that singular matrices return
# non-zero info.
seed(42)
n = 10
atol = 100 * np.finfo(dtype).eps
# create the matrix in accordance with the data type
du = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
dl = generate_random_dtype_array((n-1,), dtype=dtype)
diag_cpy = [dl.copy(), d.copy(), du.copy()]
A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
x = np.random.rand(n)
b = A @ x
gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
# test to assure that the inputs of ?gttrf are unmodified
assert_array_equal(dl, diag_cpy[0])
assert_array_equal(d, diag_cpy[1])
assert_array_equal(du, diag_cpy[2])
# generate L and U factors from ?gttrf return values
# L/U are lower/upper triangular by construction (initially and at end)
U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
L = np.eye(n, dtype=dtype)
for i, m in enumerate(_dl):
# L is given in a factored form.
# See
# www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
piv = ipiv[i] - 1
# right multiply by permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# right multiply by Li, rank-one modification of identity
L[:, i] += L[:, i+1]*m
# one last permutation
i, piv = -1, ipiv[-1] - 1
# right multiply by final permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# check that the outputs of ?gttrf define an LU decomposition of A
assert_allclose(A, L @ U, atol=atol)
b_cpy = b.copy()
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
# test that the inputs of ?gttrs are unmodified
assert_array_equal(b, b_cpy)
# test that the result of ?gttrs matches the expected input
assert_allclose(x, x_gttrs, atol=atol)
# test that ?gttrf and ?gttrs work with transposal options
if dtype in REAL_DTYPES:
trans = "T"
b_trans = A.T @ x
else:
trans = "C"
b_trans = A.conj().T @ x
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
assert_allclose(x, x_gttrs, atol=atol)
# test that ValueError is raised with incompatible matrix shapes
with assert_raises(ValueError):
gttrf(dl[:-1], d, du)
with assert_raises(ValueError):
gttrf(dl, d[:-1], du)
with assert_raises(ValueError):
gttrf(dl, d, du[:-1])
# test that matrix of size n=2 raises exception
with assert_raises(Exception):
gttrf(dl[0], d[:1], du[0])
# test that singular (row of all zeroes) matrix fails via info
du[0] = 0
d[0] = 0
__dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
np.testing.assert_(__d[info - 1] == 0,
"?gttrf: _d[info-1] is {}, not the illegal value :0."
.format(__d[info - 1]))
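# Editor's sketch (illustrative, not part of the original suite): factor a
# diagonally dominant tridiagonal system once with ?gttrf and reuse the factors
# for several right-hand sides through ?gttrs, mirroring the solve checked above.
def _example_gttrf_gttrs():
    import numpy as np
    from scipy.linalg import get_lapack_funcs
    rng = np.random.default_rng(0)
    n = 6
    dl, d, du = rng.random(n - 1), rng.random(n) + 4, rng.random(n - 1)
    A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
    gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=d.dtype)
    _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
    assert info == 0
    for _ in range(3):
        b = rng.random(n)
        x, info = gttrs(_dl, _d, _du, du2, ipiv, b)
        assert np.allclose(A @ x, b)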
@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([2.3, -5, -.9, 7.1]),
np.array([3.4, 3.6, 7, -6, -1.015373]),
np.array([-1, 1.9, 8]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.7, 6.6],
[-0.5, 10.8],
[2.6, -3.2],
[0.6, -11.2],
[2.7, 19.1]
]),
np.array([[-4, 5],
[7, -4],
[3, -3],
[-4, -2],
[-3, 1]])),
(
np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j,
-1.3 + 3.3j, - .3 + 4.3j,
-3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
# du exp
np.array([-1.3 + 1.3j, -1.3 + 3.3j,
-0.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
-1.3399 + 0.2875j]),
np.array([2 + 1j, -1 + 1j, 1 - 1j]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, - 6.9 - 5.3j],
[-14.7 + 9.7j, - 6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j],
[3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j],
[-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]])
)])
def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
du2_exp, ipiv_exp, b, x):
# test to assure that wrapper is consistent with NAG Library Manual Mark 26
# example problems: f07cdf and f07cef (real)
# examples: f07crf and f07csf (complex)
# (Links may expire, so search for "NAG Library Manual Mark 26" online)
gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
assert_allclose(du2, du2_exp)
assert_allclose(_du, du_exp)
assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals.
assert_allclose(ipiv, ipiv_exp)
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
assert_allclose(x_gttrs, x)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
def test_geqrfp_lwork(dtype, shape):
geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrfp_lwork(m=m, n=n)
assert_equal(info, 0)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs(ddtype, dtype):
seed(42)
# set test tolerance appropriate for dtype
atol = 100*np.finfo(dtype).eps
# n is the length of the diagonal of A
n = 10
# create diagonals according to size and dtype
# diagonal d should always be real.
# add 4 to d so it will be dominant for all dtypes
d = generate_random_dtype_array((n,), ddtype) + 4
# diagonal e may be real or complex.
e = generate_random_dtype_array((n-1,), dtype)
# assemble diagonals together into matrix
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
# store a copy of diagonals to later verify
diag_cpy = [d.copy(), e.copy()]
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
_d, _e, info = pttrf(d, e)
# test to assure that the inputs of ?pttrf are unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info))
# test that the factors from pttrf can be recombined to make A
L = np.diag(_e, -1) + np.diag(np.ones(n))
D = np.diag(_d)
assert_allclose(A, L@[email protected]().T, atol=atol)
# generate random solution x
x = generate_random_dtype_array((n,), dtype)
# determine accompanying b to get soln x
b = A@x
# determine _x from pttrs
pttrs = get_lapack_funcs('pttrs', dtype=dtype)
_x, info = pttrs(_d, _e.conj(), b)
assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info))
# test that _x from pttrs matches the expected x
assert_allclose(x, _x, atol=atol)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that ValueError is raised with incompatible matrix shapes
assert_raises(ValueError, pttrf, d[:-1], e)
assert_raises(ValueError, pttrf, d, e[:-1])
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that singular (row of all zeroes) matrix fails via info
d[0] = 0
e[0] = 0
_d, _e, info = pttrf(d, e)
assert_equal(_d[info - 1], 0,
"?pttrf: _d[info-1] is {}, not the illegal value :0."
.format(_d[info - 1]))
# test with non-spd matrix
d = generate_random_dtype_array((n,), ddtype)
_d, _e, info = pttrf(d, e)
assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([4, 9, 25, 16, 1]),
np.array([-.5, -.6667, .6, .5]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
[3, -5]])
), (
np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([16, 9, 1, 4]),
np.array([1+1j, 2-1j, 1-4j]),
np.array([[64+16j, -16-32j], [93+62j, 61-66j],
[78-80j, 71-74j], [14-27j, 35+15j]]),
np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
[1-1j, 2+1j]])
)])
def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problems: f07jdf and f07jef (real)
# examples: f07jrf and f07csf (complex)
# NAG examples provide 4 decimals.
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
atol = 1e-4
pttrf = get_lapack_funcs('pttrf', dtype=e[0])
_d, _e, info = pttrf(d, e)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(_e, e_expect, atol=atol)
pttrs = get_lapack_funcs('pttrs', dtype=e[0])
_x, info = pttrs(_d, _e.conj(), b)
assert_allclose(_x, x_expect, atol=atol)
# also test option `lower`
if e.dtype in COMPLEX_DTYPES:
_x, info = pttrs(_d, _e, b, lower=1)
assert_allclose(_x, x_expect, atol=atol)
def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
# used by ?pteqr tests to build parameters
# returns tuple of (d, e, A, z)
if compute_z == 1:
# build Hermitian A from Q**T * tri * Q = A by creating Q and tri
A_eig = generate_random_dtype_array((n, n), dtype)
A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
A_eig = (A_eig + A_eig.conj().T) / 2
# obtain right eigenvectors (orthogonal)
vr = eigh(A_eig)[1]
# create tridiagonal matrix
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), realtype)
tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
# Build A using these factors that sytrd would: (Q**T * tri * Q = A)
A = vr @ tri @ vr.conj().T
# vr is orthogonal
z = vr
else:
# d and e are always real per lapack docs.
d = generate_random_dtype_array((n,), realtype)
e = generate_random_dtype_array((n-1,), realtype)
# make SPD
d = d + 4
A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
return (d, e, A, z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr(dtype, realtype, compute_z):
'''
Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
It generates random SPD matrix diagonals d and e, and then confirms
correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it
tests that z can reform A.
'''
seed(42)
atol = 1000*np.finfo(dtype).eps
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_equal(info, 0, "info = {}, should be 0.".format(info))
# compare the routine's eigenvalues with scipy.linalg.eig's.
assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
if compute_z:
# verify z_pteqr as orthogonal
assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
atol=atol)
# verify that z_pteqr recombines to A
assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
A, atol=atol)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_non_spd(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with non-spd matrix
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with incorrect/incompatible array sizes
assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
if compute_z:
assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_singular(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with singular matrix
d[0] = 0
e[0] = 0
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
[(2, # "I"
np.array([4.16, 5.25, 1.09, .62]),
np.array([3.17, -.97, .55]),
np.array([8.0023, 1.9926, 1.0014, 0.1237]),
np.array([[0.6326, 0.6245, -0.4191, 0.1847],
[0.7668, -0.4270, 0.4176, -0.2352],
[-0.1082, 0.6071, 0.4594, -0.6393],
[-0.0081, 0.2432, 0.6625, 0.7084]])),
])
def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
'''
Implements real (f08jgf) example from NAG Manual Mark 26.
Tests for correct outputs.
'''
# the NAG manual has 4 decimals accuracy
atol = 1e-4
pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
_d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
def test_geqrfp(dtype, matrix_size):
# Tests for all dtypes, tall, wide, and square matrices.
# Using the routine with random matrix A, Q and R are obtained and then
# tested such that R is upper triangular and non-negative on the diagonal,
# and Q is an orthogonal matrix. Verifies that A=Q@R. It also
# tests against a matrix for which the linalg.qr method returns
# negative diagonals, and for error messaging.
# set test tolerance appropriate for dtype
np.random.seed(42)
rtol = 250*np.finfo(dtype).eps
atol = 100*np.finfo(dtype).eps
# get appropriate ?geqrfp for dtype
geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
m, n = matrix_size
# create random matrix of dimensions m x n
A = generate_random_dtype_array((m, n), dtype=dtype)
# create qr matrix using geqrfp
qr_A, tau, info = geqrfp(A)
# obtain r from the upper triangular area
r = np.triu(qr_A)
# obtain q from the orgqr lapack routine
# based on linalg.qr's extraction strategy of q with orgqr
if m > n:
# this adds an extra column to the end of qr_A
# let qqr be an empty m x m matrix
qqr = np.zeros((m, m), dtype=dtype)
# set first n columns of qqr to qr_A
qqr[:, :n] = qr_A
# determine q from this qqr
# note that m is sufficient for lwork based on the LAPACK documentation
q = gqr(qqr, tau=tau, lwork=m)[0]
else:
q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
# test that q and r still make A
assert_allclose(q@r, A, rtol=rtol)
# ensure that q is orthogonal (that q @ transposed q is the identity)
assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
atol=atol)
# ensure r is upper tri by comparing original r to r as upper triangular
assert_allclose(r, np.triu(r), rtol=rtol)
# make sure diagonals of r are positive for this random solution
assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
# ensure that info is zero for this success
assert_(info == 0)
# test that this routine gives r diagonals that are positive for a
# matrix that returns negatives in the diagonal with scipy.linalg.rq
A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
r_rq_neg, q_rq_neg = qr(A_negative)
rq_A_neg, tau_neg, info_neg = geqrfp(A_negative)
# assert that any of the entries on the diagonal from linalg.qr
# are negative and that all of geqrfp are positive.
assert_(np.any(np.diag(r_rq_neg) < 0) and
np.all(np.diag(r) > 0))
def test_geqrfp_errors_with_empty_array():
# check that empty array raises good error message
A_empty = np.array([])
geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
assert_raises(Exception, geqrfp, A_empty)
@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_standard_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
try:
_compute_lwork(sc_dlw, n, lower=1)
_compute_lwork(dz_dlw, n, lower=1)
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("driver", ['gv', 'gvx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_generalized_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
# Shouldn't raise any exceptions
try:
_compute_lwork(sc_dlw, n, uplo="L")
_compute_lwork(dz_dlw, n, uplo="L")
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("dtype_", DTYPES)
@pytest.mark.parametrize("m", [1, 10, 100, 1000])
def test_orcsd_uncsd_lwork(dtype_, m):
seed(1234)
p = randint(0, m)
q = m - p
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
dlw = pfx + 'csd_lwork'
lw = get_lapack_funcs(dlw, dtype=dtype_)
lwval = _compute_lwork(lw, m, p, q)
lwval = lwval if pfx == 'un' else (lwval,)
assert all([x > 0 for x in lwval])
@pytest.mark.parametrize("dtype_", DTYPES)
def test_orcsd_uncsd(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'], lwval))
cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
assert info == 0
U = block_diag(u1, u2)
VH = block_diag(v1t, v2t)
r = min(min(p, q), min(m-p, m-q))
n11 = min(p, q) - r
n12 = min(p, m-q) - r
n21 = min(m-p, q) - r
n22 = min(m-p, m-q) - r
S = np.zeros((m, m), dtype=dtype_)
one = dtype_(1.)
for i in range(n11):
S[i, i] = one
for i in range(n22):
S[p+i, q+i] = one
for i in range(n12):
S[i+n11+r, i+n11+r+n21+n22+r] = -one
for i in range(n21):
S[p+n22+r+i, n11+r+i] = one
for i in range(r):
S[i+n11, i+n11] = np.cos(theta[i])
S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
S[p+n22+i, i+n11] = np.sin(theta[i])
Xc = U @ S @ VH
assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx(dtype, trans_bool, fact):
"""
This test uses ?gtsvx to solve a random Ax=b system for each dtype.
It checks that the outputs define an LU factorization, that the inputs are
unmodified, that transposal options work, that incompatible shapes raise
errors, and that singular matrices and singular factorizations are reported
through info. It parametrizes DTYPES and the 'fact' value along with the
fact-related inputs.
"""
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
b = (A.conj().T if trans_bool else A) @ x
# store a copy of the inputs to check they haven't been modified later
inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_(info == 0, "?gtsvx info = {}, should be zero".format(info))
# assure that inputs are unmodified
assert_array_equal(dl, inputs_cpy[0])
assert_array_equal(d, inputs_cpy[1])
assert_array_equal(du, inputs_cpy[2])
assert_array_equal(b, inputs_cpy[3])
# test that x_soln matches the expected x
assert_allclose(x, x_soln, atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert_(hasattr(rcond, "__len__") is not True,
"rcond should be scalar but is {}".format(rcond))
# ferr should be length of # of cols in x
assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but should be {}"
.format(ferr.shape[0], b.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but should be {}"
.format(berr.shape[0], b.shape[1]))
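# Editor's sketch (illustrative, not part of the original suite): ?gtsvx called
# with its default arguments also returns a reciprocal condition estimate and
# forward/backward error bounds alongside the solution.
def _example_gtsvx():
    import numpy as np
    from scipy.linalg import get_lapack_funcs
    rng = np.random.default_rng(0)
    n = 6
    dl, d, du = rng.random(n - 1), rng.random(n) + 4, rng.random(n - 1)
    A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
    b = A @ rng.random((n, 2))
    gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
    dlf, df, duf, du2, ipiv, x, rcond, ferr, berr, info = gtsvx(dl, d, du, b)
    assert info == 0
    assert np.allclose(A @ x, b)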
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [0, 1])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_singular(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test with singular matrix
# no need to test inputs with fact "F" since ?gttrf already does.
if fact == "N":
# Construct a singular example manually
d[-1] = 0
dl[-1] = 0
# solve using routine
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test for the singular matrix.
assert info > 0, "info should be > 0 for singular matrix"
elif fact == 'F':
# assuming that a singular factorization is input
df_[-1] = 0
duf_[-1] = 0
du2f_[-1] = 0
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# info should not be zero and should provide index of illegal value
assert info > 0, "info should be > 0 for singular matrix"
@pytest.mark.parametrize("dtype", DTYPES*2)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
if fact == "N":
assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
else:
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
@pytest.mark.parametrize("du,d,dl,b,x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
|
np.array([3.4, 3.6, 7.0, -6.0])
|
numpy.array
|
from uma_mot.tracker.Siamese_utils.infer_utils import convert_bbox_format, Rectangle
import numpy as np
class TrackState:
Tracked = 1
Lost = 2
class Track:
def __init__(self, current_target_state, track_bbox, track_id, max_age, payload=None):
self.current_target_state = current_target_state
self.track_bbox = track_bbox
self.track_id = track_id
self.time_since_update = 0
self.state = TrackState.Tracked
self._max_age = max_age
self.overlap_history = [1]
self.average_overlap = 1
self.payload = payload
def predict(self, sess, siamese, input_image):
self.current_target_state, self.track_bbox = siamese.track(sess, self.current_target_state, input_image)
self.time_since_update += 1
def update(self, detection, det_embeding, mode, matched_iou=1.0, frame_rate=30, payload = None):
self.time_since_update = 0
self.payload = payload
if mode == 'tracked':
self.overlap_history.append(1 if matched_iou > 0.5 else 0)
history_length = len(self.overlap_history)
if history_length > 2 * frame_rate:
self.overlap_history.pop(0)
self.average_overlap = sum(self.overlap_history) / min(2 * frame_rate, history_length)
refine_detection = [0.5 * self.track_bbox[0] + 0.5 * detection[0],
0.5 * self.track_bbox[1] + 0.5 * detection[1],
0.5 * self.track_bbox[2] + 0.5 * detection[2],
0.5 * self.track_bbox[3] + 0.5 * detection[3]] # ltrb
self.current_target_state.bbox = convert_bbox_format(Rectangle(refine_detection[0],
refine_detection[1],
refine_detection[2],
refine_detection[3]), 'center-based')
self.track_bbox = np.array(refine_detection) # track result
self.current_target_state.his_feature.append(det_embeding)
if len(self.current_target_state.his_feature) > frame_rate:
self.current_target_state.his_feature.pop(0)
return
if mode == 'recover':
self.state = TrackState.Tracked # re-tracked
init_bb = Rectangle(int(detection[0]) - 1, int(detection[1]) - 1, int(detection[2]), int(detection[3])) # xl, yt, w, h
bbox = convert_bbox_format(init_bb, 'center-based')
self.current_target_state.bbox = bbox
self.current_target_state.reid_templates = det_embeding[0]
self.current_target_state.init_templates = det_embeding[1]
self.current_target_state.scale_idx = int(1)
self.current_target_state.similarity = 1.0
self.current_target_state.original_target_wh = [bbox.width, bbox.height]
self.current_target_state.bbox_in = detection
self.track_bbox =
|
np.array([init_bb.x, init_bb.y, init_bb.width, init_bb.height])
|
numpy.array
|
# -*- coding: utf-8 -*-
## @package inversetoon.batch.generate_isophote_scene
#
# Isophote scene generator.
# @author tody
# @date 2015/07/31
import numpy as np
from inversetoon.batch.batch import normalDataSetBatch
from inversetoon.core.silhouette import silhoutteCurve
from inversetoon.io.image import loadNormal
from inversetoon.core.isophote import isophoteCurves
from inversetoon.cv.light import computeIllumination
from inversetoon.data.isophote_mesh import IsophoteMesh
from inversetoon.data.scene import Scene
from inversetoon.io.isophote import saveSceneData
from inversetoon import datasets
def computeIsophoteCurves(N_32F, L, S_8U):
I_32F = computeIllumination(N_32F, L)
isophotes = isophoteCurves(I_32F, M_8U=S_8U)
for isophote in isophotes:
isophote.setNormalImage(N_32F)
isophote.setLightDir(L)
return I_32F, isophotes
def normalToIsophoteFile(normal_file, scene_file, L1=np.array([-0.5, 0.5, 0.2]), L2=
|
np.array([0.5, 0.5, 0.2])
|
numpy.array
|
# <NAME>
# python 3
# This code is to be run after the main dataframes df_bin_wins and df_tce_wins have already been created ...
# ... Using the file "calc_plot_regional_analysis.py" ...
# ... and the files saved e.g. /global/cscratch1/sd/bharat/add_cmip6_data/CESM2/ssp585/r1i1p1f1/nbp/Stats/"%s_%s_%s_Bin_Stats_Wins_mask.csv"%(source_run,member_run,variable)
# To calculate the spatial metrics of extremes
# - Regional analysis based on SREX regions
# - TCE stats of frequency and length of
# * positive and negative extremes
# - Carbon gains and losses
# - Relative changes to overall changes in NBP flux
# - Extremes happening during CUP and CRp
import matplotlib as mpl
#mpl.use('Agg')
from matplotlib import cm
import numpy as np
import matplotlib.pyplot as plt
from functions import time_dim_dates, index_and_dates_slicing, geo_idx, mpi_local_and_global_index, create_seq_mat, cumsum_lagged,patch_with_gaps_and_eventsize, norm
import netCDF4 as nc4
import datetime as dt
import argparse
import pandas as pd
import os
import seaborn as sns
# Plotting Libraries
# ==================
import cartopy.crs as ccrs
from matplotlib.axes import Axes
#from cartopy.mpl.geoaxes import GeoAxes
#GeoAxes._pcolormesh_patched = Axes.pcolormesh
parser = argparse.ArgumentParser()
parser.add_argument('--anomalies' , '-ano' , help = "Anomalies of carbon cycle variable" , type= str , default= 'gpp' )
parser.add_argument('--variable' , '-var' , help = "Original carbon cycle variable" , type= str , default= 'gpp' )
parser.add_argument('--source' , '-src' , help = "Model (Source_Run)" , type= str , default= 'CESM2' ) # Model Name
parser.add_argument('--member_idx' , '-m_idx' , help = "Member Index" , type= int , default= 0 ) # Index of the member
#parser.add_argument('--plot_win' , '-pwin' , help = "which time period to plot? 2000-24:win 06" , type= int, default= 6 ) # 0 to 9
args = parser.parse_args()
# run plot_regional_analysis_postDF_nbp.py -var nbp -ano nbp -src CESM2 -m_idx 0
print (args)
variable = args.variable
source_run = args.source
member_idx = args.member_idx
# Paths for reading the main files
# --------------------------------
web_path = '/global/homes/b/bharat/results/web/'
cori_scratch = '/global/cscratch1/sd/bharat/'
members_list = os.listdir(cori_scratch+"add_cmip6_data/%s/ssp585/"%source_run)
member_run = members_list[member_idx]
filepaths = {}
filepaths[source_run] = {}
filepaths[source_run][member_run] = {}
filepaths[source_run][member_run]["anomalies"] = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/CESM2_ssp585_%s_%s_anomalies_gC.nc"%(
source_run,member_run, variable,member_run,variable)
filepaths[source_run][member_run]["variable"] = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/CESM2_ssp585_%s_%s_gC.nc"%(
source_run,member_run, variable,member_run,variable)
path_bin = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s_TCE/"%(
source_run,member_run, variable)
# File paths to the binary (without TCE)
filepaths[source_run][member_run]["bin"] = {} # To save the negative bin without TCE anomalies
filepaths[source_run][member_run]["bin"]["neg"] = path_bin + '%s_%s_bin_neg.nc' %(source_run,member_run) # To save the negative bin
filepaths[source_run][member_run]["bin"]["pos"] = path_bin + '%s_%s_bin_pos.nc' %(source_run,member_run) # To save the positive bin
# Filepaths to binary of TCEs
win_start_years = np.arange(1850,2100,25)
filepaths[source_run][member_run]["var_TCE_1s"] = {} # To save the negative bin 1s anomalies
filepaths[source_run][member_run]["var_TCE_1s"]["neg"] = {} # To save the negative bin 1s anos for multiple wins
filepaths[source_run][member_run]["var_TCE_1s"]["pos"] = {} # To save the positive bin 1s anos for multiple wins
for w_idx, wins in enumerate(win_start_years):
filepaths[source_run][member_run]["var_TCE_1s"]["neg"][w_idx] =path_bin + "bin_TCE_1s_neg_%d.nc"%wins
filepaths[source_run][member_run]["var_TCE_1s"]["pos"][w_idx] =path_bin + "bin_TCE_1s_pos_%d.nc"%wins
# Reading nc files
# ----------------
nc_data = {}
nc_data [source_run] = {}
nc_data [source_run] [member_run] = {}
nc_data [source_run] [member_run] ['var'] = nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables[variable]
nc_data [source_run] [member_run] ['sftlf'] = nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables["sftlf"]
nc_data [source_run] [member_run] ['areacella'] = nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables["areacella"]
nc_data [source_run] [member_run] ['lat'] = nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables["lat"]
nc_data [source_run] [member_run] ['lat_bounds']= nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables[
nc_data [source_run] [member_run] ['lat'].bounds]
nc_data [source_run] [member_run] ['lon'] = nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables["lon"]
nc_data [source_run] [member_run] ['lon_bounds']= nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables[
nc_data [source_run] [member_run] ['lon'].bounds]
nc_data [source_run] [member_run] ['time'] = nc4.Dataset(filepaths[source_run][member_run]["variable"]) .variables["time"]
nc_data [source_run] [member_run] ['ano'] = nc4.Dataset(filepaths[source_run][member_run]["anomalies"]) .variables[variable]
# bin without TCE (same shape as gpp/ano):
nc_data [source_run] [member_run] ['bin'] = {}
nc_data [source_run] [member_run] ['bin'] ['neg'] = nc4.Dataset(filepaths[source_run][member_run]["bin"]["neg"]) .variables['%s_bin'%variable]
nc_data [source_run] [member_run] ['bin'] ['pos'] = nc4.Dataset(filepaths[source_run][member_run]["bin"]["pos"]) .variables['%s_bin'%variable]
# bin with TCEs (per win):
nc_data [source_run] [member_run] ['var_TCE_1s'] = {}
nc_data [source_run] [member_run] ['var_TCE_1s'] ["neg"] = {}
nc_data [source_run] [member_run] ['var_TCE_1s'] ["pos"] = {}
for w_idx, wins in enumerate(win_start_years):
nc_data [source_run] [member_run] ['var_TCE_1s'] ["neg"][w_idx] = nc4.Dataset (filepaths[source_run][member_run]["var_TCE_1s"]["neg"][w_idx]).variables['%s_TCE_1s'%variable]
nc_data [source_run] [member_run] ['var_TCE_1s'] ["pos"][w_idx] = nc4.Dataset (filepaths[source_run][member_run]["var_TCE_1s"]["pos"][w_idx]).variables['%s_TCE_1s'%variable]
# SREX regional analysis
# -----------------------
# only for CESM2 to begin and AMZ region
source_run = "CESM2"
member_run = "r1i1p1f1"
import regionmask
lat = nc_data [source_run] [member_run] ['lat']
lon = nc_data [source_run] [member_run] ['lon']
lat_bounds = nc_data [source_run] [member_run] ['lat_bounds']
lon_bounds = nc_data [source_run] [member_run] ['lon_bounds']
lon_edges = np.hstack (( lon_bounds[:,0], lon_bounds[-1,-1]))
lat_edges = np.hstack (( lat_bounds[:,0], lat_bounds[-1,-1]))
# Creating mask of the regions based on the resolution of the model
srex_mask = regionmask.defined_regions.srex.mask(lon[...], lat[...]).values # it has nans
srex_mask_ma= np.ma.masked_invalid(srex_mask) # got rid of nans; values from 1 to 26
# important information:
srex_abr = regionmask.defined_regions.srex.abbrevs
srex_names = regionmask.defined_regions.srex.names
srex_nums = regionmask.defined_regions.srex.numbers
srex_centroids = regionmask.defined_regions.srex.centroids
srex_polygons = regionmask.defined_regions.srex.polygons
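# Editor's sketch (illustrative, not part of the original script): pulling one
# SREX region out of the mask built above and summing a field over it, the same
# masking pattern the per-region loops below rely on.
def _example_region_land_area(region_abr_ex="AMZ"):
    region_number_ex = np.array(srex_nums)[np.array(srex_abr) == region_abr_ex][0]
    region_mask_ex = np.ma.filled(srex_mask_ma, 0) == region_number_ex
    lf_ex = nc_data[source_run][member_run]['sftlf'][...] / 100.0   # CESM2: sftlf in %
    area_ex = nc_data[source_run][member_run]['areacella'][...] * lf_ex
    return float(np.sum(area_ex * region_mask_ex))                  # land area in the region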
"""
Meaning of the stats:
=====================
Common to df_bin_wins and df_tce_wins:
--------------------------------------
* win_idx : The index of the 25 yr time windows starting 1850
* region_abr: The SREX region shortname or abr
Specific to df_bin_wins:
------------------------
* fq_neg/fq_pos : Count of total months affected by neg/pos extremes (non-TCE)
* c_gain/c_loss : Total carbon uptake gain or loss due to positive or negative extremes (non-TCE)
* reg_var : Total GPP irrespective of whether there is a bin or not
* tot_var : Total GPP of the location/pixel for 25 years when at least one extreme has occurred
* tot_var_ext : Total GPP of the location/pixel for 25 years when either a positive or negative carbon extreme has occurred
e.g. if a location witnessed 30 pos and 40 neg extremes, 'tot_var_ext' gives the GPP of those 70 extremes
* area_neg/area_pos : Total area affected by either negative or positive extreme (Areacella * sftlf)
* count_reg : Count of total number of pixels of a region with non-zero carbon flux values
* count_px : Count of total number of pixels for every 25 years when at least one extreme has occurred
"""
# Creating a dataframe to capture the Bin stats (without TCE)
#df_bin_wins = pd.DataFrame(columns = ["win_idx",'region_abr','fq_neg', 'fq_pos', 'c_gain', 'c_loss','reg_var' ,'tot_var', 'tot_var_ext', 'area_neg', 'area_pos'])
# Creating a dataframe to capture the TCE stats
#df_tce_wins = pd.DataFrame(columns = ["win_idx",'region_abr','tce_neg', 'tce_pos', 'tce_len_neg', 'tce_len_pos', 'tce_len_tot','c_gain','c_loss', 'reg_var','tot_var', 'tot_var_ext', 'area_neg','area_pos'])
# Calculation of actual area of land pixels (areacella * sftlf)
# -------------------------------------------------------------
if source_run == 'CESM2':
lf_units = '%'
lf_div = 100
lf = nc_data[source_run][member_run]['sftlf'] [...] /lf_div
area_act = nc_data[source_run][member_run]['areacella'] [...] * lf
# Pixel wise analysis in every region
# for region_abr (this case)
from scipy import ndimage
win_size = 300
count = 0 # Just to check the length of dataframe
for region_abr in srex_abr:
print ("Looking into the Region %s"%region_abr)
# Masking for a particular region : "AMZ"
#not_land = np.ma.masked_equal( nc_data [source_run] [member_run] ['sftlf'][...] , 0 )
srex_idxs = np.arange(len(srex_names))
filter_region = np.array(srex_abr) == region_abr
region_idx = srex_idxs[filter_region][0]
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
region_abr = np.array(srex_abr)[filter_region][0]
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masked everthing but the region; Mask of region is False
region_mask = ~region_mask_not # Only the mask of region is True
# Reading the dataframes
# ======================
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Stats/"%(source_run,member_run, variable)
df_tce_wins = pd.read_csv (path_save + "%s_%s_%s_TCE_Stats_Wins_mask.csv"%(source_run,member_run,variable), index_col=0)
df_bin_wins = pd.read_csv (path_save + "%s_%s_%s_Bin_Stats_Wins_mask.csv"%(source_run,member_run,variable), index_col=0)
# Converting the str of the dataframe to floats
# ---------------------------------------------
cols_tmp = df_bin_wins.columns[2:]
for col_t in cols_tmp:
df_bin_wins.loc[:,col_t] = pd.to_numeric(df_bin_wins.loc[:,col_t], errors = 'coerce')
del cols_tmp, col_t
cols_tmp = df_tce_wins.columns[2:]
for col_t in cols_tmp:
df_tce_wins.loc[:,col_t] = pd.to_numeric(df_tce_wins.loc[:,col_t], errors = 'coerce')
del cols_tmp, col_t
#Main DataFrame : df_tce_wins, df_bin_wins
# Finding the basic statistics of all above metrics for every region and time window
from functions import MaskedConstant_Resolve, MaskedArray_Resolve
# Calculating Summary of stats per region for bin (non-TCE):
# ---------------------------------------------------------
df_bin_summary_wins = pd.DataFrame(columns = ["win_idx" ,'region_abr',
'fq_neg' , 'fq_pos',
'fq_cup_du_neg', 'fq_crp_du_neg',
'fq_cup_du_pos', 'fq_crp_du_pos',
'count_reg' , 'count_px',
'c_gain' , 'c_loss',
'reg_var',
'tot_var',
'tot_var_ext',
'area_neg' , 'area_pos',
'reg_cup' , 'reg_crp',
'tot_cup' , 'tot_crp',
'tot_cup_ext', 'tot_crp_ext'])
for region_abr in srex_abr:
df_bin_summary = pd.DataFrame(columns = ["win_idx" ,'region_abr',
'fq_neg' , 'fq_pos',
'fq_cup_du_neg', 'fq_crp_du_neg',
'fq_cup_du_pos', 'fq_crp_du_pos',
'c_gain' , 'c_loss',
'reg_var',
'tot_var',
'tot_var_ext',
'area_neg' , 'area_pos',
'reg_cup' , 'reg_crp',
'tot_cup' , 'tot_crp',
'tot_cup_ext', 'tot_crp_ext'])
for w_idx, wins in enumerate(win_start_years):
filter_win_region = (df_bin_wins["win_idx"] == w_idx ) & (df_bin_wins["region_abr"] == region_abr)
df_tmp = df_bin_wins [filter_win_region]
bin_stats = {}
for col in df_bin_summary_wins.columns[2:]:
bin_stats[col] = {}
if col in ['count_reg']: # for count we will report mean,max ,sum=count and std = 0
bin_stats[col]['mean'] = df_tmp.loc[:,'reg_var'].count()
bin_stats[col]['std'] = 0
bin_stats[col]['max'] = df_tmp.loc[:,'reg_var'].count()
bin_stats[col]['sum'] = df_tmp.loc[:,'reg_var'].count()
elif col in ['count_px'] :
bin_stats[col]['mean'] = df_tmp.loc[:,'tot_var'].count()
bin_stats[col]['std'] = 0
bin_stats[col]['max'] = df_tmp.loc[:,'tot_var'].count()
bin_stats[col]['sum'] = df_tmp.loc[:,'tot_var'].count()
else:
bin_stats[col]['mean'] = np.ma.mean(MaskedArray_Resolve(df_tmp[col]))
bin_stats[col]['std'] = np.ma.std(MaskedArray_Resolve(df_tmp[col]))
bin_stats[col]['max'] = np.ma.max(MaskedArray_Resolve(df_tmp[col]))
bin_stats[col]['sum'] = np.ma.sum(MaskedArray_Resolve(df_tmp[col]))
for k in bin_stats[col].keys():
if k not in df_bin_summary.index:
df_bin_summary = df_bin_summary.reindex(df_bin_summary.index.values.tolist() + [k])
for k in bin_stats[col].keys():
for col in df_bin_summary_wins.columns[2:]:
df_bin_summary.loc[k,col] = bin_stats[col][k]
df_bin_summary.loc[k,"win_idx"] = w_idx
df_bin_summary.loc[k,"region_abr"] = region_abr
del df_tmp, filter_win_region
# Summary of results for all windows
df_bin_summary_wins = df_bin_summary_wins.append(df_bin_summary)
# Storing the summary of the binary stats
df_bin_summary_wins. to_csv (path_save + "%s_%s_%s_Bin_Stats_Wins_Summary_mask.csv"%(source_run,member_run,variable))
# CALCULATION of Summary of stats per region for bin_TCE (with TCE):
# ------------------------------------------------------------------
df_tce_summary_wins = pd.DataFrame(columns = ["win_idx",'region_abr','tce_neg', 'tce_pos', 'tce_len_neg', 'tce_len_pos', 'tce_len_tot','c_gain','c_loss','reg_var' ,'tot_var', 'tot_var_ext' ,'area_neg','area_pos'])
for region_abr in srex_abr:
df_summary = pd.DataFrame(columns = ["win_idx",'region_abr','tce_neg', 'tce_pos', 'tce_len_neg', 'tce_len_pos', 'tce_len_tot','c_gain','c_loss', 'reg_var' ,'tot_var','tot_var_ext' ,'area_neg','area_pos'])
for w_idx, wins in enumerate(win_start_years):
filter_win_region = (df_tce_wins["win_idx"] == w_idx ) & (df_tce_wins["region_abr"] == region_abr)
df_tmp = df_tce_wins [filter_win_region]
tce_stats = {}
for col in df_tce_summary_wins.columns[2:]:
tce_stats[col] = {}
tce_stats[col]['mean'] = np.ma.mean(MaskedArray_Resolve(df_tmp[col]))
tce_stats[col]['std'] = np.ma.std(MaskedArray_Resolve(df_tmp[col]))
tce_stats[col]['max'] = np.ma.max(MaskedArray_Resolve(df_tmp[col]))
tce_stats[col]['sum'] = np.ma.sum(MaskedArray_Resolve(df_tmp[col]))
for k in tce_stats[col].keys():
if k not in df_summary.index:
df_summary = df_summary.reindex(df_summary.index.values.tolist() + [k])
for k in tce_stats[col].keys():
for col in df_tce_summary_wins.columns[2:]:
df_summary.loc[k,col] = tce_stats[col][k]
df_summary.loc[k,"win_idx"] = w_idx
df_summary.loc[k,"region_abr"] = region_abr
# Summary of results for all windows
df_tce_summary_wins = df_tce_summary_wins.append(df_summary)
# Storing the summary
df_tce_summary_wins.to_csv(path_save + "%s_%s_%s_TCE_Stats_Wins_Summary_mask.csv"%(source_run,member_run,variable))
# Plotting of the region of interest
# ----------------------------------
# Regional analysis
# -----------------
import regionmask
# Cartopy Plotting
# ----------------
import cartopy.crs as ccrs
from shapely.geometry.polygon import Polygon
import cartopy.feature as cfeature
# Plotting the TS of regions:
region_abr = 'CAM'
filter_region = np.array(srex_abr) == region_abr
region_idx = srex_idxs[filter_region][0]
region_number = np.array(srex_nums)[filter_region][0]
region_name = np.array(srex_names)[filter_region][0]
filter_region = df_tce_summary_wins ['region_abr'] == region_abr
web_path = '/global/homes/b/bharat/results/web/'
# For x-axis
in_yr = 1850
win_yr = [str(in_yr+i*25) + '-'+str(in_yr +(i+1)*25-1)[2:] for i in range(win_start_years.size)]
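# e.g. with 25-year windows starting in 1850 this yields labels like ['1850-74', '1875-99', ..., '2075-99']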
# Plotting the Timeseries of Box plot for all regions:
# ----------------------------------------------------
for region_abr in srex_abr:
# TS: Box Plots of Columns:
#region_abr = 'AMZ'
# This filter will only keep the region of interest and mask everything else
filter_region = df_tce_wins['region_abr'] == region_abr
# dropping the region_abr column because it is a str and the df should be float
df_tmp = df_tce_wins[filter_region].drop(columns=['region_abr'])
col_tmp = df_tmp.columns[1:]
for col_t in col_tmp:
df_tmp[col_t] = pd.to_numeric(df_tmp[col_t],errors='coerce')
for idx,w_str in enumerate(win_yr):
col = df_tmp.columns[0]
df_tmp.loc[df_tmp[col] == idx, col] = w_str # improved version of the line below (a single .loc avoids chained assignment)
#df_tmp[col][df_tmp[col]==idx] = w_str
df_tmp.rename(columns={'win_idx':'Wins'}, inplace=True) # Changing the win_idx : "Wins"
for col in df_tmp.columns[1:]:
df_tmp[col] = df_tmp[col].mask(df_tmp[col]==0)
fig,ax = plt.subplots(figsize=(12,5))
ax = sns.boxplot(x="Wins", y=col, data=df_tmp)
fig.savefig(web_path + 'Regional/%s_%s_%s_%s_box.pdf'%(source_run,member_run,region_abr,col))
fig.savefig(path_save + '%s_%s_%s_%s_box.pdf'%(source_run,member_run,region_abr,col))
plt.close(fig)
del df_tmp
# Creating the DataFrame of the stats of interest for regions
# interested in the mean and sum of
# * c_gain, c_loss, tot_var rel%, change in carbon_uptake
# ==========================================================
# Saving carbon loss and gains for every region based on Bin (non-TCE) Stats :
# ----------------------------------------------------------------------------
dict_carbon_bin = {}
for r_idx, region_abr in enumerate(srex_abr):
filter_region = np.array(srex_abr) == region_abr #for finding srex number
region_number = np.array(srex_nums)[filter_region][0] # for srex mask
del filter_region
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masks everything but the region; mask over the region is False
region_mask = ~region_mask_not # Only the mask of region is True
filter_region = df_bin_summary_wins['region_abr'] == region_abr
df_tmp = df_bin_summary_wins[filter_region].drop(columns=['region_abr']).astype(float)
for idx,w_str in enumerate(win_yr):
col = df_tmp.columns[0]
df_tmp.loc[df_tmp[col] == idx, col] = w_str
df_tmp.rename(columns={'win_idx':'Wins'}, inplace=True) # Changing the win_idx : "Wins"
# Only looking at SUM or total change in a region
df_sum = df_tmp[df_tmp.index == 'sum']
# Checking for 'Sum'
df_carbon = pd.DataFrame(columns = ["Uptake_Gain", "Uptake_Loss", "Uptake_Change",
"Regional_%s"%variable.upper() , "%s"%variable.upper(),"%s_du_Ext"%variable.upper(),
"Percent_Gain_du","Percent_Loss_du", "Percent_Gain_reg", "Percent_Loss_reg", "Percent_Gain_px", "Percent_Loss_px"])
for win_str in win_yr:
if win_str not in df_carbon.index:
df_carbon = df_carbon.reindex(df_carbon.index.values.tolist()+[win_str])
for w_idx,win_str in enumerate(df_sum['Wins']):
df_carbon.loc[win_str, "Uptake_Gain"] = (df_sum[df_sum['Wins'] == win_str]['c_gain']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Loss"] = (df_sum[df_sum['Wins'] == win_str]['c_loss']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Change"] = df_carbon.loc[win_str, "Uptake_Gain"] + df_carbon.loc[win_str, "Uptake_Loss"]
df_carbon.loc[win_str, "Regional_%s"%variable.upper()] = np.sum(nc_data [source_run] [member_run] ['var'] [w_idx * win_size: (w_idx+1) * win_size,:,:]* np.array([region_mask]*300))/10**15
df_carbon.loc[win_str, "%s"%variable.upper()] = (df_sum[df_sum['Wins'] == win_str]['tot_var']/10**15).values[0]
df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()] = (df_sum[df_sum['Wins'] == win_str]['tot_var_ext']/10**15).values[0]
# print (region_abr, df_carbon.loc[win_str, "Regional_%s"%variable.upper()], df_carbon.loc[win_str, "%s"%variable.upper()])
df_carbon.loc[win_str, "Percent_Gain_du"] = df_carbon.loc[win_str, "Uptake_Gain"]*100/df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()]
df_carbon.loc[win_str, "Percent_Loss_du"] = df_carbon.loc[win_str, "Uptake_Loss"]*100/df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()]
df_carbon.loc[win_str, "Percent_Gain_reg"] = df_carbon.loc[win_str, "Uptake_Gain"]*100/df_carbon.loc[win_str, "Regional_%s"%variable.upper()]
df_carbon.loc[win_str, "Percent_Loss_reg"] = df_carbon.loc[win_str, "Uptake_Loss"]*100/df_carbon.loc[win_str, "Regional_%s"%variable.upper()]
df_carbon.loc[win_str, "Percent_Gain_px"] = df_carbon.loc[win_str, "Uptake_Gain"]*100/df_carbon.loc[win_str, "%s"%variable.upper()]
df_carbon.loc[win_str, "Percent_Loss_px"] = df_carbon.loc[win_str, "Uptake_Loss"]*100/df_carbon.loc[win_str, "%s"%variable.upper()]
df_carbon.to_csv(web_path + 'Regional/%s_%s_%s_CarbonUptake_PgC.csv'%(source_run,member_run,region_abr))
df_carbon.to_csv(path_save + '%s_%s_%s_CarbonUptake_PgC.csv'%(source_run,member_run,region_abr))
dict_carbon_bin [region_abr] = df_carbon
if r_idx == 0:
df_bin_carbon_all = df_carbon.fillna(0) # df_bin_carbon_all is for global stats of carbon uptake; initializing it
else:
df_bin_carbon_all = df_bin_carbon_all + df_carbon.fillna(0)
del df_carbon,df_tmp, filter_region
# Updating the percent Gain and Loss of carbon uptake of all regions
# -------------------------------------------------------------------
df_bin_carbon_all['Percent_Gain_du'] = df_bin_carbon_all['Uptake_Gain']*100/df_bin_carbon_all['%s_du_Ext'%variable.upper()]
df_bin_carbon_all['Percent_Loss_du'] = df_bin_carbon_all['Uptake_Loss']*100/df_bin_carbon_all['%s_du_Ext'%variable.upper()]
df_bin_carbon_all['Percent_Gain_reg'] = df_bin_carbon_all['Uptake_Gain']*100/df_bin_carbon_all['Regional_%s'%variable.upper()]
df_bin_carbon_all['Percent_Loss_reg'] = df_bin_carbon_all['Uptake_Loss']*100/df_bin_carbon_all['Regional_%s'%variable.upper()]
df_bin_carbon_all['Percent_Gain_px'] = df_bin_carbon_all['Uptake_Gain']*100/df_bin_carbon_all['%s'%variable.upper()]
df_bin_carbon_all['Percent_Loss_px'] = df_bin_carbon_all['Uptake_Loss']*100/df_bin_carbon_all['%s'%variable.upper()]
dict_carbon_bin [ 'ALL'] = df_bin_carbon_all
"""
Meaning of the stats:
=====================
Common to dict_carbon_freq_bin
--------------------------------------
* index : 'Wins' or Str of window range
* key :region_abr or The SREX region shortname or abr
Specific to dict_carbon_freq_bin for every 25 year time window for 'key' regions:
--------------------------------------------------------------------------------
* Uptake_Gain : Sum of anomalies in C flux during positive extremes
* Uptake_Loss : Sum of anomalies in C flux during negative extremes
* Uptake_Change : Uptake_Gain + Uptake_Loss
* Regional_{C-flux} : Sum of all {C-Flux} irrespective with or without an extreme
* Count_Reg : Total count of pixels with non-zero carbon flux values
* {C-flux} : Sum of all {C-Flux} at pixels where at least one extreme has occurred
* Count_px : Total count of pixels where at least one extreme has occurred
* {C-flux}_du_Ext : Sum of all {C-Flux} over the same space-time filter as the extremes, ...
... i.e. where either or both positive and negative carbon extremes have occurred ...
... e.g. if a location witnessed 30 positive and 40 negative extremes, ...
... '{C-flux}_du_Ext' gives the total {C-flux} during these 70 extreme months.
* Count_Neg_Ext : Count of total months affected by negative extremes (non-TCE)
* Count_Pos_Ext : Count of total months affected by positive extremes (non-TCE)
"""
# Saving carbon loss and gains for every region based on Bin (non-TCE) Stats, with Frequency and count of cells:
# --------------------------------------------------------------------------------------------------------------
dict_carbon_freq_bin = {}
for r_idx, region_abr in enumerate(srex_abr):
filter_region = np.array(srex_abr) == region_abr #for finding srex number
region_number = np.array(srex_nums)[filter_region][0] # for srex mask
del filter_region
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masks everything but the region; mask over the region is False
region_mask = ~region_mask_not # Only the mask of region is True
filter_region = df_bin_summary_wins['region_abr'] == region_abr
df_tmp = df_bin_summary_wins[filter_region].drop(columns=['region_abr']).astype(float)
for idx,w_str in enumerate(win_yr):
col = df_tmp.columns[0]
df_tmp.loc[df_tmp[col] == idx, col] = w_str
df_tmp.rename(columns={'win_idx':'Wins'}, inplace=True) # Changing the win_idx : "Wins"
# Only looking at SUM or total change in a region
df_sum = df_tmp[df_tmp.index == 'sum']
# Checking for 'Sum'
df_carbon = pd.DataFrame(columns = ["Uptake_Gain", "Uptake_Loss", "Uptake_Change",
"Regional_%s"%variable.upper() ,"Count_Reg", "%s"%variable.upper(), "Count_px","%s_du_Ext"%variable.upper(),
"Count_Neg_Ext","Count_Pos_Ext"])
for win_str in win_yr:
if win_str not in df_carbon.index:
df_carbon = df_carbon.reindex(df_carbon.index.values.tolist()+[win_str])
for w_idx,win_str in enumerate(df_sum['Wins']):
df_carbon.loc[win_str, "Uptake_Gain"] = (df_sum[df_sum['Wins'] == win_str]['c_gain']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Loss"] = (df_sum[df_sum['Wins'] == win_str]['c_loss']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Change"] = df_carbon.loc[win_str, "Uptake_Gain"] + df_carbon.loc[win_str, "Uptake_Loss"]
df_carbon.loc[win_str, "Regional_%s"%variable.upper()] = np.sum(nc_data [source_run] [member_run] ['var'] [w_idx * win_size: (w_idx+1) * win_size,:,:]* np.array([region_mask]*300))/10**15
df_carbon.loc[win_str, "%s"%variable.upper()] = (df_sum[df_sum['Wins'] == win_str]['tot_var']/10**15).values[0]
df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()] = (df_sum[df_sum['Wins'] == win_str]['tot_var_ext']/10**15).values[0]
df_carbon.loc[win_str, "CUP_reg"] = (df_sum[df_sum['Wins'] == win_str]['reg_cup']/10**15).values[0]
df_carbon.loc[win_str, "CRP_reg"] = (df_sum[df_sum['Wins'] == win_str]['reg_crp']/10**15).values[0]
df_carbon.loc[win_str, "CRP_px"] = (df_sum[df_sum['Wins'] == win_str]['tot_crp']/10**15).values[0]
df_carbon.loc[win_str, "CUP_px"] = (df_sum[df_sum['Wins'] == win_str]['tot_cup']/10**15).values[0]
df_carbon.loc[win_str, "CUP_du_Ext"] = (df_sum[df_sum['Wins'] == win_str]['tot_cup_ext']/10**15).values[0]
df_carbon.loc[win_str, "CRP_du_Ext"] = (df_sum[df_sum['Wins'] == win_str]['tot_crp_ext']/10**15).values[0]
df_carbon.loc[win_str, "Count_Reg"] = (df_sum[df_sum['Wins'] == win_str]['count_reg']) .values[0]
df_carbon.loc[win_str, "Count_px"] = (df_sum[df_sum['Wins'] == win_str]['count_px' ]) .values[0]
df_carbon.loc[win_str, "Count_Neg_Ext"] = (df_sum[df_sum['Wins'] == win_str]['fq_neg' ]) .values[0]
df_carbon.loc[win_str, "Count_Pos_Ext"] = (df_sum[df_sum['Wins'] == win_str]['fq_pos' ]) .values[0]
df_carbon.loc[win_str, "Count_CUP_du_Neg_Ext"] = (df_sum[df_sum['Wins'] == win_str]['fq_cup_du_neg' ]) .values[0]
df_carbon.loc[win_str, "Count_CRP_du_Neg_Ext"] = (df_sum[df_sum['Wins'] == win_str]['fq_crp_du_neg' ]) .values[0]
df_carbon.loc[win_str, "Count_CUP_du_Pos_Ext"] = (df_sum[df_sum['Wins'] == win_str]['fq_cup_du_pos' ]) .values[0]
df_carbon.loc[win_str, "Count_CRP_du_Pos_Ext"] = (df_sum[df_sum['Wins'] == win_str]['fq_crp_du_pos' ]) .values[0]
df_carbon.to_csv(web_path + 'Regional/%s_%s_%s_CarbonUptake_Freq.csv'%(source_run,member_run,region_abr))
df_carbon.to_csv(path_save + '%s_%s_%s_CarbonUptake_Freq.csv'%(source_run,member_run,region_abr))
dict_carbon_freq_bin [region_abr] = df_carbon
if r_idx == 0:
df_bin_carbon_all = df_carbon.fillna(0) # df_bin_carbon_all is for global stats of carbon uptake; initializing it
else:
df_bin_carbon_all = df_bin_carbon_all + df_carbon.fillna(0)
del df_carbon,df_tmp, filter_region
# Storing the all-region (element-wise summed) totals
# ---------------------------------------------------
dict_carbon_freq_bin [ 'ALL'] = df_bin_carbon_all
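# Example lookups (illustrative; window labels follow win_yr built above):
#
#   dict_carbon_freq_bin['AMZ'].loc['2000-24', 'Count_Neg_Ext']   # months under negative extremes
#   dict_carbon_freq_bin['ALL']['Uptake_Change']                   # net uptake change over all regions (PgC)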
# Saving carbon loss and gains for every region based on TCE Stats, with Frequency and count of cells:
# --------------------------------------------------------------------------------------------------------------
dict_carbon_freq_tce = {}
for r_idx, region_abr in enumerate(srex_abr):
filter_region = np.array(srex_abr) == region_abr #for finding srex number
region_number = np.array(srex_nums)[filter_region][0] # for srex mask
del filter_region
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masks everything but the region; mask over the region is False
region_mask = ~region_mask_not # Only the mask of region is True
filter_region = df_tce_summary_wins['region_abr'] == region_abr
df_tmp = df_tce_summary_wins[filter_region].drop(columns=['region_abr']).astype(float)
for idx,w_str in enumerate(win_yr):
col = df_tmp.columns[0]
df_tmp.loc[df_tmp[col] == idx, col] = w_str
df_tmp.rename(columns={'win_idx':'Wins'}, inplace=True) # Changing the win_idx : "Wins"
# Only looking at SUM or total change in a region
df_sum = df_tmp[df_tmp.index == 'sum']
# Checking for 'Sum'
df_carbon = pd.DataFrame(columns = ["Uptake_Gain", "Uptake_Loss", "Uptake_Change",
"Len_Neg_TCE", "Len_Pos_TCE", "Count_Neg_TCE", "Count_Pos_TCE"])
for win_str in win_yr:
if win_str not in df_carbon.index:
df_carbon = df_carbon.reindex(df_carbon.index.values.tolist()+[win_str])
for w_idx,win_str in enumerate(df_sum['Wins']):
df_carbon.loc[win_str, "Uptake_Gain"] = (df_sum[df_sum['Wins'] == win_str]['c_gain']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Loss"] = (df_sum[df_sum['Wins'] == win_str]['c_loss']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Change"] = df_carbon.loc[win_str, "Uptake_Gain"] + df_carbon.loc[win_str, "Uptake_Loss"]
df_carbon.loc[win_str, "Len_Neg_TCE"] = (df_sum[df_sum['Wins'] == win_str]['tce_len_neg']) .values[0]
df_carbon.loc[win_str, "Len_Pos_TCE"] = (df_sum[df_sum['Wins'] == win_str]['tce_len_pos']) .values[0]
df_carbon.loc[win_str, "Count_Neg_TCE"] = (df_sum[df_sum['Wins'] == win_str]['tce_neg' ]) .values[0]
df_carbon.loc[win_str, "Count_Pos_TCE"] = (df_sum[df_sum['Wins'] == win_str]['tce_pos' ]) .values[0]
df_carbon.to_csv(web_path + 'Regional/%s_%s_%s_CarbonUptake_Freq_tce.csv'%(source_run,member_run,region_abr))
df_carbon.to_csv(path_save + '%s_%s_%s_CarbonUptake_Freq_tce.csv'%(source_run,member_run,region_abr))
dict_carbon_freq_tce [region_abr] = df_carbon
if r_idx == 0:
df_tce_carbon_all = df_carbon.fillna(0) # df_tce_carbon_all is for global TCE stats of carbon uptake; initializing it
else:
df_tce_carbon_all = df_tce_carbon_all + df_carbon.fillna(0)
del df_carbon,df_tmp, filter_region
# Storing the all-region (element-wise summed) TCE totals
# -------------------------------------------------------
dict_carbon_freq_tce [ 'ALL'] = df_tce_carbon_all
# Saving carbon loss and gains for every region based on TCE Stats :
dict_carbon_tce = {}
for r_idx, region_abr in enumerate(srex_abr):
#region_abr = 'AMZ'
filter_region = np.array(srex_abr) == region_abr #for finding srex number
region_number = np.array(srex_nums)[filter_region][0] # for srex mask
del filter_region
region_mask_not = np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Masks everything but the region; mask over the region is False
region_mask = ~region_mask_not # Only the mask of region is True
filter_region = df_tce_summary_wins['region_abr'] == region_abr
df_tmp = df_tce_summary_wins[filter_region].drop(columns=['region_abr']).astype(float)
for idx,w_str in enumerate(win_yr):
col = df_tmp.columns[0]
df_tmp.loc[df_tmp[col] == idx, col] = w_str
df_tmp.rename(columns={'win_idx':'Wins'}, inplace=True) # Changing the win_idx : "Wins"
# Only looking at SUM or total change in a region
df_sum = df_tmp[df_tmp.index == 'sum']
# Checking for 'Sum'
df_carbon = pd.DataFrame(columns = ["Uptake_Gain", "Uptake_Loss", "Uptake_Change",
"Regional_%s"%variable.upper() , "%s"%variable.upper(),"%s_du_Ext"%variable.upper(),
"Percent_Gain_du","Percent_Loss_du", "Percent_Gain_reg", "Percent_Loss_reg","Percent_Gain_px", "Percent_Loss_px"])
for win_str in win_yr:
if win_str not in df_carbon.index:
df_carbon = df_carbon.reindex(df_carbon.index.values.tolist()+[win_str])
for w_idx,win_str in enumerate(df_sum['Wins']): # enumerate so w_idx tracks the window (otherwise a stale w_idx from the previous loop is used below)
df_carbon.loc[win_str, "Uptake_Gain"] = (df_sum[df_sum['Wins'] == win_str]['c_gain']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Loss"] = (df_sum[df_sum['Wins'] == win_str]['c_loss']/10**15).values[0]
df_carbon.loc[win_str, "Uptake_Change"] = df_carbon.loc[win_str, "Uptake_Gain"] + df_carbon.loc[win_str, "Uptake_Loss"]
df_carbon.loc[win_str, "Regional_%s"%variable.upper()] = np.sum(nc_data [source_run] [member_run] ['var'] [w_idx * win_size: (w_idx+1) * win_size,:,:]* np.array([region_mask]*300))/10**15
df_carbon.loc[win_str, "%s"%variable.upper()] = (df_sum[df_sum['Wins'] == win_str]['tot_var']/10**15).values[0]
df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()] = (df_sum[df_sum['Wins'] == win_str]['tot_var_ext']/10**15).values[0]
df_carbon.loc[win_str, "Percent_Gain_du"] = df_carbon.loc[win_str, "Uptake_Gain"]*100/df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()]
df_carbon.loc[win_str, "Percent_Loss_du"] = df_carbon.loc[win_str, "Uptake_Loss"]*100/df_carbon.loc[win_str, "%s_du_Ext"%variable.upper()]
df_carbon.loc[win_str, "Percent_Gain_reg"] = df_carbon.loc[win_str, "Uptake_Gain"]*100/df_carbon.loc[win_str, "Regional_%s"%variable.upper()]
df_carbon.loc[win_str, "Percent_Loss_reg"] = df_carbon.loc[win_str, "Uptake_Loss"]*100/df_carbon.loc[win_str, "Regional_%s"%variable.upper()]
df_carbon.loc[win_str, "Percent_Gain_px"] = df_carbon.loc[win_str, "Uptake_Gain"]*100/df_carbon.loc[win_str, "%s"%variable.upper()]
df_carbon.loc[win_str, "Percent_Loss_px"] = df_carbon.loc[win_str, "Uptake_Loss"]*100/df_carbon.loc[win_str, "%s"%variable.upper()]
df_carbon.to_csv(web_path + 'Regional/%s_%s_%s_CarbonUptake_PgC.csv'%(source_run,member_run,region_abr))
df_carbon.to_csv(path_save + '%s_%s_%s_CarbonUptake_PgC.csv'%(source_run,member_run,region_abr))
dict_carbon_tce [region_abr] = df_carbon
if r_idx == 0:
# .fillna(0) replaces NaN with 0 so that the element-wise addition below yields valid (non-NaN) numbers
df_tce_carbon_all = df_carbon.fillna(0) # df_tce_carbon_all is for global stats of carbon uptake; intializing it
else:
df_tce_carbon_all = df_tce_carbon_all + df_carbon.fillna(0)
del df_carbon
# Updating the percent Gain and Loss of carbon uptake
# ---------------------------------------------------
df_tce_carbon_all['Percent_Gain_du'] = df_tce_carbon_all['Uptake_Gain']*100/df_tce_carbon_all['%s_du_Ext'%variable.upper()]
df_tce_carbon_all['Percent_Loss_du'] = df_tce_carbon_all['Uptake_Loss']*100/df_tce_carbon_all['%s_du_Ext'%variable.upper()]
df_tce_carbon_all['Percent_Gain_reg'] = df_tce_carbon_all['Uptake_Gain']*100/df_tce_carbon_all['Regional_%s'%variable.upper()]
df_tce_carbon_all['Percent_Loss_reg'] = df_tce_carbon_all['Uptake_Loss']*100/df_tce_carbon_all['Regional_%s'%variable.upper()]
df_tce_carbon_all['Percent_Gain_px'] = df_tce_carbon_all['Uptake_Gain']*100/df_tce_carbon_all['%s'%variable.upper()]
df_tce_carbon_all['Percent_Loss_px'] = df_tce_carbon_all['Uptake_Loss']*100/df_tce_carbon_all['%s'%variable.upper()]
dict_carbon_tce [ 'ALL'] = df_tce_carbon_all
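# Minimal helper sketch (hypothetical, not used by the script; the percentages above are
# computed inline): express an uptake gain/loss as a percent of a reference C flux (both in PgC).
def _percent_of(uptake_pgc, reference_pgc):
    return uptake_pgc * 100.0 / reference_pgc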
"""
# ploting the Normalized and GPP of all regions for TCE Stats
# -----------------------------------------------------------
x = dict_carbon_tce [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col', sharey='row',
gridspec_kw={'hspace': 0, 'wspace': 0})
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_tce.keys()):
axs[k_idx] .plot(x, norm(dict_carbon_tce[key]['%s'%variable.upper()]), 'k', linewidth = 0.6 ,label = key)
axs[k_idx] .plot(x, norm(abs(dict_carbon_tce[key]['Percent_Loss'])) , 'r--', linewidth = 0.4)
axs[k_idx] .plot(x, norm(abs(dict_carbon_tce[key]['Percent_Gain'])) , 'g--', linewidth = 0.4)
axs[k_idx] . legend(loc="upper left")
for ax in axs.flat:
ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
fig.savefig(web_path+'Regional/%s_Norm_%s.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_Norm_%s_Regions.pdf'%(source_run,variable.upper()))
plt.close(fig)
"""
"""
# Plotting the real GPP and Percent Change in Carbon Uptake during TCE
# --------------------------------------------------------------------
x = dict_carbon_tce [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
plt.title ("%s and Percent Carbon Uptake and Loss"%(variable.upper()))
txt ="The left y-axis denotes the total %s in the region per 25 years; Units: PgC\n"%variable.upper()
txt+="The right y-axis denotes the percent carbon uptake w.r.t. to total %s\n"%variable.upper()
txt+="Red and Green represents Percent Loss and Gain in Carbon Uptake\n"
txt+="The carbon uptake is calclated during TCEs"
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_tce.keys()):
axs[k_idx] .plot(x, dict_carbon_tce[key]['%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key)
ar= np.array([abs(dict_carbon_tce[key]['Percent_Loss'].min()),
abs(dict_carbon_tce[key]['Percent_Loss'].max()),
abs(dict_carbon_tce[key]['Percent_Gain'].max()),
abs(dict_carbon_tce[key]['Percent_Gain'].min())])
ax1 = axs[k_idx] .twinx()
ax2 = axs[k_idx] .twinx()
ax1 .plot(x, abs(dict_carbon_tce[key]['Percent_Loss']) , 'r--', linewidth = 0.4)
ax2 .plot(x, abs(dict_carbon_tce[key]['Percent_Gain']) , 'g--', linewidth = 0.4)
ax1.set_ylim(ar.min(),ar.max())
ax2.set_ylim(ar.min(),ar.max())
axs[k_idx] . legend(loc="upper left")
#for ax in axs.flat:
# ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12) #Caption
fig.savefig(web_path+'Regional/%s_%s_per_Uptake_TCE.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_%s_per_Uptake_Regions_TCE.pdf'%(source_run,variable.upper()))
plt.close(fig)
"""
# Plotting the real GPP and Percent Change in Carbon Uptake for TCE + Bin w.r.t. Total Regional GPP
# -------------------------------------------------------------------------------------------------
x = dict_carbon_bin [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
txt ="The left y-axis denotes the total %s in the region per 25 years; Units: PgC\n"%variable.upper()
txt+="The right y-axis denotes the percent carbon uptake w.r.t. to total regional %s\n"%variable.upper()
txt+="Red and Green represents Percent Loss and Gain in Carbon Uptake\n"
txt+="The carbon uptake during TCE and bin extremes is shown by dashed and solid lines"
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_bin.keys()):
axs[k_idx] .plot(x, dict_carbon_bin[key]['%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key)
ar= np.array([abs(dict_carbon_bin[key]['Percent_Loss_reg'].min()),
abs(dict_carbon_bin[key]['Percent_Loss_reg'].max()),
abs(dict_carbon_bin[key]['Percent_Gain_reg'].max()),
abs(dict_carbon_bin[key]['Percent_Gain_reg'].min()),
abs(dict_carbon_tce[key]['Percent_Loss_reg'].min()),
abs(dict_carbon_tce[key]['Percent_Loss_reg'].max()),
abs(dict_carbon_tce[key]['Percent_Gain_reg'].max()),
abs(dict_carbon_tce[key]['Percent_Gain_reg'].min())])
ax1 = axs[k_idx] .twinx() # for representing percent carbon loss during TCE
ax2 = axs[k_idx] .twinx() # for representing percent carbon gain during TCE
ax3 = axs[k_idx] .twinx() # for representing percent carbon loss during BIN
ax4 = axs[k_idx] .twinx() # for representing percent carbon gain during BIN
ax1 .plot(x, abs(dict_carbon_tce[key]['Percent_Loss_reg']) , 'r--', linewidth = 0.4)
ax2 .plot(x, abs(dict_carbon_tce[key]['Percent_Gain_reg']) , 'g--', linewidth = 0.4)
ax1 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax2 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax3 .plot(x, abs(dict_carbon_bin[key]['Percent_Loss_reg']) , 'r', linewidth = 0.4)
ax4 .plot(x, abs(dict_carbon_bin[key]['Percent_Gain_reg']) , 'g', linewidth = 0.4)
ax3 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax4 .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] . legend(loc="upper left")
#for ax in axs.flat:
# ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
plt.suptitle ("Percent Uptake in carbon w.r.t. Total Regional %s"%variable.upper())
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=7) #Caption
fig.savefig(web_path+'Regional/%s_%s_per_Uptake_regional.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_%s_per_Uptake_Regional.pdf'%(source_run,variable.upper()))
plt.close(fig)
# Plotting the real GPP and Percent Change in Carbon Uptake for TCE + Bin w.r.t. Total GPP during extremes for every region
# -------------------------------------------------------------------------------------------------------------------------
x = dict_carbon_bin [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
txt ="The left y-axis denotes the total %s in the region per 25 years; Units: PgC\n"%variable.upper()
txt+="The right y-axis denotes the percent carbon uptake w.r.t. to %s during extremes\n"%variable.upper()
txt+="Red and Green represents Percent Loss and Gain in Carbon Uptake\n"
txt+="The carbon uptake during TCE and bin extremes is shown by dashed and solid lines"
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_bin.keys()):
axs[k_idx] .plot(x, dict_carbon_bin[key]['%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key)
ar= np.array([abs(dict_carbon_bin[key]['Percent_Loss_du'].min()),
abs(dict_carbon_bin[key]['Percent_Loss_du'].max()),
abs(dict_carbon_bin[key]['Percent_Gain_du'].max()),
abs(dict_carbon_bin[key]['Percent_Gain_du'].min()),
abs(dict_carbon_tce[key]['Percent_Loss_du'].min()),
abs(dict_carbon_tce[key]['Percent_Loss_du'].max()),
abs(dict_carbon_tce[key]['Percent_Gain_du'].max()),
abs(dict_carbon_tce[key]['Percent_Gain_du'].min())])
ax1 = axs[k_idx] .twinx() # for representing percent carbon loss during TCE
ax2 = axs[k_idx] .twinx() # for representing percent carbon gain during TCE
ax3 = axs[k_idx] .twinx() # for representing percent carbon loss during BIN
ax4 = axs[k_idx] .twinx() # for representing percent carbon gain during BIN
ax1 .plot(x, abs(dict_carbon_tce[key]['Percent_Loss_du']) , 'r--', linewidth = 0.4)
ax2 .plot(x, abs(dict_carbon_tce[key]['Percent_Gain_du']) , 'g--', linewidth = 0.4)
ax1 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax2 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax3 .plot(x, abs(dict_carbon_bin[key]['Percent_Loss_du']) , 'r', linewidth = 0.4)
ax4 .plot(x, abs(dict_carbon_bin[key]['Percent_Gain_du']) , 'g', linewidth = 0.4)
ax3 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax4 .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] . legend(loc="upper left")
#for ax in axs.flat:
# ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
plt.suptitle ("Total Carbon Loss or Gain during %s TCEs"%variable.upper())
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=8) #Caption
fig.savefig(web_path+'Regional/%s_%s_per_Uptake_regional_du_ext.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_%s_per_Uptake_Regional_du_ext.pdf'%(source_run,variable.upper()))
plt.close(fig)
# Plotting the real GPP and Percent Change in Carbon Uptake for TCE + Bin w.r.t. Total GPP during extremes for every region
# -------------------------------------------------------------------------------------------------------------------------
x = dict_carbon_bin [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
txt ="The left y-axis denotes the total %s in the region per 25 years; Units: PgC\n"%variable.upper()
txt+="The right y-axis denotes the percent carbon uptake w.r.t. to %s during extremes\n"%variable.upper()
txt+="Red and Green represents Loss and Gain in Carbon Uptake\n"
txt+="The carbon uptake during TCE and bin extremes is shown by dashed and solid lines"
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_bin.keys()):
axs[k_idx] .plot(x, dict_carbon_bin[key]['%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key)
ar= np.array([abs(dict_carbon_bin[key]['Uptake_Loss'].min()),
abs(dict_carbon_bin[key]['Uptake_Loss'].max()),
abs(dict_carbon_bin[key]['Uptake_Gain'].max()),
abs(dict_carbon_bin[key]['Uptake_Gain'].min()),
abs(dict_carbon_tce[key]['Uptake_Loss'].min()),
abs(dict_carbon_tce[key]['Uptake_Loss'].max()),
abs(dict_carbon_tce[key]['Uptake_Gain'].max()),
abs(dict_carbon_tce[key]['Uptake_Gain'].min())])
ax1 = axs[k_idx] .twinx() # for representing carbon uptake loss during TCE (PgC)
ax2 = axs[k_idx] .twinx() # for representing carbon uptake gain during TCE (PgC)
ax3 = axs[k_idx] .twinx() # for representing carbon uptake loss during BIN (PgC)
ax4 = axs[k_idx] .twinx() # for representing carbon uptake gain during BIN (PgC)
ax1 .plot(x, abs(dict_carbon_tce[key]['Uptake_Loss']) , 'r--', linewidth = 0.4)
ax2 .plot(x, abs(dict_carbon_tce[key]['Uptake_Gain']) , 'g--', linewidth = 0.4)
ax1 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax2 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax3 .plot(x, abs(dict_carbon_bin[key]['Uptake_Loss']) , 'r', linewidth = 0.4)
ax4 .plot(x, abs(dict_carbon_bin[key]['Uptake_Gain']) , 'g', linewidth = 0.4)
ax3 .set_ylim(ar.min()*.95,ar.max()*1.05)
ax4 .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] . legend(loc="upper left")
#for ax in axs.flat:
# ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
plt.suptitle ("Uptake in Carbon %s"%variable.upper())
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=8) #Caption
fig.savefig(web_path+'Regional/%s_%s_Total_Uptake_PgC.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_%s_Total_Uptake_Regions_PgC.pdf'%(source_run,variable.upper()))
plt.close(fig)
# Plotting the GPP during extremes and for all times in pixels with at least one extreme for every region
# -------------------------------------------------------------------------------------------------------------------------
x = dict_carbon_bin [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
plt.suptitle ("%s in Regions during extremes"%variable.upper())
txt ="The left y-axis denotes the total %s in the region per 25 years when pixel have at least one extreme\n"%variable.upper()
txt+="The right y-axis denotes the total %s in the region per 25 years in pixels during extremes\n"%variable.upper()
txt+="Blue and green represents TCE and Binary extremes\n"
txt+="The dashed and solid lines represent %s during extreme and for all times in a pixel with atleast one extreme "%variable.upper()
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_bin.keys()):
#axs[k_idx] .plot(x, dict_carbon_bin[key]['Regional_%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key) #Regional GPP
ar0 = np.array([abs(dict_carbon_bin[key]['%s_du_Ext'%variable.upper()].max()),
abs(dict_carbon_bin[key]['%s_du_Ext'%variable.upper()].min()),
abs(dict_carbon_tce[key]['%s_du_Ext'%variable.upper()].max()),
abs(dict_carbon_tce[key]['%s_du_Ext'%variable.upper()].min())])
ar= np.array([abs(dict_carbon_bin[key]['%s'%variable.upper()].min()),
abs(dict_carbon_bin[key]['%s'%variable.upper()].max()),
abs(dict_carbon_tce[key]['%s'%variable.upper()].min()),
abs(dict_carbon_tce[key]['%s'%variable.upper()].max())])
ax1 = axs[k_idx] .twinx() # for representing C flux during extremes for TCE
ax2 = axs[k_idx] .twinx() # for representing C flux during extremes for BIN
#ax3 = axs[k_idx] .twinx() # for representing percent carbon gain during BIN
#ax4 = axs[k_idx] .twinx() # for representing percent carbon loss during BIN
ax1 .plot(x, abs(dict_carbon_tce[key]['%s_du_Ext'%variable.upper()]) , 'b--', linewidth = 0.4)
ax2 .plot(x, abs(dict_carbon_bin[key]['%s_du_Ext'%variable.upper()]) , 'g--', linewidth = 0.4)
ax1 .set_ylim(ar0.min()*.95,ar0.max()*1.05)
ax2 .set_ylim(ar0.min()*.95,ar0.max()*1.05)
axs[k_idx].plot(x, abs(dict_carbon_tce[key]['%s'%variable.upper()]) , 'b', linewidth = 0.4, label = key)
axs[k_idx] .plot(x, abs(dict_carbon_bin[key]['%s'%variable.upper()]) , 'g', linewidth = 0.4)
axs[k_idx] .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] . legend(loc="upper left")
#ax3 .plot(x, dict_carbon_bin[key]['Regional_%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key) #Regional GPP
#for ax in axs.flat:
# ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=8) #Caption
fig.savefig(web_path+'Regional/%s_%s_GPP_du_Extremes_PgC.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_%s_Total_GPP_du_Extremes_PgC.pdf'%(source_run,variable.upper()))
plt.close(fig)
# Plotting the Carbon Uptake losses and gains for with TCE and without TCEs
# -------------------------------------------------------------------------------------------------------------------------
x = dict_carbon_bin [ 'ALL'].index
import pylab as plot
params = {'legend.fontsize': 6,
'legend.handlelength': 1,
'legend.frameon': 'False',
'axes.labelsize':'small',
'ytick.labelsize': 'small',
'font.size':5 }
plot.rcParams.update(params)
fig, axs = plt.subplots(nrows=9, ncols=3, sharex='col',
gridspec_kw={'hspace': .4, 'wspace': .4}, figsize=(6,9))
plt.suptitle ("%s Uptake loss and gain with TCE and BIN extremes"%variable.upper())
txt ="The left y-axis denotes the total %s Uptake loss and gains during binary extremes\n"%variable.upper()
txt+="The right y-axis denotes the total %s Uptake loss and gains during TCE extremes\n"%variable.upper()
txt+="Red and green represents uptakes losses and gains; units PgC \n"
txt+="The dashed and solid lines represent TCE and Bin extremes"
axs = axs.ravel()
for k_idx, key in enumerate(dict_carbon_bin.keys()):
#axs[k_idx] .plot(x, dict_carbon_bin[key]['Regional_%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key) #Regional GPP
ar = np.array([abs(dict_carbon_bin[key]['Uptake_Gain'].max()),
abs(dict_carbon_bin[key]['Uptake_Gain'].min()),
abs(dict_carbon_bin[key]['Uptake_Loss'].max()),
abs(dict_carbon_bin[key]['Uptake_Loss'].min())])
ar0 = np.array([abs(dict_carbon_tce[key]['Uptake_Gain'].max()),
abs(dict_carbon_tce[key]['Uptake_Gain'].min()),
abs(dict_carbon_tce[key]['Uptake_Loss'].max()),
abs(dict_carbon_tce[key]['Uptake_Loss'].min())])
ax1 = axs[k_idx] .twinx() # for representing carbon uptake gain during TCE (PgC)
ax2 = axs[k_idx] .twinx() # for representing carbon uptake loss during TCE (PgC)
#ax3 = axs[k_idx] .twinx() # for representing carbon uptake gain during BIN
#ax4 = axs[k_idx] .twinx() # for representing carbon uptake loss during BIN
ax1 .plot(x, abs(dict_carbon_tce[key]['Uptake_Gain']) , 'g--', linewidth = 0.4)
ax2 .plot(x, abs(dict_carbon_tce[key]['Uptake_Loss']) , 'r--', linewidth = 0.4)
ax1 .set_ylim(ar0.min()*.95,ar0.max()*1.05)
ax2 .set_ylim(ar0.min()*.95,ar0.max()*1.05)
axs[k_idx] .plot(x, abs(dict_carbon_bin[key]['Uptake_Gain']) , 'g', linewidth = 0.4, label = key)
axs[k_idx] .plot(x, abs(dict_carbon_bin[key]['Uptake_Loss']) , 'r', linewidth = 0.4)
axs[k_idx] .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] .set_ylim(ar.min()*.95,ar.max()*1.05)
axs[k_idx] . legend(loc="upper left")
#ax3 .plot(x, dict_carbon_bin[key]['Regional_%s'%variable.upper()], 'k', linewidth = 0.6 ,label = key) #Regional GPP
#for ax in axs.flat:
# ax.label_outer()
for tick in axs[-3].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-2].get_xticklabels():
tick.set_rotation(45)
for tick in axs[-1].get_xticklabels():
tick.set_rotation(45)
plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=8) #Caption
fig.savefig(web_path+'Regional/%s_%s_Uptake_Losses_and_Gains_PgC.pdf'%(source_run,variable.upper()))
fig.savefig(path_save+'%s_TS_%s_Uptake_Losses_and_Gains_PgC.pdf'%(source_run,variable.upper()))
plt.close(fig)
# Stacked Bar plot to highlight the Statistics of C-Flux and Freq of extremes
# ---------------------------------------------------------------------------
# Spatial Plot of Integrated NBPs during extremes
# -----------------------------------------------
# Common to rest of the spatial figures:
# ======================================
import colorsys as cs
val = 0.8
Rd = cs.rgb_to_hsv(1,0,0)
Rd = cs.hsv_to_rgb(Rd[0],Rd[1],val)
Gn = cs.rgb_to_hsv(0,1,0)
Gn = cs.hsv_to_rgb(Gn[0],Gn[1],val) # hue, saturation, value (Gn[1] is the saturation; Gn[0] twice was a typo)
RdGn = {'red' : ((0.0, 0.0, Rd[0]),
(0.5, 1.0, 1.0 ),
(1.0, Gn[0], 0.0 )),
'green': ((0.0, 0.0, Rd[1]),
(0.5, 1.0, 1.0 ),
(1.0, Gn[1], 0.0 )),
'blue' : ((0.0, 0.0, Rd[2]),
(0.5, 1.0, 1.0 ),
(1.0, Gn[2], 0.0 ))}
plt.register_cmap(name = 'RdGn',data = RdGn)
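# Rough equivalent sketch (assumption: LinearSegmentedColormap.from_list is available; kept as
# a comment so the registered 'RdGn' above stays authoritative). It linearly blends the two
# endpoint colours through white rather than reproducing the exact segment data dict:
#
#   from matplotlib.colors import LinearSegmentedColormap
#   RdGn_alt = LinearSegmentedColormap.from_list('RdGn_alt', [Rd, (1.0, 1.0, 1.0), Gn])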
# The index of the region in region mask
#srex_idx = srex_idxs[np.array(srex_abr) == reg_abr][0]
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
sub_fig_text = ['(a)', '(b)', '(c)',
'(d)', '(e)', '(f)']
# Plotting of "NBP_du_Ext"
# =====================================
values_range = []
sign = {}
for r in srex_abr:
sign[r] = {}
for wi in Wins_to_Plot:
values_range.append(dict_carbon_freq_bin[r].loc[wi,'NBP_du_Ext'])
if dict_carbon_freq_bin[r].loc[wi,'NBP_du_Ext'] > 0:
sign[r][wi] = '+'
elif dict_carbon_freq_bin[r].loc[wi,'NBP_du_Ext'] < 0:
sign[r][wi] = u"\u2212"
else:
sign[r][wi] = '*'
print ("To check for the range of values")
print (np.array(values_range).min())
print (np.array(values_range).max())
levels = np.arange(-6,6,2)
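# note: np.arange excludes the stop value, so this yields [-6, -4, -2, 0, 2, 4]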
print (levels)
# Creating the NBP values for all regions and each plotting window (NBP during extremes)
ploting_stats = {}
for wi in Wins_to_Plot:
ploting_stats[wi] = {}
all_masked = np.ma.masked_equal(np.ma.zeros(srex_mask_ma.shape),0)
for s_idx in srex_idxs:
tmp = np.ma.masked_equal(srex_mask_ma,s_idx+ 1).mask # +1 because srex_idxs are 0-based while region numbers in the mask start at 1
all_masked[tmp] = dict_carbon_freq_bin[srex_abr[s_idx]].loc[wi,'NBP_du_Ext'] # fill the region with its value for this window
del tmp
all_masked = np.ma.masked_array(all_masked, mask = srex_mask_ma.mask)
ploting_stats[wi] ['NBP_du_Ext'] = all_masked
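# e.g. ploting_stats['1850-74']['NBP_du_Ext'] is a lat x lon masked array in which every pixel
# of a SREX region carries that region's integrated NBP during extremes (PgC) for that window.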
# test plot
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
#proj_output = ccrs.Robinson(central_longitude=0)
proj_output = ccrs.PlateCarree()
fig = plt.figure(figsize = (12,9), dpi = 200)
ax = {}
gl = {}
for plot_idx in range(len(Wins_to_Plot)):
gl[plot_idx] = 0
if plot_idx == 0 :
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output
)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], ploting_stats[Wins_to_Plot[plot_idx]]['NBP_du_Ext'],
transform=ccrs.PlateCarree(),vmax=6,vmin=-6,cmap='RdYlGn')
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1], sign[abr][Wins_to_Plot[plot_idx]],
horizontalalignment='center',
transform = proj_trans)
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
elif plot_idx>0:
ax[plot_idx] = fig.add_subplot(
2, 3, plot_idx+1, projection= proj_output,
sharex=ax[0], sharey=ax[0]
)
h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], ploting_stats[Wins_to_Plot[plot_idx]]['NBP_du_Ext'],
transform=ccrs.PlateCarree(),vmax=6,vmin=-6,cmap='RdYlGn')
for srex_idx,abr in enumerate (srex_abr):
ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1],
sign[abr][Wins_to_Plot[plot_idx]],
horizontalalignment='center',
color = 'blue', fontweight = 'bold',
transform = proj_trans)
ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
ax[plot_idx].coastlines(alpha=0.75)
ax[plot_idx].text(-90, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
horizontalalignment="right",
verticalalignment='center',
fontsize = 9)
gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
linewidth=.5, color='gray', alpha=0.5, linestyle='--')
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
#plt.colorbar(h, orientation='horizontal', pad=0.04);
ax[1].set_title("Integrated NBP during NBP Extreme Events (PgC)", fontsize = 16)
fig.savefig(web_path + "Spatial_Maps/Spatial_NBP_Du_Exts.pdf")
fig.savefig(web_path + "Spatial_Maps/Spatial_NBP_Du_Exts.png")
fig.savefig(path_save + "Spatial/Spatial_NBP_Du_Exts.pdf")
plt.close(fig)
# Plotting of "Uptake_Change"
# =====================================
values_range = []
sign = {}
Col_Name = 'Uptake_Change'
for r in srex_abr:
sign[r] = {}
for wi in Wins_to_Plot:
values_range.append(dict_carbon_freq_bin[r].loc[wi,Col_Name])
if dict_carbon_freq_bin[r].loc[wi, Col_Name] > 0:
sign[r][wi] = '+'
elif dict_carbon_freq_bin[r].loc[wi, Col_Name] < 0:
sign[r][wi] = u"\u2212"
else:
sign[r][wi] = '*'
print ("To check for the range of values")
print (np.array(values_range).min())
print (np.array(values_range).max())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Visualisation
^^^^^^^^^^^^^
Standard plotting and mapping procedures
.. autosummary::
:nosignatures:
:toctree: generated/
plot_ppi
plot_ppi_crosshair
plot_rhi
create_cg
plot_scan_strategy
plot_plan_and_vert
plot_max_plan_and_vert
add_lines
add_patches
"""
# standard libraries
import os.path as path
import warnings
# site packages
import numpy as np
import matplotlib.pyplot as pl
from matplotlib import patches, axes, lines
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axisartist import (SubplotHost, ParasiteAxesAuxTrans,
GridHelperCurveLinear)
from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter
import mpl_toolkits.axisartist.angle_helper as ah
from matplotlib.ticker import NullFormatter, FuncFormatter
from matplotlib.collections import LineCollection, PolyCollection
# wradlib modules
from . import georef as georef
from . import util as util
def plot_ppi(data, r=None, az=None, autoext=True,
site=(0, 0, 0), proj=None, elev=0.,
fig=None, ax=111, func='pcolormesh',
cg=False, rf=1., refrac=False,
**kwargs):
"""Plots a Plan Position Indicator (PPI).
The implementation of this plot routine is in cartesian axes and does all
coordinate transforms beforehand. This allows zooming into the data as well
as making it easier to plot additional data (like gauge locations) without
having to convert them to the radar's polar coordinate system.
Using ``cg=True`` the plotting is done in a curvelinear grid axes.
Additional data can be plotted in polar coordinates or cartesian
coordinates depending which axes object is used.
``**kwargs`` may be used to try to influence the
:func:`matplotlib.pyplot.pcolormesh`, :func:`matplotlib.pyplot.contour`,
:func:`matplotlib.pyplot.contourf` and
:func:`wradlib.georef.polar.spherical_to_proj` routines under the hood.
There is one major caveat concerning the values of ``r`` and ``az``.
Due to the way :func:`matplotlib.pyplot.pcolormesh` works, ``r`` should
give the location of the start of each range bin, while ``az`` should give
the angle at the beginning (i.e. 'leftmost' edge) of the beam.
This might be in contrast to other conventions, which might define ranges
and angles at the center of bin and beam.
This especially affects the default values set for ``r`` and ``az``, but it
should be possible to accommodate all other conventions by setting ``r``
and ``az`` properly.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
The data to be plotted. It is assumed that the first dimension is over
the azimuth angles, while the second dimension is over the range bins
r : :class:`numpy:numpy.ndarray`
The ranges. Units may be chosen arbitrarily, unless proj is set. In
that case the units must be meters. If None, a default is
calculated from the dimensions of ``data``.
rf: float
If present, factor for scaling range axes, defaults to 1.
az : :class:`numpy:numpy.ndarray`
The azimuth angles in degrees. If None, a default is
calculated from the dimensions of ``data``.
autoext : bool
This routine uses :func:`matplotlib.pyplot.pcolormesh` to draw the
bins.
As this function needs one set of coordinates more than would usually
be provided by ``r`` and ``az``, setting ``autoext`` to True
automatically extends ``r`` and ``az`` so that all of ``data`` will
be plotted.
refrac: bool
If True, the effect of refractivity of the earth's atmosphere on the
beam propagation will be taken into account. If False, simple
trigonometry will be used to calculate beam propagation.
Functionality for this will be provided by function
:func:`wradlib.georef.misc.bin_distance`. Therefore, if ``refrac`` is
True, ``r`` must be given in meters.
site : tuple
Tuple of coordinates of the radar site.
If ``proj`` is not used, this simply becomes the offset for the origin
of the coordinate system.
If ``proj`` is used, values must be given as (longitude, latitude)
tuple of geographical coordinates.
proj : osr spatial reference object
GDAL OSR Spatial Reference Object describing projection
If this parameter is not None, ``site`` must be set. Then the function
will attempt to georeference the radar bins and display the PPI in the
coordinate system defined by the projection string.
elev : float or array of same shape as ``az``
Elevation angle of the scan or individual azimuths.
May improve georeferencing coordinates for larger elevation angles.
fig : :class:`matplotlib:matplotlib.figure.Figure`
If given, the RHI will be plotted into this figure object. Axes are
created as needed. If None, a new figure object will be created or
current figure will be used, depending on ``ax``.
ax : :class:`matplotlib:matplotlib.axes.Axes` | matplotlib grid definition
If matplotlib Axes object is given, the PPI will be plotted into this
axes object.
If matplotlib grid definition is given (nrows/ncols/plotnumber),
axis are created in the specified place.
Defaults to '111', only one subplot/axis.
func : str
Name of plotting function to be used under the hood.
Defaults to 'pcolormesh'. 'contour' and 'contourf' can be selected too.
cg : bool
If True, the data will be plotted on curvelinear axes.
See also
--------
:func:`wradlib.georef.projection.reproject`
:func:`wradlib.georef.projection.create_osr`
Returns
-------
ax : :class:`matplotlib:matplotlib.axes.Axes`
The axes object into which the PPI was plotted
pm : :class:`matplotlib:matplotlib.collections.QuadMesh` | \
:class:`matplotlib:matplotlib.contour.QuadContourSet`
The result of the plotting function. Necessary, if you want to
add a colorbar to the plot.
Note
----
If ``cg`` is True, the ``cgax`` - curvelinear Axes (r-theta-grid)
is returned. ``caax`` - Cartesian Axes (x-y-grid) and ``paax`` -
parasite axes object for plotting polar data can be derived like this::
caax = cgax.parasites[0]
paax = cgax.parasites[1]
The function :func:`~wradlib.vis.create_cg` uses the Matplotlib AXISARTIST
namespace `mpl_toolkits.axisartist \
<https://matplotlib.org/mpl_toolkits/axes_grid/users/axisartist.html>`_.
Here are some limitations to normal Matplotlib Axes. While using the
Matplotlib `AxesGrid Toolkit \
<https://matplotlib.org/mpl_toolkits/axes_grid/index.html>`_
most of the limitations can be overcome.
See `Matplotlib AxesGrid Toolkit User’s Guide \
<https://matplotlib.org/mpl_toolkits/axes_grid/users/index.html>`_.
Examples
--------
See :ref:`/notebooks/visualisation/wradlib_plot_ppi_example.ipynb`,
and :ref:`/notebooks/visualisation/wradlib_plot_curvelinear_grids.ipynb`.
"""
# kwargs handling
kw_spherical = {}
if 're' in kwargs:
re = kwargs.pop('re')
kw_spherical['re'] = re
if 'ke' in kwargs:
ke = kwargs.pop('ke')
kw_spherical['ke'] = ke
kwargs['zorder'] = kwargs.pop('zorder', 0)
if (proj is not None) & cg:
cg = False
warnings.warn(
"WARNING: `cg` cannot be used with `proj`, falling back.")
# providing 'reasonable defaults', based on the data's shape
if r is None:
d1 = np.arange(data.shape[1], dtype=float)  # np.float alias removed in newer NumPy
else:
d1 = np.asanyarray(r.copy())
if az is None:
d2 = np.arange(data.shape[0], dtype=float)  # np.float alias removed in newer NumPy
else:
d2 = np.asanyarray(az.copy())
if autoext & ('pcolormesh' in func):
# the ranges need to go 'one bin further', assuming some regularity
# we extend by the distance between the preceding bins.
x = np.append(d1, d1[-1] + (d1[-1] - d1[-2]))
# the angular dimension is supposed to be cyclic, so we just add the
# first element
y = np.append(d2, d2[0])
else:
# disabling autoext is only useful if the user already supplied coordinates
# with the correct dimensions.
x = d1
y = d2
if 'contour' in func:
# add first azimuth as last for y and data
y = np.append(d2, d2[0])
data = np.vstack((data, data[0][np.newaxis, ...]))
# move to center
x += (x[1] - x[0]) / 2.
# get angle difference correct if y[1]=360-res/2 and y[0]=0+res/2
ydiff = np.abs((y[1] - y[0]) % 360)
y += ydiff / 2.
if refrac & (proj is None):
# with refraction correction, significant at higher elevations
# calculate new range values
re = kwargs.pop('re', 6370040.)
ke = kwargs.pop('ke', 4 / 3.)
x = georef.bin_distance(x, elev, site[2], re, ke=ke)
# axes object is given
if isinstance(ax, axes.Axes):
if cg:
caax = ax.parasites[0]
paax = ax.parasites[1]
else:
if fig is None:
if ax == 111:  # equality, not identity: 'is' on an int literal is implementation-dependent
# create new figure if there is only one subplot
fig = pl.figure()
else:
# assume current figure
fig = pl.gcf()
if cg:
# create curvelinear axes
ax, caax, paax = create_cg('PPI', fig, ax)
# this is in fact the outermost thick "ring"
ax.axis["lon"] = ax.new_floating_axis(1, np.max(x) / rf)
ax.axis["lon"].major_ticklabels.set_visible(False)
# and also set tickmarklength to zero for better presentation
ax.axis["lon"].major_ticks.set_ticksize(0)
else:
ax = fig.add_subplot(ax)
if cg:
xx, yy = np.meshgrid(y, x)
# set bounds to min/max
xa = yy * np.sin(np.radians(xx)) / rf
ya = yy * np.cos(np.radians(xx)) / rf
plax = paax
else:
# coordinates for all vertices
xx, yy = np.meshgrid(x, y)
plax = ax
if proj:
# with georeferencing
if r is None:
# if we produced a default, this one is still in 'kilometers'
# therefore we need to get from km to m
xx *= 1000
# projected to the final coordinate system
kw_spherical['proj'] = proj
coords = georef.spherical_to_proj(xx, yy, elev, site, **kw_spherical)
xx = coords[..., 0]
yy = coords[..., 1]
else:
if cg:
yy = yy / rf
data = data.transpose()
else:
# no georeferencing -> simple trigonometry
xxx = (xx * np.cos(np.radians(90. - yy)) + site[0]) / rf
yy = (xx * np.sin(np.radians(90. - yy)) + site[1]) / rf
xx = xxx
# plot the stuff
plotfunc = getattr(plax, func)
pm = plotfunc(xx, yy, data, **kwargs)
if cg:
# show curvelinear and cartesian grids
ax.set_ylim(np.min(ya), np.max(ya))
ax.set_xlim(np.min(xa), np.max(xa))
ax.grid(True)
caax.grid(True)
else:
ax.set_aspect('equal')
return ax, pm
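# Minimal usage sketch (illustrative only, synthetic data; kept as a comment so importing this
# module has no side effects):
#
#   data = np.random.uniform(0, 60, (360, 100))   # 360 azimuths x 100 range bins
#   ax, pm = plot_ppi(data)                       # cartesian PPI via pcolormesh
#   pl.colorbar(pm, ax=ax)
#   pl.show()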
def plot_ppi_crosshair(site, ranges, angles=None,
proj=None, elev=0., ax=None, **kwargs):
"""Plots a Crosshair for a Plan Position Indicator (PPI).
Parameters
----------
site : tuple
Tuple of coordinates of the radar site.
If `proj` is not used, this simply becomes the offset for the origin
of the coordinate system.
If `proj` is used, values must be given as (longitude, latitude)
tuple of geographical coordinates.
ranges : list
List of ranges, for which range circles should be drawn.
If ``proj`` is None, arbitrary units may be used (such that they fit
with the underlying PPI plot).
Otherwise the ranges must be given in meters.
angles : list
List of angles (in degrees) for which straight lines should be drawn.
These lines will be drawn starting from the center and until the
largest range.
proj : osr spatial reference object
GDAL OSR Spatial Reference Object describing projection
The function will calculate lines and circles according to
georeferenced coordinates taking beam propagation, earth's curvature
and scale effects due to projection into account.
Depending on the projection, crosshair lines might not be straight and
range circles might appear elliptical (also check if the aspect of the
axes might also be responsible for this).
elev : float or array of same shape as az
Elevation angle of the scan or individual azimuths.
May improve georeferencing coordinates for larger elevation angles.
ax : :class:`matplotlib:matplotlib.axes.Axes`
If given, the crosshair will be plotted into this axes object. If None
matplotlib's current axes (function gca()) concept will be used to
determine the axes.
Keyword Arguments
-----------------
line : dict
dictionary, which will be passed to the crosshair line objects using
the standard keyword inheritance mechanism. If not given defaults will
be used.
circle : dict
dictionary, which will be passed to the range circle patch objects using
the standard keyword inheritance mechanism. If not given defaults will
be used.
See also
--------
:func:`~wradlib.vis.plot_ppi` - plotting a PPI in cartesian coordinates
Returns
-------
ax : :class:`matplotlib:matplotlib.axes.Axes`
The axes object into which the PPI was plotted
Examples
--------
See :ref:`/notebooks/visualisation/wradlib_plot_ppi_example.ipynb`.
"""
# if we didn't get an axes object, find the current one
if ax is None:
ax = pl.gca()
if angles is None:
angles = [0, 90, 180, 270]
# set default line keywords
linekw = dict(color='gray', linestyle='dashed')
# update with user settings
linekw.update(kwargs.get('line', {}))
# set default circle keywords
circkw = dict(edgecolor='gray', linestyle='dashed', facecolor='none')
# update with user settings
circkw.update(kwargs.get('circle', {}))
# determine coordinates for 'straight' lines
if proj:
# projected
# reproject the site coordinates
psite = georef.reproject(*site, projection_target=proj)
# these lines might not be straight, so we approximate them with 10
# segments. Produce polar coordinates
rr, az = np.meshgrid(np.linspace(0, ranges[-1], 10), angles)
# convert from spherical to projection
coords = georef.spherical_to_proj(rr, az, elev, site, proj=proj)
nsewx = coords[..., 0]
nsewy = coords[..., 1]
else:
# no projection
psite = site
rr, az = np.meshgrid(np.linspace(0, ranges[-1], 2), angles)
# use simple trigonometry to calculate coordinates
nsewx, nsewy = (psite[0] + rr * np.cos(np.radians(90 - az)),
psite[1] + rr * np.sin(np.radians(90 - az)))
# mark the site, just in case nothing else would be drawn
ax.plot(*psite[:2], marker='+', **linekw)
# draw the lines
for i in range(len(angles)):
ax.add_line(lines.Line2D(nsewx[i, :], nsewy[i, :], **linekw))
# draw the range circles
if proj:
# produce an approximation of the circle
x, y = np.meshgrid(ranges, np.arange(360))
poly = georef.spherical_to_proj(x, y, elev, site, proj=proj)[..., :2]
poly = np.swapaxes(poly, 0, 1)
for p in poly:
ax.add_patch(patches.Polygon(p, **circkw))
else:
# in the unprojected case, we may use 'true' circles.
for r in ranges:
ax.add_patch(patches.Circle(psite, r, **circkw))
# setting the axes aspect to equal is a reasonable default here
ax.set_aspect('equal')
# return the axes object for later use
return ax
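# Hedged usage sketch (not part of wradlib): how plot_ppi_crosshair defined above
# might be called without a projection; the site tuple, ranges and angles below are
# made-up example values in arbitrary units.
def _example_plot_ppi_crosshair():
    example_site = (0., 0., 0.)                   # x, y, altitude
    ax = plot_ppi_crosshair(example_site,
                            ranges=[50., 100., 150.],
                            angles=[0, 90, 180, 270],
                            line=dict(color='black', linestyle='dotted'),
                            circle=dict(edgecolor='black', linestyle='dotted'))
    return ax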
def plot_rhi(data, r=None, th=None, th_res=None, yoffset=0., autoext=True,
refrac=True, rf=1., fig=None, ax=111, func='pcolormesh', cg=False,
**kwargs):
"""Plots a Range Height Indicator (RHI).
The implementation of this plot routine is in cartesian axes and does all
coordinate transforms beforehand. This allows zooming into the data as well
as making it easier to plot additional data (like gauge locations) without
having to convert them to the radar's polar coordinate system.
Using ``cg=True`` the plotting is done in a curvelinear grid axes.
Additional data can be plotted in polar coordinates or cartesian
coordinates depending which axes object is used.
``**kwargs`` may be used to try to influence the
:func:`matplotlib.pyplot.pcolormesh`, :func:`matplotlib.pyplot.contour`
and :func:`matplotlib.pyplot.contourf` routines under the hood.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
The data to be plotted. It is assumed that the first dimension is over
the elevation angles, while the second dimension is over the range bins
r : :class:`numpy:numpy.ndarray`
The ranges. Units may be chosen arbitrarily. If None, a default is
calculated from the dimensions of ``data``.
rf: float
Factor for scaling the range axis, defaults to 1.
th : :class:`numpy:numpy.ndarray`
The elevation angles in degrees. If None, a default is
calculated from the dimensions of ``data``.
th_res : float or np.array of same shape as ``th``
In RHIs the elevation angles are often spaced wider than
the beam width. If this beam width (in degrees) is given in ``th_res``,
plot_rhi will plot the beams accordingly. Otherwise the behavior of
:func:`matplotlib.pyplot.pcolormesh` assumes all beams to be adjacent
to each other, which might lead to unexpected results.
yoffset : float
Altitude offset that would typically represent the altitude of
the radar antenna. Units must be consistent with units of ``r``.
autoext : bool
This routine uses :func:`matplotlib.pyplot.pcolormesh` to draw
the bins.
As this function needs one more set of coordinates than would usually be
provided by ``r`` and ``th``, setting ``autoext`` to True automatically
extends ``r`` and ``th`` so that all of ``data`` will be plotted.
refrac : bool
If True, the effect of refractivity of the earth's atmosphere on the
beam propagation will be taken into account. If False, simple
trigonometry will be used to calculate beam propagation.
Functionality for this will be provided by functions
:func:`wradlib.georef.misc.site_distance` and
:func:`wradlib.georef.misc.bin_altitude`, which assume distances to be
given in meters. Therefore, if ``refrac`` is True, ``r`` must be given
in meters.
fig : :class:`matplotlib:matplotlib.figure.Figure`
If given, the RHI will be plotted into this figure object. Axes are
created as needed. If None, a new figure object will be created or
current figure will be used, depending on ``ax``.
ax : :class:`matplotlib:matplotlib.axes.Axes` | matplotlib grid definition
If matplotlib Axes object is given, the RHI will be plotted into this
axes object.
If matplotlib grid definition is given (nrows/ncols/plotnumber),
axis are created in the specified place.
Defaults to 111, i.e. a single subplot/axis.
func : str
Name of plotting function to be used under the hood.
Defaults to 'pcolormesh'. 'contour' and 'contourf' can be selected too.
cg : bool
If True, the data will be plotted on curvelinear axes.
See also
--------
:func:`wradlib.vis.create_cg` : creation of curvelinear grid axes objects
Returns
-------
ax : :class:`matplotlib:matplotlib.axes.Axes`
The axes object into which the RHI was plotted.
pm : :class:`matplotlib:matplotlib.collections.QuadMesh` | \
:class:`matplotlib:matplotlib.contour.QuadContourSet`
The result of the plotting function. Necessary, if you want to
add a colorbar to the plot.
Note
----
If ``cg`` is True, the ``cgax`` - curvelinear Axes (r-theta-grid)
is returned. ``caax`` - Cartesian Axes (x-y-grid) and ``paax`` -
parasite axes object for plotting polar data can be derived like this::
caax = cgax.parasites[0]
paax = cgax.parasites[1]
The function :func:`~wradlib.vis.create_cg` uses the Matplotlib AXISARTIST
namespace `mpl_toolkits.axisartist \
<https://matplotlib.org/mpl_toolkits/axes_grid/users/axisartist.html>`_.
These axes have some limitations compared to normal Matplotlib Axes; most
of them can be overcome by using the Matplotlib `AxesGrid Toolkit \
<https://matplotlib.org/mpl_toolkits/axes_grid/index.html>`_.
See `Matplotlib AxesGrid Toolkit User’s Guide \
<https://matplotlib.org/mpl_toolkits/axes_grid/users/index.html>`_.
Examples
--------
See :ref:`/notebooks/visualisation/wradlib_plot_curvelinear_grids.ipynb`.
"""
# kwargs handling
kwargs['zorder'] = kwargs.pop('zorder', 0)
# autogenerate axis dimensions
if r is None:
d1 = np.arange(data.shape[1], dtype=float)  # the np.float alias is removed in recent numpy
else:
d1 = np.asanyarray(r.copy())
if th is None:
# assume data is evenly spaced between 0 and 90 degrees
d2 = np.linspace(0., 90., num=data.shape[0], endpoint=True)
# d2 = np.arange(data.shape[0], dtype=np.float)
else:
d2 = np.asanyarray(th.copy())
if autoext and ('pcolormesh' in func):
# extend the range by the delta of the two last bins
x = np.append(d1, d1[-1] + d1[-1] - d1[-2])
# RHIs usually aren't cyclic, so a regular extension is our best
# guess here as well
y = np.append(d2, d2[-1] + d2[-1] - d2[-2])
else:
# hopefully, the user supplied everything correctly...
x = d1
y = d2
if th_res is not None:
# we are given a beam resolution and thus may not just glue each
# beam to its neighbor
# solving this still with the efficient pcolormesh but interlacing
# the data with masked values, simulating the gap between beams
# make a temporary data array with one dimension twice the size of
# the original
img = np.ma.empty((data.shape[0], data.shape[1] * 2))
# mask everything
img.mask = np.ma.masked
# set the data in the first half of the temporary array
# this automatically unsets the mask
img[:, :data.shape[1]] = data
# reshape so that data and masked lines interlace each other
img = img.reshape((-1, data.shape[1]))
# produce lower and upper y coordinates for the actual data
yl = d2 - th_res * 0.5
yu = d2 + th_res * 0.5
# glue them together to achieve the proper dimensions for the
# interlaced array
y = np.concatenate([yl[None, :], yu[None, :]], axis=0).T.ravel()
else:
img = data
# fix reference for contour functions
if 'contour' in func:
x += (x[1] - x[0]) / 2
y += (y[1] - y[0]) / 2
# axes object given
if isinstance(ax, axes.Axes):
if cg:
caax = ax.parasites[0]
paax = ax.parasites[1]
else:
if fig is None:
# create new figure if there is only one subplot
if ax == 111:
fig = pl.figure()
else:
fig = pl.gcf()
if cg:
# create curvelinear axes
ax, caax, paax = create_cg('RHI', fig, ax)
# this is in fact the outermost thick "ring" aka max_range
ax.axis["lon"] = ax.new_floating_axis(1, np.max(x) / rf)
ax.axis["lon"].major_ticklabels.set_visible(False)
# and also set tickmarklength to zero for better presentation
ax.axis["lon"].major_ticks.set_ticksize(0)
else:
ax = fig.add_subplot(ax)
# coordinates for all vertices
xx, yy = np.meshgrid(x, y)
plax = ax
if refrac:
# account for atmospheric refraction: ground distance and beam height
# must be calculated accordingly
re = kwargs.pop('re', 6370040.)
ke = kwargs.pop('ke', 4/3.)
yyy = georef.bin_altitude(xx, yy, yoffset, re, ke=ke)
xxx = georef.site_distance(xx, yy, yyy, re, ke=ke)
xxx /= rf
yyy /= rf
if cg:
plax = caax
else:
if cg:
xxx, yyy = np.meshgrid(y, x)
yyy /= rf
img = img.transpose()
plax = paax
else:
# otherwise plane trigonometry will do
xxx = xx * np.cos(np.radians(yy)) / rf
yyy = xx * np.sin(np.radians(yy)) / rf
yyy += yoffset / rf
# plot the stuff
plotfunc = getattr(plax, func)
pm = plotfunc(xxx, yyy, img, **kwargs)
# return references to important and eventually new objects
if cg:
# set bounds to maximum
ax.set_ylim(0, np.max(x) / rf)
ax.set_xlim(0,
| np.max(x) | numpy.max |
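# Hedged usage sketch (not part of the source above): calling plot_rhi from the
# preceding prompt with a synthetic sweep; refrac=False keeps everything in
# arbitrary units, and the docstring says an (ax, pm) tuple comes back.
def _example_plot_rhi():
    import numpy as np
    data = np.random.rand(30, 120)                # 30 elevation angles x 120 range bins
    r = np.arange(120) * 100.                     # ranges, arbitrary units
    th = np.linspace(0., 30., num=30)             # elevation angles in degrees
    return plot_rhi(data, r=r, th=th, refrac=False)   # (ax, pm) per the docstring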
import tensorflow as tf
import torch
import numpy as np
import itertools
from scipy.stats import unitary_group
from neurophox.config import TF_COMPLEX
from neurophox.helpers import fix_phase_tf, tri_phase_tf, fix_phase_torch
from neurophox.tensorflow import MeshLayer
from neurophox.torch import RMTorch
from neurophox.numpy import RMNumpy
from neurophox.meshmodel import RectangularMeshModel
from neurophox.ml.linear import complex_mse
from torch.autograd.functional import jacobian
TEST_CASES = itertools.product([7, 8], (0, 0.1), (True, False))
class PhaseControlTest(tf.test.TestCase):
def test_mask_pcf(self):
def random_mask_init(x, use_torch=False):
x_mask = np.ones_like(x).flatten()
x_mask[:x_mask.size // 2] = 0
np.random.shuffle(x_mask)
x_mask = np.reshape(x_mask, x.shape)
fix_phase = fix_phase_torch if use_torch else fix_phase_tf
return (x, fix_phase(x, x_mask)), x_mask
for units, bs_error, hadamard in TEST_CASES:
with self.subTest(units=units, bs_error=bs_error, hadamard=hadamard):
np.random.seed(0)
target = unitary_group.rvs(units, random_state=0)
identity_matrix = tf.eye(units, dtype=TF_COMPLEX)
rm_numpy = RMNumpy(units)
theta_init, theta_mask = random_mask_init(rm_numpy.theta)
phi_init, phi_mask = random_mask_init(rm_numpy.phi)
gamma_init, gamma_mask = random_mask_init(rm_numpy.gamma)
mesh_model = RectangularMeshModel(
units=units,
hadamard=hadamard,
bs_error=bs_error,
theta_init=theta_init,
phi_init=phi_init,
gamma_init=gamma_init
)
rm = MeshLayer(mesh_model)
with tf.GradientTape() as tape:
loss = complex_mse(rm(identity_matrix), target)
grads = tape.gradient(loss, rm.trainable_variables)
# t_loss = torch.nn.MSELoss(reduction='mean')
# rm_torch = RMTorch(
# units=units,
# hadamard=hadamard,
# bs_error=bs_error,
# theta_init=random_mask_init(rm_numpy.theta, use_torch=True)[0],
# phi_init=random_mask_init(rm_numpy.phi, use_torch=True)[0],
# gamma_init=rm_numpy.gamma
# )
#
# torch_loss = t_loss(torch.view_as_real(rm_torch(torch.eye(units, dtype=torch.cfloat))),
# torch.view_as_real(torch.as_tensor(target, dtype=torch.cfloat)))
# var = torch_loss.sum()
# var.backward()
# print(torch.autograd.grad(var, [rm_torch.theta]))
theta_grad, phi_grad, gamma_grad = grads[0].numpy(), grads[1].numpy(), grads[2].numpy()
theta_grad_zeros = theta_grad[np.where(theta_mask == 0)]
phi_grad_zeros = phi_grad[np.where(phi_mask == 0)]
gamma_grad_zeros = gamma_grad[np.where(gamma_mask == 0)]
self.assertAllClose(theta_grad_zeros, np.zeros_like(theta_grad_zeros))
self.assertAllClose(phi_grad_zeros,
| np.zeros_like(phi_grad_zeros) | numpy.zeros_like |
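# Hedged sketch (plain TensorFlow, not neurophox's actual fix_phase_tf): one common
# way to freeze selected entries of a parameter so their gradients vanish, which is
# the property the masked-phase test above asserts.
def _example_masked_gradient():
    import tensorflow as tf
    x = tf.Variable([1.0, 2.0, 3.0])
    mask = tf.constant([1.0, 0.0, 1.0])           # 0 marks a frozen entry
    with tf.GradientTape() as tape:
        x_masked = x * mask + tf.stop_gradient(x) * (1.0 - mask)
        loss = tf.reduce_sum(x_masked ** 2)
    grad = tape.gradient(loss, x)
    return grad                                   # [2., 0., 6.]: zero where mask == 0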
import math
import numpy as np
import fasttext
from gensim.models import KeyedVectors
from tqdm import tqdm
import logging
logger = logging.getLogger('nmtpytorch')
def uniform(dim, bias=None):
stdv = 1. / math.sqrt(dim)
x =
| np.random.uniform(-stdv, stdv, dim) | numpy.random.uniform |
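# Quick numeric illustration (made-up dim) of the initializer above: values are
# drawn uniformly from [-1/sqrt(dim), 1/sqrt(dim)].
def _example_uniform_init(dim=256):
    import math
    import numpy as np
    stdv = 1. / math.sqrt(dim)
    x = np.random.uniform(-stdv, stdv, dim)
    return x.min(), x.max(), stdv                 # both extremes lie within +/- stdv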
import numpy as np
from numpy.testing import assert_allclose
from glue.core.roi_pretransforms import ProjectionMplTransform
from glue.core.state import GlueSerializer, GlueUnSerializer
def roundtrip_transform(transform):
gs = GlueSerializer(transform)
out_str = gs.dumps()
obj = GlueUnSerializer.loads(out_str)
return obj.object('__main__')
def test_simple_polar_mpl_transform():
angles = np.deg2rad(np.array([0, 45, 90, 180, 300, 810, 0, 0]))
radii = np.array([0, 5, 4, 1, 0, 2, -10, 10])
transform = ProjectionMplTransform('polar', [0, 2 * np.pi], [-5, 5], 'linear', 'linear')
x, y = transform(angles, radii)
expected_x = np.array([0.75, 0.8535533905932736, 0.5, 0.2, .625, 0.5, np.nan, 1.25])
expected_y = np.array([0.5, 0.8535533905932736, 0.95, 0.5, 0.28349364905389035,
0.85, np.nan, 0.5])
assert_allclose(x, expected_x)
assert_allclose(y, expected_y)
new_transform = roundtrip_transform(transform)
new_x, new_y = new_transform(angles, radii)
assert_allclose(new_x, x, rtol=1e-14)
assert_allclose(new_y, y, rtol=1e-14)
def test_log_polar_mpl_transform():
angles = np.deg2rad(np.array([0, 90, 180]))
radii = np.array([10, 100, 1000])
transform = ProjectionMplTransform('polar', [0, 2 * np.pi], [1, 10000], 'linear', 'log')
x, y = transform(angles, radii)
expected_x = np.array([0.5, 0.5, 0.25])
expected_y = np.array([0.5, 0.625, 0.5])
assert_allclose(x, expected_x)
assert_allclose(y, expected_y)
new_transform = roundtrip_transform(transform)
new_x, new_y = new_transform(angles, radii)
assert_allclose(new_x, x, rtol=1e-14)
assert_allclose(new_y, y, rtol=1e-14)
def test_wedge_polar_mpl_transform():
angles = np.deg2rad(np.array([0, 45, 90, 135, 180]))
radii = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
transform = ProjectionMplTransform('polar', [0, np.pi], [0, 0.5], 'linear', 'linear')
x, y = transform(angles, radii)
assert_allclose(x, np.array([0.6, 0.64142136, 0.5, 0.21715729, 0]))
# For just the upper half, y is between 0.25 and 0.75
assert_allclose(y, np.array([0.25, 0.39142136, 0.55, 0.53284271, 0.25]))
new_transform = roundtrip_transform(transform)
new_x, new_y = new_transform(angles, radii)
assert_allclose(new_x, x, rtol=1e-14)
assert_allclose(new_y, y, rtol=1e-14)
def test_aitoff_mpl_transform():
transform = ProjectionMplTransform('aitoff', [-np.pi, np.pi],
[-np.pi / 2, np.pi / 2], 'linear', 'linear')
long = np.deg2rad(np.array([0, -90, 0, 45]))
lat = np.deg2rad(np.array([0, 0, 45, -45]))
x, y = transform(long, lat)
expected_x = np.array([0.5, 0.25, 0.5, 0.59771208])
expected_y = np.array([0.5, 0.5, 0.75, 0.24466602])
| assert_allclose(x, expected_x) | numpy.testing.assert_allclose |
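# Hedged sketch (hypothetical helper, not part of glue): the full-circle expected
# values above are consistent with mapping the normalised radius to axes-fraction
# coordinates via x = 0.5 + 0.5*r_norm*cos(theta), y = 0.5 + 0.5*r_norm*sin(theta).
def _example_polar_axes_fraction(theta, r, rmin=-5., rmax=5.):
    import numpy as np
    r_norm = (r - rmin) / (rmax - rmin)
    r_norm = np.where(r_norm < 0, np.nan, r_norm)  # radii below rmin come out as NaN
    x = 0.5 + 0.5 * r_norm * np.cos(theta)
    y = 0.5 + 0.5 * r_norm * np.sin(theta)
    return x, y                                    # e.g. (0 rad, 0) -> (0.75, 0.5) as in the test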
import os
import sys
import warnings
import pickle
from IPython.core.display import display
import numpy as np
import pandas as pd
from generator_labeler.ActiveModel import RandomForestQuantileRegressor
from generator_labeler.FeatureExtraction import PredictorFeatureExtraction as PFE
from generator_labeler.Analysis.models import get_X_y
from generator_labeler.Analysis.GeneratedJobsAnalyzer import load_generated_dataset, load_validation_dataset
from generator_labeler.FeatureExtraction.PredictorFeatureExtraction import preprocess_jobs_data_info, fill_jobs_cardinality
from generator_labeler.JobExecutionSampler.unsupervised_sampler import UniformAgglomerativeSampler, RandomSampler
from generator_labeler.ActiveModel.ActiveQuantileForest import QuantileForestModel
from generator_labeler.paper_results import IMDB_config, TPCH_config, TPCH_config_2
import matplotlib.pyplot as plt
import seaborn as sns
# TDGen dependencies
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
sns.set_context("talk")
sns.set_style("whitegrid")
np.random.seed(42)
# np.random.seed(51)
def compute_data_plan_features(config):
args_params = config.parse_args(sys.argv)
jobs_data_info = preprocess_jobs_data_info(args_params["generatedJobsInfo"])
data_sizes = config.data_sizes
data_plan_features = []
for d_id in data_sizes:
data_f = fill_jobs_cardinality(jobs_data_info, data_size=d_id)
data_plan_features.append(data_f)
data_plan_features = pd.concat(data_plan_features)
with open(os.path.join(config.dest_folder, "data_plan_features.pkl"), "wb") as handle:
pickle.dump(data_plan_features, handle)
def load_data_and_preprocess(config, load_original_cards=False):
args_params = config.parse_args(sys.argv)
# Load dataset
if config.plan_data_features_path is not None:
print("#####################################################")
print("## WARNING! Loading pre-existing features dataset! ##")
print("#####################################################")
plan_data_features = pd.read_csv(config.plan_data_features_path).set_index(["plan_id", "data_id"])
else:
plan_data_features, exec_plans_graph = load_generated_dataset(args_params, data_sizes=config.data_sizes, load_original_cards=load_original_cards)
# Log of labels and features
plan_data_features["Log_netRunTime"] = np.log(plan_data_features["netRunTime"])
sourceCardinalitySum = plan_data_features["sourceCardinalitySum"].copy()
sourceCardinalitySum[sourceCardinalitySum == 0] = 1 # Solves a bug in uniform sampler, because log of 0 is minus inf
plan_data_features["Log_sourceCardinalitySum"] = np.log(sourceCardinalitySum)
return plan_data_features
def run(config, load_original_cards=False, random_sampling=False):
# Load plan_data_features
plan_data_features = load_data_and_preprocess(config, load_original_cards=load_original_cards)
# Persist features
plan_data_features.to_csv(os.path.join(config.dest_folder, "plan_data_features.csv"))
# Remove outliers
plan_data_features_no_out = PFE.remove_outliers(plan_data_features.copy(), "netRunTime", b=0.01)
plan_data_features_no_out.to_csv(os.path.join(config.dest_folder, "plan_data_features_no_out.csv"))
# Init learning process
df = plan_data_features_no_out.copy()
dev_df = df.copy()
test_df = df[~df.index.isin(dev_df.index)].copy()
if random_sampling:
print("Random init sampling...")
sample_model = RandomSampler(51, config.feature_cols, config.label_col, seed=42)
else:
sample_model = UniformAgglomerativeSampler(50, config.feature_cols, config.label_col, config.sample_col)
sample_model.fit(dev_df, verbose=True)
# save init_job_sample_ids
np.savetxt(os.path.join(config.dest_folder, "init_job_sample_ids.txt"), sample_model.sample_ids, fmt="%d")
train_data_df = sample_model.transform(dev_df.copy())
val_data_df = dev_df.loc[~dev_df.index.isin(train_data_df.index), :]
test_data_df = test_df.copy()
X_train, y_train = get_X_y(train_data_df, config.feature_cols, config.label_col)
ids_train = train_data_df.reset_index()[["plan_id", "data_id"]]
print("Train data:", X_train.shape)
X_test, y_test = get_X_y(val_data_df, config.feature_cols, config.label_col)
ids_test = val_data_df.reset_index()[["plan_id", "data_id"]]
print("Test data:", X_test.shape)
results = test_active_learning(X_train.copy(), y_train.copy(), ids_train.copy(),
X_test.copy(), y_test.copy(), ids_test.copy(),
config.feature_cols,
n_iter=config.n_iter,
verbose=True)
with open(os.path.join(config.dest_folder, "learning_process.pkl"), "wb") as handle:
pickle.dump(results, handle)
def active_learning_iteration(X_train, y_train, ids_train, X_test, y_test, ids_test, feature_cols, verbose=False):
results = {}
qf_model = QuantileForestModel(random_state=42)
qf_model.fit(X_train, y_train)
qf_model.cross_validate(X_train, y_train)
qf_model.validate(X_test, y_test)
y_pred = qf_model.predict(X_test)
y_pred_upper = qf_model.predict(X_test, quantile=75)
y_pred_lower = qf_model.predict(X_test, quantile=25)
if verbose:
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(y_test[p], marker=".", linewidth=1, label="y_true", color="#1f77b4")
ax.errorbar(np.arange(len(y_pred)), y_pred[p],
yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.',
color="#ff7f0e", label="Pred. interval")
ax.set_title(f"{type(qf_model).__name__} - Score[r2]: {qf_model.test_scores['r2']:.2f}")
ax.set_ylabel("Log(Runtime)")
ax.set_xlabel("Test jobs")
ax.legend()
# plt.show()
plt.close()
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(np.exp(y_test[p]), marker=".", linewidth=1, label="y_true", color="#1f77b4")
ax.errorbar(np.arange(len(y_pred)), np.exp(y_pred[p]), yerr=np.array(
[np.exp(y_pred[p]) - np.exp(y_pred_lower[p]),
| np.exp(y_pred_upper[p]) | numpy.exp |
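# Hedged sketch (made-up numbers): the asymmetric error bars above are differences
# between the median and the 25th/75th-quantile predictions in log space, and
# np.exp back-transforms them to runtime units for the second plot.
def _example_quantile_errorbars():
    import numpy as np
    y_pred = np.array([2.0, 3.0])                 # median prediction of Log(runtime)
    y_pred_lower = y_pred - 0.2                   # 25th-percentile prediction
    y_pred_upper = y_pred + 0.3                   # 75th-percentile prediction
    yerr_log = np.array([y_pred - y_pred_lower, y_pred_upper - y_pred])
    yerr_runtime = np.array([np.exp(y_pred) - np.exp(y_pred_lower),
                             np.exp(y_pred_upper) - np.exp(y_pred)])
    return yerr_log, yerr_runtime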
import numpy as np
from keras.utils import to_categorical
import copy
from common.utils import eligibility_traces, default_config, make_env, RunningMeanStd, str2bool, discount_rewards
from common.ppo_independant import PPOPolicyNetwork, ValueNetwork
render = False
normalize_inputs = True
config = default_config()
LAMBDA = float(config['agent']['lambda'])
lr_actor = float(config['agent']['lr_actor'])
meta_skip_etrace = str2bool(config['agent']['meta_skip_etrace'])
env = make_env(config, normalize_inputs)
n_agent = env.n_agent
T = env.T
GAMMA = env.GAMMA
n_episode = env.n_episode
max_steps = env.max_steps
n_actions = env.n_actions
n_signal = env.n_signal
max_u = env.max_u
i_episode = 0
meta_Pi = []
meta_V = []
for i in range(n_agent):
meta_Pi.append(PPOPolicyNetwork(num_features=env.input_size + 2, num_actions=n_signal, layer_size=128, epsilon=0.1, learning_rate=lr_actor))
meta_V.append(ValueNetwork(num_features=env.input_size + 2, hidden_size=128, learning_rate=0.001))
Pi = [[] for _ in range(n_agent)]
V = [[] for _ in range(n_agent)]
for i in range(n_agent):
for j in range(n_signal):
Pi[i].append(PPOPolicyNetwork(num_features=env.input_size, num_actions=n_actions, layer_size=256, epsilon=0.1, learning_rate=lr_actor))
V[i].append(ValueNetwork(num_features=env.input_size, hidden_size=256, learning_rate=0.001))
if normalize_inputs:
meta_obs_rms = [RunningMeanStd(shape=2) for _ in range(n_agent)]
while i_episode < n_episode:
i_episode += 1
avg = [0] * n_agent
u_bar = [0] * n_agent
utili = [0] * n_agent
u = [[] for _ in range(n_agent)]
ep_actions = [[] for _ in range(n_agent)]
ep_rewards = [[] for _ in range(n_agent)]
ep_states = [[] for _ in range(n_agent)]
meta_z = [[] for _ in range(n_agent)]
meta_rewards = [[] for _ in range(n_agent)]
meta_states = [[] for _ in range(n_agent)]
signal = [0] * n_agent
rat = [0.0] * n_agent
score = 0
steps = 0
su = [0.] * n_agent
su = np.array(su)
obs = env.reset()
done = False
while steps < max_steps and not done:
if steps % T == 0:
for i in range(n_agent):
h = copy.deepcopy(obs[i])
h.append(rat[i])
h.append(utili[i])
if normalize_inputs:
h[-2:] = list(meta_obs_rms[i].obs_filter(np.array(h)[-2:]))
p_z = meta_Pi[i].get_dist(np.array([h]))[0]
z = np.random.choice(range(n_signal), p=p_z)
signal[i] = z
meta_z[i].append(to_categorical(z, n_signal))
meta_states[i].append(h)
steps += 1
action = []
for i in range(n_agent):
h = copy.deepcopy(obs[i])
p = Pi[i][signal[i]].get_dist(np.array([h]))[0]
action.append(np.random.choice(range(n_actions), p=p))
ep_states[i].append(h)
ep_actions[i].append(to_categorical(action[i], n_actions))
obs, rewards, done = env.step(action)
su += np.array(rewards)
score += sum(rewards)
for i in range(n_agent):
u[i].append(rewards[i])
u_bar[i] = sum(u[i]) / len(u[i])
'''
avg=copy.deepcopy(u_bar)
for j in range(10):
for i in range(n_agent):
s=0
for k in range(3):
m=np.random.randint(0,n_agent)
s+=avg[m]
avg[i]=(avg[i]*0.02+(avg[i]+s)/(3+1)*0.98)+(np.random.rand()-0.5)*0.0001
'''
for i in range(n_agent):
avg[i] = sum(u_bar) / len(u_bar)
if avg[i] != 0:
rat[i] = (u_bar[i] - avg[i]) / avg[i]
else:
rat[i] = 0
# print(avg[i])#might help to define max_u
if max_u is not None:
utili[i] = min(1, avg[i] / max_u)
else:
utili[i] = avg[i]
for i in range(n_agent):
if signal[i] == 0:
ep_rewards[i].append(rewards[i])
else:
h = copy.deepcopy(obs[i])
h.append(rat[i])
h.append(utili[i])
if normalize_inputs:
h[-2:] = list(meta_obs_rms[i].obs_filter(
| np.array(h) | numpy.array |
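# Tiny numeric illustration (hypothetical rewards) of the fairness signal computed
# above: rat is each agent's relative deviation from the group-average reward, and
# utili caps that average at max_u when one is configured.
def _example_rat_utili():
    import numpy as np
    u_bar = np.array([2.0, 4.0, 6.0])             # per-agent running average rewards
    avg = np.full_like(u_bar, u_bar.mean())       # every agent sees the group average
    rat = np.where(avg != 0, (u_bar - avg) / avg, 0.0)
    max_u = 5.0
    utili = np.minimum(1.0, avg / max_u)
    return rat, utili                             # [-0.5, 0., 0.5] and [0.8, 0.8, 0.8]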
# -*- coding: utf-8 -*-
"""
Unittests for pointcloud
@author: simlk
"""
import os
# import sys
import unittest
# import time
import logging
import numpy as np
import tempfile
import json
# import ctypes
from thatsDEM2 import pointcloud, osr_utils
LOG = logging.getLogger(__name__)
class TestPointcloud(unittest.TestCase):
def test_pointcloud_constructor1(self):
LOG.info("Testing pointcloud constructor")
pc = pointcloud.Pointcloud(np.ones((2, 2)), np.ones(
2), some_attr=np.ones(2, dtype=np.uint8))
self.assertIn("some_attr", pc.attributes)
self.assertTrue(pc.some_attr.dtype == np.uint8)
def test_pointcloud_constructor_bad(self):
LOG.info("Testing pointcloud constructor -bad")
with self.assertRaises(AssertionError):
pc = pointcloud.Pointcloud(np.ones((2, 2)), np.ones(
2), some_attr=np.ones(4, dtype=np.uint8))
def test_pointcloud_empty_like(self):
LOG.info("Testing pointcloud empty_like factory function")
pc = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2))
empty = pointcloud.empty_like(pc)
self.assertSetEqual(pc.attributes, empty.attributes)
self.assertEqual(empty.size, 0)
def test_extend_pointcloud1(self):
LOG.info("Testing pointcloud extension - bad")
pc1 = pointcloud.Pointcloud(
np.ones((2, 2)), np.ones(2), some_attr=np.ones(2))
pc2 = pointcloud.Pointcloud(np.ones((2, 2)),
| np.ones(2) | numpy.ones |
""" Unit test for the jsf module.
"""
import unittest
from typing import List, Optional
import numpy as np
import numpy.testing as nptest
import pandas as pd
from datafold.dynfold.base import TransformType
from datafold.dynfold.jsf import JointlySmoothFunctions, JsfDataset, _ColumnSplitter
from datafold.pcfold import TSCDataFrame
from datafold.pcfold.kernels import GaussianKernel
def generate_parameters(_x, _y):
return np.column_stack(
[
_x,
_y,
]
)
def generate_observations(_x, _z, div=5, mult=6):
return np.column_stack(
[
(div / 2 * _z + _x / 2 + 2 / 3) * np.cos(mult * np.pi * _z) / 2,
(div / 2 * _z + _x / 2 + 2 / 3) * np.sin(mult * np.pi * _z) / 2,
]
)
def generate_points(n_samples):
rng = np.random.default_rng(42)
xyz = rng.uniform(low=-0.5, high=0.5, size=(n_samples, 3))
x, y, z = (
xyz[:, 0].reshape(-1, 1),
xyz[:, 1].reshape(-1, 1),
xyz[:, 2].reshape(-1, 1),
)
parameters = generate_parameters(x, y)
effective_parameter = parameters[:, 0] + parameters[:, 1] ** 2
observations = generate_observations(effective_parameter, z[:, 0], 2, 2)
return parameters, observations, effective_parameter
class ColumnSplittingTest(unittest.TestCase):
def test_splitting(self):
observations = [np.random.rand(1000, i + 1) for i in range(3)]
columns_splitter = _ColumnSplitter(
[
JsfDataset("observation0", slice(0, 1)),
JsfDataset("observation1", slice(1, 3)),
JsfDataset("observation2", slice(3, 6)),
]
)
X = np.column_stack(observations)
split_X = columns_splitter.split(X)
for expected_observation, actual_observation in zip(observations, split_X):
nptest.assert_array_equal(expected_observation, actual_observation)
class JointlySmoothFunctionsTest(unittest.TestCase):
def setUp(self):
self.parameters, self.observations, self.effective_parameter = generate_points(
1000
)
self.X = np.column_stack([self.parameters, self.observations])
self.datasets = [
JsfDataset("parameters", slice(0, 2)),
JsfDataset("observations", slice(2, 4)),
]
@staticmethod
def _compute_rayleigh_quotients(matrix, eigenvectors):
"""Compute Rayleigh quotients."""
n = eigenvectors.shape[1]
rayleigh_quotients = np.zeros(n)
for i in range(n):
v = eigenvectors[:, i]
rayleigh_quotients[i] = np.dot(v, matrix @ v) / np.dot(v, v)
rayleigh_quotients = np.sort(
| np.abs(rayleigh_quotients) | numpy.abs |
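# Hedged standalone check (plain numpy): for a symmetric matrix the Rayleigh quotient
# of an eigenvector recovers its eigenvalue, which is what the helper above relies on.
def _example_rayleigh_quotient():
    import numpy as np
    rng = np.random.default_rng(0)
    A = rng.normal(size=(5, 5))
    A = (A + A.T) / 2.0                           # symmetrise
    eigvals, eigvecs = np.linalg.eigh(A)
    rq = np.array([eigvecs[:, i] @ A @ eigvecs[:, i] / (eigvecs[:, i] @ eigvecs[:, i])
                   for i in range(5)])
    return np.allclose(np.sort(rq), np.sort(eigvals))   # True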
# -*- coding: utf-8 -*-
#GSASIIpwdGUI - powder data display routines
########### SVN repository information ###################
# $Date: 2019-09-13 14:54:35 -0500 (Fri, 13 Sep 2019) $
# $Author: vondreele $
# $Revision: 4146 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIpwdGUI.py $
# $Id: GSASIIpwdGUI.py 4146 2019-09-13 19:54:35Z vondreele $
########### SVN repository information ###################
'''
*GSASIIpwdGUI: Powder Pattern GUI routines*
-------------------------------------------
Used to define GUI controls for the routines that interact
with the powder histogram (PWDR) data tree items.
'''
from __future__ import division, print_function
import platform
import sys
import os.path
# Don't depend on graphics for scriptable
try:
import wx
import wx.grid as wg
except ImportError:
pass
import numpy as np
import numpy.linalg as nl
import numpy.ma as ma
import math
import copy
import random as ran
if '2' in platform.python_version_tuple()[0]:
import cPickle
else:
import pickle as cPickle
import scipy.interpolate as si
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4146 $")
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
import GSASIIfiles as G2fil
import GSASIIobj as G2obj
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIindex as G2indx
import GSASIIplot as G2plt
import GSASIIdataGUI as G2gd
import GSASIIphsGUI as G2phsG
import GSASIIctrlGUI as G2G
import GSASIIElemGUI as G2elemGUI
import GSASIIElem as G2elem
import GSASIIsasd as G2sasd
import G2shapes
VERY_LIGHT_GREY = wx.Colour(235,235,235)
WACV = wx.ALIGN_CENTER_VERTICAL
if '2' in platform.python_version_tuple()[0]:
GkDelta = unichr(0x0394)
Pwr10 = unichr(0x0b9)+unichr(0x2070)
Pwr20 = unichr(0x0b2)+unichr(0x2070)
Pwrm1 = unichr(0x207b)+unichr(0x0b9)
Pwrm2 = unichr(0x207b)+unichr(0x0b2)
Pwrm6 = unichr(0x207b)+unichr(0x2076)
Pwrm4 = unichr(0x207b)+unichr(0x2074)
Angstr = unichr(0x00c5)
else:
GkDelta = chr(0x0394)
Pwr10 = chr(0x0b9)+chr(0x2070)
Pwr20 = chr(0x0b2)+chr(0x2070)
Pwrm1 = chr(0x207b)+chr(0x0b9)
Pwrm2 = chr(0x207b)+chr(0x0b2)
Pwrm6 = chr(0x207b)+chr(0x2076)
Pwrm4 = chr(0x207b)+chr(0x2074)
Angstr = chr(0x00c5)
# trig functions in degrees
sind = lambda x: math.sin(x*math.pi/180.)
tand = lambda x: math.tan(x*math.pi/180.)
cosd = lambda x: math.cos(x*math.pi/180.)
asind = lambda x: 180.*math.asin(x)/math.pi
################################################################################
###### class definitions
################################################################################
class SubCellsDialog(wx.Dialog):
def __init__(self,parent,title,controls,SGData,items,phaseDict):
wx.Dialog.__init__(self,parent,-1,title,
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = None
self.controls = controls
self.SGData = SGData #for parent phase
self.items = items
self.phaseDict = phaseDict
self.Draw()
def Draw(self):
def RefreshGrid(event):
r,c = event.GetRow(),event.GetCol()
br = self.items[r]
phase = self.phaseDict[br]
rLab = magDisplay.GetRowLabelValue(r)
pname = '(%s) %s'%(rLab,phase['Name'])
if c == 0:
mSGData = phase['SGData']
text,table = G2spc.SGPrint(mSGData,AddInv=True)
if 'magAtms' in phase:
msg = 'Magnetic space group information'
text[0] = ' Magnetic Space Group: '+mSGData['MagSpGrp']
text[3] = ' The magnetic lattice point group is '+mSGData['MagPtGp']
OprNames,SpnFlp = G2spc.GenMagOps(mSGData)
G2G.SGMagSpinBox(self.panel,msg,text,table,mSGData['SGCen'],OprNames,
mSGData['SpnFlp'],False).Show()
else:
msg = 'Space Group Information'
G2G.SGMessageBox(self.panel,msg,text,table).Show()
elif c == 1:
maxequiv = phase['maxequiv']
mSGData = phase['SGData']
Uvec = phase['Uvec']
Trans = phase['Trans']
ifMag = False
if 'magAtms' in phase:
ifMag = True
allmom = phase.get('allmom',False)
magAtms = phase.get('magAtms','')
mAtoms = TestMagAtoms(phase,magAtms,self.SGData,Uvec,Trans,allmom,maxequiv)
else:
mAtoms = TestAtoms(phase,self.controls[15],self.SGData,Uvec,Trans,maxequiv)
Atms = []
AtCods = []
atMxyz = []
for ia,atom in enumerate(mAtoms):
atom[0] += '_%d'%ia
SytSym,Mul,Nop,dupDir = G2spc.SytSym(atom[2:5],mSGData)
Atms.append(atom[:2]+['',]+atom[2:5])
AtCods.append('1')
if 'magAtms' in phase:
MagSytSym = G2spc.MagSytSym(SytSym,dupDir,mSGData)
CSI = G2spc.GetCSpqinel(mSGData['SpnFlp'],dupDir)
atMxyz.append([MagSytSym,CSI[0]])
else:
CSI = G2spc.GetCSxinel(SytSym)
atMxyz.append([SytSym,CSI[0]])
G2phsG.UseMagAtomDialog(self.panel,pname,Atms,AtCods,atMxyz,ifMag=ifMag,ifOK=True).Show()
elif c in [2,3]:
if c == 2:
title = 'Conjugacy list for '+pname
items = phase['altList']
elif c == 3:
title = 'Super groups list for '+pname
items = phase['supList']
if not items[0]:
wx.MessageBox(pname+' is a maximal subgroup',caption='Super group is parent',style=wx.ICON_INFORMATION)
return
SubCellsDialog(self.panel,title,self.controls,self.SGData,items,self.phaseDict).Show()
if self.panel: self.panel.Destroy()
self.panel = wx.Panel(self)
rowLabels = [str(i+1) for i in range(len(self.items))]
colLabels = ['Space Gp','Uniq','nConj','nSup','Trans','Vec','a','b','c','alpha','beta','gamma','Volume']
Types = [wg.GRID_VALUE_STRING,]+3*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_STRING,]+ \
3*[wg.GRID_VALUE_FLOAT+':10,5',]+3*[wg.GRID_VALUE_FLOAT+':10,3',]+[wg.GRID_VALUE_FLOAT+':10,2']
table = []
for ip in self.items:
phase = self.phaseDict[ip]
natms = phase.get('nAtoms',1)
try:
nConj = len(phase['altList'])
nSup = len(phase['supList'])
except KeyError:
nConj = 0
nSup = 0
cell = list(phase['Cell'])
trans = G2spc.Trans2Text(phase['Trans'])
vec = G2spc.Latt2text([phase['Uvec'],])
row = [phase['Name'],natms,nConj,nSup,trans,vec]+cell
table.append(row)
CellsTable = G2G.Table(table,rowLabels=rowLabels,colLabels=colLabels,types=Types)
mainSizer = wx.BoxSizer(wx.VERTICAL)
magDisplay = G2G.GSGrid(self.panel)
magDisplay.SetTable(CellsTable, True)
magDisplay.Bind(wg.EVT_GRID_CELL_LEFT_CLICK,RefreshGrid)
magDisplay.AutoSizeColumns(False)
mainSizer.Add(magDisplay,0,WACV)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.Destroy()
# self.EndModal(wx.ID_OK)
class RDFDialog(wx.Dialog):
def __init__(self,parent):
wx.Dialog.__init__(self,parent,-1,'Background radial distribution function',
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = None
self.result = {'UseObsCalc':'obs-calc','maxR':20.0,'Smooth':'linear'}
self.Draw()
def Draw(self):
def OnUseOC(event):
self.result['UseObsCalc'] = useOC.GetValue()
def OnSmCombo(event):
self.result['Smooth'] = smCombo.GetValue()
if self.panel: self.panel.Destroy()
self.panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(wx.StaticText(self.panel,label='Background RDF controls:'),0,WACV)
plotType = wx.BoxSizer(wx.HORIZONTAL)
plotType.Add(wx.StaticText(self.panel,label=' Select plot type:'),0,WACV)
Choices = ['obs-back','calc-back','obs-calc']
useOC = wx.ComboBox(self.panel,value=Choices[2],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
useOC.SetValue(self.result['UseObsCalc'])
useOC.Bind(wx.EVT_COMBOBOX,OnUseOC)
plotType.Add(useOC,0,WACV)
mainSizer.Add(plotType,0,WACV)
dataSizer = wx.BoxSizer(wx.HORIZONTAL)
dataSizer.Add(wx.StaticText(self.panel,label=' Smoothing type: '),0,WACV)
smChoice = ['linear','nearest',]
smCombo = wx.ComboBox(self.panel,value=self.result['Smooth'],choices=smChoice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
smCombo.Bind(wx.EVT_COMBOBOX, OnSmCombo)
dataSizer.Add(smCombo,0,WACV)
dataSizer.Add(wx.StaticText(self.panel,label=' Maximum radial dist.: '),0,WACV)
maxR = G2G.ValidatedTxtCtrl(self.panel,self.result,'maxR',nDig=(10,1),min=10.,max=50.,
typeHint=float)
dataSizer.Add(maxR,0,WACV)
mainSizer.Add(dataSizer,0,WACV)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def GetSelection(self):
return self.result
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
################################################################################
##### Setup routines
################################################################################
def GetFileBackground(G2frame,xye,Pattern):
bxye = np.zeros(len(xye[1]))
if 'BackFile' in Pattern[0]:
backfile,mult = Pattern[0]['BackFile'][:2]
if backfile:
bId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,backfile)
if bId:
bxye = mult*G2frame.GPXtree.GetItemPyData(bId)[1][1]
else:
print('Error: background PWDR {} not found'.format(backfile))
Pattern[0]['BackFile'][0] = ''
return bxye
def IsHistogramInAnyPhase(G2frame,histoName):
'''Tests a Histogram to see if it is linked to any phases.
Returns the name of the first phase where the histogram is used.
'''
phases = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
if phases:
item, cookie = G2frame.GPXtree.GetFirstChild(phases)
while item:
data = G2frame.GPXtree.GetItemPyData(item)
histoList = data['Histograms'].keys()
if histoName in histoList:
return G2frame.GPXtree.GetItemText(item)
item, cookie = G2frame.GPXtree.GetNextChild(phases, cookie)
return False
else:
return False
def SetupSampleLabels(histName,dataType,histType):
'''Set up a list of labels and number formats for use in
labeling sample parameters.
:param str histName: Name of histogram, ("PWDR ...")
:param str dataType:
'''
parms = []
parms.append(['Scale','Histogram scale factor: ',[10,7]])
if 'C' in histType:
parms.append(['Gonio. radius','Goniometer radius (mm): ',[10,3]])
if 'PWDR' in histName:
if dataType == 'Debye-Scherrer':
if 'T' in histType:
parms += [['Absorption',u'Sample absorption (\xb5\xb7r/l): ',[10,4]],]
else:
parms += [['DisplaceX',u'Sample X displ. perp. to beam (\xb5m): ',[10,3]],
['DisplaceY',u'Sample Y displ. || to beam (\xb5m): ',[10,3]],
['Absorption',u'Sample absorption (\xb5\xb7r): ',[10,4]],]
elif dataType == 'Bragg-Brentano':
parms += [['Shift',u'Sample displacement(\xb5m): ',[10,4]],
['Transparency',u'Sample transparency(1/\xb5eff, cm): ',[10,3]],
['SurfRoughA','Surface roughness A: ',[10,4]],
['SurfRoughB','Surface roughness B: ',[10,4]]]
elif 'SASD' in histName:
parms.append(['Thick','Sample thickness (mm)',[10,3]])
parms.append(['Trans','Transmission (meas)',[10,3]])
parms.append(['SlitLen',u'Slit length (Q,\xc5'+Pwrm1+')',[10,3]])
parms.append(['Omega','Goniometer omega:',[10,3]])
parms.append(['Chi','Goniometer chi:',[10,3]])
parms.append(['Phi','Goniometer phi:',[10,3]])
parms.append(['Azimuth','Detector azimuth:',[10,3]])
parms.append(['Time','Clock time (s):',[12,3]])
parms.append(['Temperature','Sample temperature (K): ',[10,3]])
parms.append(['Pressure','Sample pressure (MPa): ',[10,3]])
return parms
def SetDefaultSASDModel():
'Fills in default items for the SASD Models dictionary'
return {'Back':[0.0,False],
'Size':{'MinDiam':50,'MaxDiam':10000,'Nbins':100,'logBins':True,'Method':'MaxEnt',
'Distribution':[],'Shape':['Spheroid',1.0],
'MaxEnt':{'Niter':100,'Precision':0.01,'Sky':-3},
'IPG':{'Niter':100,'Approach':0.8,'Power':-1},'Reg':{},},
'Pair':{'Method':'Moore','MaxRadius':100.,'NBins':100,'Errors':'User',
'Percent error':2.5,'Background':[0,False],'Distribution':[],
'Moore':10,'Dist G':100.,'Result':[],},
'Particle':{'Matrix':{'Name':'vacuum','VolFrac':[0.0,False]},'Levels':[],},
'Shapes':{'outName':'run','NumAA':100,'Niter':1,'AAscale':1.0,'Symm':1,'bias-z':0.0,
'inflateV':1.0,'AAglue':0.0,'pdbOut':False,'boxStep':4.0},
'Current':'Size dist.','BackFile':'',
}
def SetDefaultREFDModel():
'''Fills in default items for the REFD Models dictionary which are
defined as follows for each layer:
* Name: name of substance
* Thick: thickness of layer in Angstroms (not present for top & bottom layers)
* Rough: upper surface roughness for layer (not present for toplayer)
* Penetration: mixing of layer substance into layer above-is this needed?
* DenMul: multiplier for layer scattering density (default = 1.0)
Top layer defaults to vacuum (or air/any gas); can be substituted for some other substance.
Bottom layer default: infinitely thick silicon; can be substituted for some other substance.
'''
return {'Layers':[{'Name':'vacuum','DenMul':[1.0,False],}, #top layer
{'Name':'vacuum','Rough':[0.,False],'Penetration':[0.,False],'DenMul':[1.0,False],}], #bottom layer
'Scale':[1.0,False],'FltBack':[0.0,False],'Zero':'Top','dQ type':'None','Layer Seq':[], #globals
'Minimizer':'LMLS','Resolution':[0.,'Const dq/q'],'Recomb':0.5,'Toler':0.5, #minimizer controls
'DualFitFiles':['',],'DualFltBacks':[[0.0,False],],'DualScales':[[1.0,False],]} #optional stuff for multidat fits?
def SetDefaultSubstances():
'Fills in default items for the SASD Substances dictionary'
return {'Substances':{'vacuum':{'Elements':{},'Volume':1.0,'Density':0.0,'Scatt density':0.0,'XImag density':0.0},
'unit scatter':{'Elements':None,'Volume':None,'Density':None,'Scatt density':1.0,'XImag density':1.0}}}
def GetFileList(G2frame,fileType):
fileList = []
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
name = G2frame.GPXtree.GetItemText(Id)
if fileType in name.split()[0]:
fileList.append(name)
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
return fileList
def GetHistsLikeSelected(G2frame):
'''Get the histograms that match the currently selected one:
The histogram prefix and data type (PXC etc.), the number of
wavelengths and the instrument geometry (Debye-Scherrer etc.)
must all match. The current histogram is not included in the list.
:param wx.Frame G2frame: pointer to main GSAS-II data tree
'''
histList = []
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))
hType = inst['Type'][0]
if 'Lam1' in inst:
hLam = 2
elif 'Lam' in inst:
hLam = 1
else:
hLam = 0
sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Sample Parameters'))
# hGeom = sample.get('Type')
hstName = G2frame.GPXtree.GetItemText(G2frame.PatternId)
hPrefix = hstName.split()[0]+' '
# cycle through tree looking for items that match the above
item, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while item:
name = G2frame.GPXtree.GetItemText(item)
if name.startswith(hPrefix) and name != hstName:
cGeom,cType,cLam, = '?','?',-1
subitem, subcookie = G2frame.GPXtree.GetFirstChild(item)
while subitem:
subname = G2frame.GPXtree.GetItemText(subitem)
if subname == 'Sample Parameters':
sample = G2frame.GPXtree.GetItemPyData(subitem)
# cGeom = sample.get('Type')
elif subname == 'Instrument Parameters':
inst,inst2 = G2frame.GPXtree.GetItemPyData(subitem)
cType = inst['Type'][0]
if 'Lam1' in inst:
cLam = 2
elif 'Lam' in inst:
cLam = 1
else:
cLam = 0
subitem, subcookie = G2frame.GPXtree.GetNextChild(item, subcookie)
if cLam == hLam and cType == hType: # and cGeom == hGeom:
if name not in histList: histList.append(name)
item, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
return histList
def SetCopyNames(histName,dataType,addNames=[]):
'''Determine the items in the sample parameters that should be copied,
depending on the histogram type and the instrument type.
'''
copyNames = ['Scale',]
histType = 'HKLF'
if 'PWDR' in histName:
histType = 'PWDR'
if 'Debye' in dataType:
copyNames += ['DisplaceX','DisplaceY','Absorption']
else: #Bragg-Brentano
copyNames += ['Shift','Transparency','SurfRoughA','SurfRoughB']
elif 'SASD' in histName:
histType = 'SASD'
copyNames += ['Materials','Thick',]
if len(addNames):
copyNames += addNames
return histType,copyNames
def CopyPlotCtrls(G2frame):
'''Global copy: Copy plot controls from current histogram to others.
'''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No other histograms match '+hst,G2frame)
return
sourceData = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
if 'Offset' not in sourceData[0]: #patch for old data
sourceData[0].update({'Offset':[0.0,0.0],'delOffset':0.02,'refOffset':-1.0,
'refDelt':0.01,})
G2frame.GPXtree.SetItemPyData(G2frame.PatternId,sourceData)
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy plot controls from\n'+str(hst[5:])+' to...',
'Copy plot controls', histList)
results = []
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
finally:
dlg.Destroy()
copyList = []
for i in results:
copyList.append(histList[i])
keys = ['Offset','delOffset','refOffset','refDelt']
source = dict(zip(keys,[sourceData[0][item] for item in keys]))
for hist in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
data = G2frame.GPXtree.GetItemPyData(Id)
data[0].update(source)
G2frame.GPXtree.SetItemPyData(Id,data)
print ('Copy of plot controls successful')
def CopySelectedHistItems(G2frame):
'''Global copy: Copy items from current histogram to others.
'''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No other histograms match '+hst,G2frame)
return
choices = ['Limits','Background','Instrument Parameters','Sample Parameters']
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy which histogram sections from\n'+str(hst[5:]),
'Select copy sections', choices, filterBox=False)
dlg.SetSelections(range(len(choices)))
choiceList = []
if dlg.ShowModal() == wx.ID_OK:
choiceList = [choices[i] for i in dlg.GetSelections()]
if not choiceList: return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy parameters from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
results = []
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
finally:
dlg.Destroy()
copyList = []
for i in results:
copyList.append(histList[i])
if 'Limits' in choiceList: # Limits
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Limits'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Limits'),
copy.deepcopy(data))
if 'Background' in choiceList: # Background
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Background'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Background'),
copy.deepcopy(data))
if 'Instrument Parameters' in choiceList: # Instrument Parameters
# for now all items in Inst. parms are copied
data,data1 = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId,'Instrument Parameters'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters')
)[0].update(copy.deepcopy(data))
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters')
)[1].update(copy.deepcopy(data1))
if 'Sample Parameters' in choiceList: # Sample Parameters
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId,'Sample Parameters'))
# selects items to be copied
histType,copyNames = SetCopyNames(hst,data['Type'],
addNames = ['Omega','Chi','Phi','Gonio. radius','InstrName'])
copyDict = {parm:data[parm] for parm in copyNames}
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters')
).update(copy.deepcopy(copyDict))
def TestMagAtoms(phase,magAtms,SGData,Uvec,Trans,allmom,maxequiv=100,maximal=False):
found = False
anymom = False
phase['Keep'] = False
if not magAtms:
phase['Keep'] = True
return []
invTrans = nl.inv(Trans)
atCodes = []
Phase = {'General':{'AtomPtrs':[2,1],'SGData':copy.deepcopy(phase['SGData'])},'Atoms':[]}
for matm in magAtms:
XYZ = G2spc.GenAtom(matm[3:6],SGData,False,Move=True)
xyzs = [xyz[0] for xyz in XYZ]
atCodes += len(xyzs)*['1',]
xyzs,atCodes = G2lat.ExpandCell(xyzs,atCodes,0,Trans)
for ix,x in enumerate(xyzs):
xyz = G2lat.TransformXYZ(x-Uvec,invTrans.T,np.zeros(3))%1.
Phase['Atoms'].append(matm[:2]+list(xyz))
SytSym,Mul,Nop,dupDir = G2spc.SytSym(xyz,phase['SGData'])
CSI = G2spc.GetCSpqinel(phase['SGData']['SpnFlp'],dupDir)
if any(CSI[0]):
anymom = True
if allmom:
if not any(CSI[0]):
phase['Keep'] = False
found = True
uAtms = G2lat.GetUnique(Phase,atCodes)[0]
natm = len(uAtms)
if anymom and natm <= maxequiv and not found:
phase['Keep'] = True
if maximal and phase['supList'][0]:
phase['Keep'] = False
return uAtms
def TestAtoms(phase,magAtms,SGData,Uvec,Trans,maxequiv=100,maximal=False):
phase['Keep'] = True
invTrans = nl.inv(Trans)
atCodes = []
Phase = {'General':{'AtomPtrs':[2,1],'SGData':copy.deepcopy(phase['SGData'])},'Atoms':[]}
for matm in magAtms:
XYZ = G2spc.GenAtom(matm[3:6],SGData,False,Move=True)
xyzs = [xyz[0] for xyz in XYZ]
atCodes += len(xyzs)*['1',]
xyzs,atCodes = G2lat.ExpandCell(xyzs,atCodes,0,Trans)
for ix,x in enumerate(xyzs):
xyz = G2lat.TransformXYZ(x-Uvec,invTrans.T,np.zeros(3))%1.
Phase['Atoms'].append(matm[:2]+list(xyz))
uAtms = G2lat.GetUnique(Phase,atCodes)[0]
natm = len(uAtms)
if natm > maxequiv: #too many allowed atoms found
phase['Keep'] = False
if maximal and phase['supList'][0]:
phase['Keep'] = False
return uAtms
################################################################################
##### Powder Peaks
################################################################################
def UpdatePeakGrid(G2frame, data):
'''respond to selection of PWDR powder peaks data tree item.
'''
def OnAutoSearch(event):
PatternId = G2frame.PatternId
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
profile = Pattern[1]
bxye = GetFileBackground(G2frame,profile,Pattern)
x0 = profile[0]
iBeg = np.searchsorted(x0,limits[0])
iFin = np.searchsorted(x0,limits[1])
x = x0[iBeg:iFin]
y0 = (profile[1]+bxye)[iBeg:iFin]
ysig = 1.0*np.std(y0)
offset = [-1,1]
ymask = ma.array(y0,mask=(y0<ysig))
for off in offset:
ymask = ma.array(ymask,mask=(ymask-np.roll(y0,off)<=0.))
indx = ymask.nonzero()
mags = ymask[indx]
poss = x[indx]
refs = list(zip(poss,mags))
if 'C' in Inst['Type'][0]:
refs = G2mth.sortArray(refs,0,reverse=True) #small 2-Thetas first
else: #'T'OF
refs = G2mth.sortArray(refs,0,reverse=False) #big TOFs first
for i,ref1 in enumerate(refs): #reject picks closer than 1 FWHM
for ref2 in refs[i+1:]:
if abs(ref2[0]-ref1[0]) < 2.*G2pwd.getFWHM(ref1[0],inst):
del(refs[i])
if 'C' in Inst['Type'][0]:
refs = G2mth.sortArray(refs,1,reverse=True)
else: #'T'OF
refs = G2mth.sortArray(refs,1,reverse=False)
for pos,mag in refs:
data['peaks'].append(G2mth.setPeakparms(inst,inst2,pos,mag))
UpdatePeakGrid(G2frame,data)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
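# Minimal standalone sketch (synthetic profile, plain numpy; not GSAS-II code) of the
# auto-search idea in OnAutoSearch above: keep points that exceed one standard
# deviation and are larger than both neighbours, i.e. local maxima of the profile.
def _example_auto_search():
    import numpy as np
    import numpy.ma as ma
    y0 = np.array([0., 1., 5., 1., 0., 2., 9., 2., 0.])
    ysig = 1.0 * np.std(y0)
    ymask = ma.array(y0, mask=(y0 < ysig))
    for off in (-1, 1):
        ymask = ma.array(ymask, mask=(ymask - np.roll(y0, off) <= 0.))
    return ymask.nonzero()[0]                     # candidate peak indices: [2, 6]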
def OnCopyPeaks(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy peak list from\n'+str(hst[5:])+' to...',
'Copy peaks', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Peak List'),copy.deepcopy(data))
def OnLoadPeaks(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PWDR peaks list file', pth, '',
'PWDR peak list files (*.pkslst)|*.pkslst',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
peaks = []
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
while S:
if '#' in S:
S = File.readline()
continue
try:
peaks.append(eval(S))
except:
break
S = File.readline()
File.close()
finally:
dlg.Destroy()
data = {'peaks':peaks,'sigDict':{}}
UpdatePeakGrid(G2frame,data)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnSavePeaks(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PWDR peaks list file', pth, '',
'PWDR peak list files (*.pkslst)|*.pkslst',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pkslst
filename = os.path.splitext(filename)[0]+'.pkslst'
File = open(filename,'w')
File.write("#GSAS-II PWDR peaks list file; do not add/delete items!\n")
for item in data:
if item == 'peaks':
for pk in data[item]:
File.write(str(pk)+'\n')
File.close()
print ('PWDR peaks list saved to: '+filename)
finally:
dlg.Destroy()
def OnUnDo(event):
DoUnDo()
G2frame.dataWindow.UnDo.Enable(False)
def DoUnDo():
print ('Undo last refinement')
file = open(G2frame.undofile,'rb')
PatternId = G2frame.PatternId
for item in ['Background','Instrument Parameters','Peak List']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item),cPickle.load(file))
if G2frame.dataWindow.GetName() == item:
if item == 'Background':
UpdateBackground(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
elif item == 'Instrument Parameters':
UpdateInstrumentGrid(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
elif item == 'Peak List':
UpdatePeakGrid(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
print (item+' recovered')
file.close()
def SaveState():
G2frame.undofile = os.path.join(G2frame.dirname,'GSASII.save')
file = open(G2frame.undofile,'wb')
PatternId = G2frame.PatternId
for item in ['Background','Instrument Parameters','Peak List']:
cPickle.dump(G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId,item)),file,1)
file.close()
G2frame.dataWindow.UnDo.Enable(True)
def OnLSQPeakFit(event):
if reflGrid.IsCellEditControlEnabled(): # complete any grid edits in progress
reflGrid.HideCellEditControl()
reflGrid.DisableCellEditControl()
if not G2frame.GSASprojectfile: #force a save of the gpx file so SaveState can write in the same directory
G2frame.OnFileSaveas(event)
wx.CallAfter(OnPeakFit,'LSQ')
def OnOneCycle(event):
if reflGrid.IsCellEditControlEnabled(): # complete any grid edits in progress
reflGrid.HideCellEditControl()
reflGrid.DisableCellEditControl()
wx.CallAfter(OnPeakFit,'LSQ',oneCycle=True)
def OnSeqPeakFit(event):
histList = G2gd.GetGPXtreeDataNames(G2frame,['PWDR',])
od = {'label_1':'Copy to next','value_1':False,'label_2':'Reverse order','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame, 'Sequential peak fits',
'Select dataset to include',histList,extraOpts=od)
names = []
if dlg.ShowModal() == wx.ID_OK:
for sel in dlg.GetSelections():
names.append(histList[sel])
dlg.Destroy()
if not names:
return
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Sequential peak fit results')
if Id:
SeqResult = G2frame.GPXtree.GetItemPyData(Id)
else:
SeqResult = {}
Id = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Sequential peak fit results')
SeqResult = {'SeqPseudoVars':{},'SeqParFitEqList':[]}
SeqResult['histNames'] = names
dlg = wx.ProgressDialog('Sequential peak fit','Data set name = '+names[0],len(names),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
controls = {'deriv type':'analytic','min dM/M':0.001,}
print ('Peak Fitting with '+controls['deriv type']+' derivatives:')
oneCycle = False
FitPgm = 'LSQ'
prevVaryList = []
peaks = None
varyList = None
if od['value_2']:
names.reverse()
try:
for i,name in enumerate(names):
print (' Sequential fit for '+name)
GoOn = dlg.Update(i,newmsg='Data set name = '+name)[0]
if not GoOn:
dlg.Destroy()
break
PatternId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
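                # 'Copy to next' option: seed this histogram with the peak list and vary list
                # carried over from the previous fit in the sequence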
if i and od['value_1']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),copy.deepcopy(peaks))
prevVaryList = varyList[:]
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
data = Pattern[1]
fixback = GetFileBackground(G2frame,data,Pattern)
peaks['sigDict'],result,sig,Rvals,varyList,parmDict,fullvaryList,badVary = G2pwd.DoPeakFit(FitPgm,peaks['peaks'],
background,limits,inst,inst2,data,fixback,prevVaryList,oneCycle,controls)
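                # the call above unpacks to (sigDict, result, sig, Rvals, varyList, parmDict,
                # fullvaryList, badVary); a length mismatch between result[0] and fullvaryList
                # is treated as a failed fit and ends the sequence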
if len(result[0]) != len(fullvaryList):
dlg.Destroy()
print (' ***** Sequential peak fit stopped at '+name+' *****')
break
else:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),copy.deepcopy(peaks))
SeqResult[name] = {'variables':result[0],'varyList':varyList,'sig':sig,'Rvals':Rvals,
'covMatrix':np.eye(len(result[0])),'title':name,'parmDict':parmDict,
'fullVary':fullvaryList,'badVary':badVary}
print (' ***** Sequential peak fit successful *****')
finally:
dlg.Destroy()
SeqResult['histNames'] = histList
G2frame.GPXtree.SetItemPyData(Id,SeqResult)
G2frame.G2plotNB.Delete('Sequential refinement') #clear away probably invalid plot
G2frame.GPXtree.SelectItem(Id)
def OnClearPeaks(event):
dlg = wx.MessageDialog(G2frame,'Delete all peaks?','Clear peak list',wx.OK|wx.CANCEL)
        peaks = data    # keep the current peak list unchanged if the dialog is cancelled
        try:
if dlg.ShowModal() == wx.ID_OK:
peaks = {'peaks':[],'sigDict':{}}
finally:
dlg.Destroy()
UpdatePeakGrid(G2frame,peaks)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnPeakFit(FitPgm,oneCycle=False):
SaveState()
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
if not controls:
controls = {'deriv type':'analytic','min dM/M':0.001,} #fill in defaults if needed
print ('Peak Fitting with '+controls['deriv type']+' derivatives:')
PatternId = G2frame.PatternId
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
if not peaks:
G2frame.ErrorDialog('No peaks!','Nothing to fit!')
return
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
data = Pattern[1]
bxye = GetFileBackground(G2frame,data,Pattern)
dlg = wx.ProgressDialog('Residual','Peak fit Rwp = ',101.0,
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
screenSize = wx.ClientDisplayRect()
Size = dlg.GetSize()
if 50 < Size[0] < 500: # sanity check on size, since this fails w/Win & wx3.0
dlg.SetSize((int(Size[0]*1.2),Size[1])) # increase size a bit along x
dlg.SetPosition(wx.Point(screenSize[2]-Size[0]-305,screenSize[1]+5))
try:
peaks['sigDict'] = G2pwd.DoPeakFit(FitPgm,peaks['peaks'],background,limits,inst,inst2,data,bxye,[],oneCycle,controls,dlg)[0]
finally:
# dlg.Destroy()
print ('finished')
newpeaks = copy.copy(peaks)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),newpeaks)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdatePeakGrid,G2frame,newpeaks)
def OnResetSigGam(event):
PatternId = G2frame.PatternId
Inst,Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
if not peaks['peaks']:
G2frame.ErrorDialog('No peaks!','Nothing to do!')
return
newpeaks = {'peaks':[],'sigDict':{}}
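        # rebuild each peak from the current instrument parameters, keeping only its
        # position (peak[0]) and intensity (peak[2]); the profile coefficients are reset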
for peak in peaks['peaks']:
newpeaks['peaks'].append(G2mth.setPeakparms(Inst,Inst2,peak[0],peak[2]))
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),newpeaks)
UpdatePeakGrid(G2frame,newpeaks)
# def RefreshPeakGrid(event):
#
# event.StopPropagation()
# data['peaks'] = G2frame.PeakTable.GetData()
# T = []
# for peak in data['peaks']:T.append(peak[0])
# D = dict(zip(T,data['peaks']))
# T.sort()
# X = []
# for key in T: X.append(D[key])
# data['peaks'] = X
def setBackgroundColors():
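        # flag unphysical (negative) values in the numeric peak columns by coloring
        # the cell red; restore white otherwise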
for r in range(reflGrid.GetNumberRows()):
for c in range(reflGrid.GetNumberCols()):
if reflGrid.GetColLabelValue(c) in ['position','intensity','alpha','beta','sigma','gamma']:
if float(reflGrid.GetCellValue(r,c)) < 0.:
reflGrid.SetCellBackgroundColour(r,c,wx.RED)
else:
reflGrid.SetCellBackgroundColour(r,c,wx.WHITE)
def KeyEditPeakGrid(event):
'''Respond to pressing a key to act on selection of a row, column or cell
in the Peak List table
'''
rowList = reflGrid.GetSelectedRows()
colList = reflGrid.GetSelectedCols()
selectList = reflGrid.GetSelectedCells()
data = G2frame.GPXtree.GetItemPyData(G2frame.PickId)
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif rowList and (event.GetKeyCode() == wx.WXK_DELETE or event.GetKeyCode() == 8):
# pressing the delete key or backspace deletes selected peak(s)
reflGrid.ClearSelection()
reflGrid.ClearGrid()
rowList.sort()
rowList.reverse()
nDel = 0
for row in rowList:
G2frame.PeakTable.DeleteRow(row)
nDel += 1
if nDel:
msg = wg.GridTableMessage(G2frame.PeakTable,
wg.GRIDTABLE_NOTIFY_ROWS_DELETED,0,nDel)
reflGrid.ProcessTableMessage(msg)
data['peaks'] = G2frame.PeakTable.GetData()[:-nDel]
G2frame.GPXtree.SetItemPyData(G2frame.PickId,data)
setBackgroundColors()
elif colList and (event.GetKeyCode() == 89 or event.GetKeyCode() == 78):
reflGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if G2frame.PeakTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(G2frame.PeakTable.GetNumberRows()): data['peaks'][row][col]=True
elif key == 78: #'N'
for row in range(G2frame.PeakTable.GetNumberRows()): data['peaks'][row][col]=False
elif selectList and (event.GetKeyCode() == 89 or event.GetKeyCode() == 78):
reflGrid.ClearSelection()
key = event.GetKeyCode()
for row,col in selectList:
if G2frame.PeakTable.GetTypeName(row,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
data['peaks'][row][col]=True
elif key == 78: #'N'
data['peaks'][row][col]=False
else:
event.Skip()
return
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdatePeakGrid,G2frame,data)
def SelectVars(rows):
'''Set or clear peak refinement variables for peaks listed in rows
'''
refOpts = {reflGrid.GetColLabelValue(i):i+1 for i in range(reflGrid.GetNumberCols()) if reflGrid.GetColLabelValue(i) != "refine"}
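        # refOpts maps each parameter column label to the index of its refine flag,
        # i.e. the 'refine' column immediately to its right in the peak table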
dlg = G2G.G2MultiChoiceDialog(G2frame,'Select columns to refine',
'Refinement Selection', sorted(refOpts.keys()),
filterBox=False,toggle=False)
sels = []
try:
if dlg.ShowModal() == wx.ID_OK:
sels = [sorted(refOpts.keys())[i] for i in dlg.GetSelections()]
else:
return
finally:
dlg.Destroy()
for r in rows:
for lbl,c in refOpts.items():
data['peaks'][r][c] = lbl in sels
UpdatePeakGrid(G2frame,data)
def OnRefineSelected(event):
'''set refinement flags for the selected peaks
'''
rows = list(set([row for row,col in reflGrid.GetSelectedCells()] +
reflGrid.GetSelectedRows()))
if not rows:
wx.MessageBox('No selected rows. You must select rows or cells before using this command',
caption='No selected peaks')
return
SelectVars(rows)
def OnRefineAll(event):
'''set refinement flags for all peaks
'''
SelectVars(range(reflGrid.GetNumberRows()))
# def onCellListSClick(event):
# '''Called when a peak is selected so that it can be highlighted in the plot
# '''
# event.Skip()
# c = event.GetRow(),event.GetCol()
#        if c < 0: # replot except when a column is selected
# wx.CallAfter(G2plt.PlotPatterns,G2frame,plotType='PWDR')
#
def onCellListDClick(event):
'''Called after a double-click on a cell label'''
r,c = event.GetRow(),event.GetCol()
if r < 0 and c < 0:
for row in range(reflGrid.GetNumberRows()):
reflGrid.SelectRow(row,True)
for col in range(reflGrid.GetNumberCols()):
reflGrid.SelectCol(col,True)
elif r > 0: #row label: select it and replot!
reflGrid.ClearSelection()
reflGrid.SelectRow(r,True)
wx.CallAfter(G2frame.reflGrid.ForceRefresh)
wx.CallAfter(G2plt.PlotPatterns,G2frame,plotType='PWDR')
elif c > 0: #column label: just select it (& redisplay)
reflGrid.ClearSelection()
reflGrid.SelectCol(c,True)
if reflGrid.GetColLabelValue(c) != 'refine': return
choice = ['Y - vary all','N - vary none',]
dlg = wx.SingleChoiceDialog(G2frame,'Select refinement option for '+reflGrid.GetColLabelValue(c-1),
'Refinement controls',choice)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
if sel == 0:
for row in range(reflGrid.GetNumberRows()): data['peaks'][row][c]=True
else:
for row in range(reflGrid.GetNumberRows()): data['peaks'][row][c]=False
wx.CallAfter(UpdatePeakGrid,G2frame,data)
#======================================================================
# beginning of UpdatePeakGrid init
#======================================================================
G2frame.GetStatusBar().SetStatusText('Global refine: select refine column & press Y or N',1)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.PeakMenu)
G2frame.Bind(wx.EVT_MENU, OnAutoSearch, id=G2G.wxID_AUTOSEARCH)
G2frame.Bind(wx.EVT_MENU, OnCopyPeaks, id=G2G.wxID_PEAKSCOPY)
G2frame.Bind(wx.EVT_MENU, OnSavePeaks, id=G2G.wxID_PEAKSAVE)
G2frame.Bind(wx.EVT_MENU, OnLoadPeaks, id=G2G.wxID_PEAKLOAD)
G2frame.Bind(wx.EVT_MENU, OnUnDo, id=G2G.wxID_UNDO)
G2frame.Bind(wx.EVT_MENU, OnRefineSelected, id=G2frame.dataWindow.peaksSel.GetId())
G2frame.Bind(wx.EVT_MENU, OnRefineAll, id=G2frame.dataWindow.peaksAll.GetId())
G2frame.Bind(wx.EVT_MENU, OnLSQPeakFit, id=G2G.wxID_LSQPEAKFIT)
G2frame.Bind(wx.EVT_MENU, OnOneCycle, id=G2G.wxID_LSQONECYCLE)
G2frame.Bind(wx.EVT_MENU, OnSeqPeakFit, id=G2G.wxID_SEQPEAKFIT)
G2frame.Bind(wx.EVT_MENU, OnClearPeaks, id=G2G.wxID_CLEARPEAKS)
G2frame.Bind(wx.EVT_MENU, OnResetSigGam, id=G2G.wxID_RESETSIGGAM)
if data['peaks']:
G2frame.dataWindow.AutoSearch.Enable(False)
G2frame.dataWindow.PeakCopy.Enable(True)
G2frame.dataWindow.PeakFit.Enable(True)
G2frame.dataWindow.PFOneCycle.Enable(True)
G2frame.dataWindow.SeqPeakFit.Enable(True)
else:
G2frame.dataWindow.PeakFit.Enable(False)
G2frame.dataWindow.PeakCopy.Enable(False)
G2frame.dataWindow.PFOneCycle.Enable(False)
G2frame.dataWindow.AutoSearch.Enable(True)
G2frame.dataWindow.SeqPeakFit.Enable(False)
G2frame.PickTable = []
rowLabels = []
PatternId = G2frame.PatternId
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))[0]
for i in range(len(data['peaks'])): rowLabels.append(str(i+1))
if 'C' in Inst['Type'][0]:
colLabels = ['position','refine','intensity','refine','sigma','refine','gamma','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,1',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
else:
colLabels = ['position','refine','intensity','refine','alpha','refine',
'beta','refine','sigma','refine','gamma','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,1',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
T = []
for peak in data['peaks']:
T.append(peak[0])
D = dict(zip(T,data['peaks']))
T.sort()
if 'T' in Inst['Type'][0]: #want big TOF's first
T.reverse()
X = []
for key in T: X.append(D[key])
data['peaks'] = X
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
G2frame.GPXtree.SetItemPyData(G2frame.PickId,data)
G2frame.PeakTable = G2G.Table(data['peaks'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Peak List')
G2frame.dataWindow.currentGrids = []
reflGrid = G2G.GSGrid(parent=G2frame.dataWindow)
reflGrid.SetTable(G2frame.PeakTable, True)
setBackgroundColors()
# reflGrid.Bind(wg.EVT_GRID_CELL_CHANGE, RefreshPeakGrid)
reflGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
# reflGrid.Bind(wg.EVT_GRID_LABEL_LEFT_CLICK, onCellListSClick)
# G2frame.dataWindow.Bind(wg.EVT_GRID_CELL_LEFT_CLICK, onCellListSClick)
reflGrid.Bind(wg.EVT_GRID_LABEL_LEFT_DCLICK, onCellListDClick)
# G2frame.dataWindow.Bind(wg.EVT_GRID_CELL_LEFT_DCLICK, onCellListDClick)
reflGrid.AutoSizeColumns(False)
reflGrid.SetScrollRate(10,10)
G2frame.reflGrid = reflGrid
mainSizer.Add(reflGrid,1,wx.ALL|wx.EXPAND,1)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Background
################################################################################
def UpdateBackground(G2frame,data):
'''respond to selection of PWDR background data tree item.
'''
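    # data[0] holds the background function as [name, refine flag, number of terms,
    # coeff0, coeff1, ...]; data[1] is a dict with the Debye terms ('nDebye','debyeTerms'),
    # background peaks ('nPeaks','peaksList'), optional 'FixedPoints' and the fixed
    # 'background PWDR' entry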
def OnBackFlagCopy(event):
flag = data[0][1]
backDict = data[-1]
if backDict['nDebye']:
DBflags = []
for term in backDict['debyeTerms']:
DBflags.append(term[1::2])
if backDict['nPeaks']:
PKflags = []
for term in backDict['peaksList']:
PKflags.append(term[1::2])
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy bkg ref. flags from\n'+str(hst[5:])+' to...',
'Copy bkg flags', histList)
copyList = []
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
backData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Background'))
backData[0][1] = copy.copy(flag)
bkDict = backData[-1]
if bkDict['nDebye'] == backDict['nDebye']:
for i,term in enumerate(bkDict['debyeTerms']):
term[1::2] = copy.copy(DBflags[i])
if bkDict['nPeaks'] == backDict['nPeaks']:
for i,term in enumerate(bkDict['peaksList']):
term[1::2] = copy.copy(PKflags[i])
def OnBackCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy bkg params from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Background'),copy.deepcopy(data))
CalcBack(Id)
def OnBackSave(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II background parameters file', pth, '',
'background parameter files (*.pwdrbck)|*.pwdrbck',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pwdrbck
filename = os.path.splitext(filename)[0]+'.pwdrbck'
File = open(filename,'w')
File.write("#GSAS-II background parameter file; do not add/delete items!\n")
File.write(str(data[0])+'\n')
for item in data[1]:
if item in ['nPeaks','background PWDR','nDebye'] or not len(data[1][item]):
File.write(item+':'+str(data[1][item])+'\n')
else:
File.write(item+':\n')
for term in data[1][item]:
File.write(str(term)+'\n')
File.close()
print ('Background parameters saved to: '+filename)
finally:
dlg.Destroy()
def OnBackLoad(event):
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II background parameters file', pth, '',
'background parameter files (*.pwdrbck)|*.pwdrbck',wx.FD_OPEN)
        newback = None
        try:
if dlg.ShowModal() == wx.ID_OK:
newback = [[],{}]
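                # a .pwdrbck file (written by OnBackSave above) has a '#' header line,
                # then str(data[0]); non-empty list items (FixedPoints, debyeTerms, peaksList)
                # appear as a bare 'name:' line followed by one term per line, and everything
                # else is a single 'name:value' line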
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
if S[0] == '#': #skip the heading
S = File.readline() #should contain the std. bck fxn
newback[0] = eval(S[:-1])
S = File.readline()
while S and ':' in S:
[item,vals] = S[:-1].split(':')
if item in ['nPeaks','nDebye']:
newback[1][item] = int(vals)
elif 'PWDR' in item:
newback[1][item] = eval(vals)
elif item in ['FixedPoints','debyeTerms','peaksList']:
newback[1][item] = []
S = File.readline()
while ':' not in S:
newback[1][item].append(eval(S[:-1]))
S = File.readline()
else:
continue
S = File.readline()
File.close()
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Background'),newback)
finally:
dlg.Destroy()
        if newback is not None:     #skip if the load was cancelled
            CalcBack(G2frame.PatternId)
            G2plt.PlotPatterns(G2frame,plotType='PWDR')
            wx.CallLater(100,UpdateBackground,G2frame,newback)
def OnBkgFit(event):
def SetInstParms(Inst):
dataType = Inst['Type'][0]
insVary = []
insNames = []
insVals = []
for parm in Inst:
insNames.append(parm)
insVals.append(Inst[parm][1])
if parm in ['U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha',
'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',] and Inst[parm][2]:
Inst[parm][2] = False
# insVary.append(parm)
instDict = dict(zip(insNames,insVals))
instDict['X'] = max(instDict['X'],0.01)
instDict['Y'] = max(instDict['Y'],0.01)
if 'SH/L' in instDict:
instDict['SH/L'] = max(instDict['SH/L'],0.002)
return dataType,instDict,insVary
PatternId = G2frame.PatternId
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
# sort the points for convenience and then separate them; extend the range if needed
if 'FixedPoints' not in background[1]:
msg = ("You have not defined any fixed background points. "+
"Use the Fixed Points/Add menu item to define points that will be fit."+
'\n\nSee the "Fitting the Starting Background using Fixed Points" tutorial for more details.')
print (msg)
G2frame.ErrorDialog('No points',msg)
return
background[1]['FixedPoints'] = sorted(background[1]['FixedPoints'],key=lambda pair:pair[0])
X = [x for x,y in background[1]['FixedPoints']]
Y = [y for x,y in background[1]['FixedPoints']]
if X[0] > limits[0]:
X = [limits[0]] + X
Y = [Y[0]] + Y
if X[-1] < limits[1]:
X += [limits[1]]
Y += [Y[-1]]
# interpolate the fixed points onto the grid of data points within limits
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)[1]
xBeg = np.searchsorted(pwddata[0],limits[0])
xFin = np.searchsorted(pwddata[0],limits[1])
xdata = pwddata[0][xBeg:xFin]
ydata = si.interp1d(X,Y)(ma.getdata(xdata))
W = [1]*len(xdata)
Z = [0]*len(xdata)
# load instrument and background params
print (' NB: Any instrument parameter refinement flags will be cleared')
dataType,insDict,insVary = SetInstParms(inst)
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(background)
# how many background parameters are refined?
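        # require roughly 1.5 fixed points per refined background coefficient,
        # otherwise the fit is refused as underdetermined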
if len(bakVary)*1.5 > len(X):
msg = ("You are attempting to vary "+str(len(bakVary))+
" background terms with only "+str(len(X))+" background points"+
"\nAdd more points or reduce the number of terms")
print (msg)
G2frame.ErrorDialog('Too few points',msg)
return
wx.BeginBusyCursor()
try:
G2pwd.DoPeakFit('LSQ',[],background,limits,inst,inst2,
np.array((xdata,ydata,W,Z,Z,Z)),Z,prevVaryList=bakVary,controls=controls)
finally:
wx.EndBusyCursor()
# compute the background values and plot them
parmDict = {}
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(background)
parmDict.update(bakDict)
parmDict.update(insDict)
# Note that this generates a MaskedArrayFutureWarning, but these items are not always masked
pwddata[3][xBeg:xFin] *= 0.
pwddata[5][xBeg:xFin] *= 0.
pwddata[4][xBeg:xFin] = G2pwd.getBackground('',parmDict,bakType,dataType,xdata)[0]
G2plt.PlotPatterns(G2frame,plotType='PWDR')
# show the updated background values
wx.CallLater(100,UpdateBackground,G2frame,data)
def OnBkgClear(event):
if 'FixedPoints' not in data[1]:
return
else:
data[1]['FixedPoints'] = []
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnPeaksMove(event):
if not data[1]['nPeaks']:
G2frame.ErrorDialog('Error','No peaks to move')
return
Peaks = {'peaks':[],'sigDict':{}}
for peak in data[1]['peaksList']:
Peaks['peaks'].append([peak[0],0,peak[2],0,peak[4],0,peak[6],0])
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'),Peaks)
def OnMakeRDF(event):
dlg = RDFDialog(G2frame)
try:
if dlg.ShowModal() == wx.ID_OK:
RDFcontrols = dlg.GetSelection()
else:
return
finally:
dlg.Destroy()
PatternId = G2frame.PatternId
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)[1]
auxPlot = G2pwd.MakeRDF(RDFcontrols,background,inst,pwddata)
if '2' in platform.python_version_tuple()[0]:
superMinusOne = unichr(0xaf)+unichr(0xb9)
else:
superMinusOne = chr(0xaf)+chr(0xb9)
for plot in auxPlot:
XY = np.array(plot[:2])
if 'D(R)' in plot[2]:
xlabel = r'$R, \AA$'
ylabel = r'$D(R), arb. units$'
else:
xlabel = r'$Q,\AA$'+superMinusOne
ylabel = r'$I(Q)$'
G2plt.PlotXY(G2frame,[XY,],Title=plot[2],labelX=xlabel,labelY=ylabel,lines=True)
def BackSizer():
def OnNewType(event):
data[0][0] = bakType.GetValue()
def OnBakRef(event):
data[0][1] = bakRef.GetValue()
def OnBakTerms(event):
data[0][2] = int(bakTerms.GetValue())
M = len(data[0])
N = data[0][2]+3
item = data[0]
if N > M: #add terms
for i in range(M,N):
item.append(0.0)
elif N < M: #delete terms
for i in range(N,M):
del(item[-1])
G2frame.GPXtree.SetItemPyData(BackId,data)
wx.CallLater(100,UpdateBackground,G2frame,data)
def AfterChange(invalid,value,tc):
if invalid: return
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
backSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background function: '),0,WACV)
bakType = wx.ComboBox(G2frame.dataWindow,value=data[0][0],
choices=Choices,style=wx.CB_READONLY|wx.CB_DROPDOWN)
bakType.Bind(wx.EVT_COMBOBOX, OnNewType)
topSizer.Add(bakType)
topSizer.Add((5,0),0)
bakRef = wx.CheckBox(G2frame.dataWindow,label=' Refine?')
bakRef.SetValue(bool(data[0][1]))
bakRef.Bind(wx.EVT_CHECKBOX, OnBakRef)
topSizer.Add(bakRef,0,WACV)
backSizer.Add(topSizer)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of coeff.: '),0,WACV)
bakTerms = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[0][2]),choices=[str(i+1) for i in range(36)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
bakTerms.Bind(wx.EVT_COMBOBOX,OnBakTerms)
topSizer.Add(bakTerms,0,WACV)
topSizer.Add((5,0),0)
backSizer.Add(topSizer)
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background coefficients:'),0,WACV)
bakSizer = wx.FlexGridSizer(0,5,5,5)
for i,value in enumerate(data[0][3:]):
bakVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[0],i+3,nDig=(10,4),OnLeave=AfterChange)
bakSizer.Add(bakVal,0,WACV)
backSizer.Add(bakSizer)
return backSizer
def DebyeSizer():
def OnDebTerms(event):
data[1]['nDebye'] = int(debTerms.GetValue())
M = len(data[1]['debyeTerms'])
N = data[1]['nDebye']
if N > M: #add terms
for i in range(M,N):
data[1]['debyeTerms'].append([1.0,False,1.0,False,0.010,False])
elif N < M: #delete terms
for i in range(N,M):
del(data[1]['debyeTerms'][-1])
if N == 0:
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdateBackground,G2frame,data)
def KeyEditPeakGrid(event):
colList = debyeGrid.GetSelectedCols()
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
debyeGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if debyeTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(debyeGrid.GetNumberRows()): data[1]['debyeTerms'][row][col]=True
elif key == 78: #'N'
for row in range(debyeGrid.GetNumberRows()): data[1]['debyeTerms'][row][col]=False
def OnCellChange(event):
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
debSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Debye scattering: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of terms: '),0,WACV)
debTerms = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[1]['nDebye']),choices=[str(i) for i in range(21)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
debTerms.Bind(wx.EVT_COMBOBOX,OnDebTerms)
topSizer.Add(debTerms,0,WACV)
topSizer.Add((5,0),0)
debSizer.Add(topSizer)
if data[1]['nDebye']:
debSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Debye diffuse terms:'),0,WACV)
rowLabels = []
for i in range(len(data[1]['debyeTerms'])): rowLabels.append(str(i))
colLabels = ['A','refine','R','refine','U','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
debyeTable = G2G.Table(data[1]['debyeTerms'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
debyeGrid = G2G.GSGrid(parent=G2frame.dataWindow)
debyeGrid.SetTable(debyeTable, True)
debyeGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
debyeGrid.Bind(wg.EVT_GRID_CELL_CHANGED,OnCellChange)
debyeGrid.AutoSizeColumns(False)
debSizer.Add(debyeGrid)
return debSizer
def PeaksSizer():
def OnPeaks(event):
data[1]['nPeaks'] = int(peaks.GetValue())
M = len(data[1]['peaksList'])
N = data[1]['nPeaks']
if N > M: #add terms
for i in range(M,N):
data[1]['peaksList'].append([1.0,False,1.0,False,0.10,False,0.10,False])
elif N < M: #delete terms
for i in range(N,M):
del(data[1]['peaksList'][-1])
if N == 0:
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdateBackground,G2frame,data)
def KeyEditPeakGrid(event):
colList = peaksGrid.GetSelectedCols()
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
peaksGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if peaksTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(peaksGrid.GetNumberRows()): data[1]['peaksList'][row][col]=True
elif key == 78: #'N'
for row in range(peaksGrid.GetNumberRows()): data[1]['peaksList'][row][col]=False
def OnCellChange(event):
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
peaksSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Peaks in background: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of peaks: '),0,WACV)
peaks = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[1]['nPeaks']),choices=[str(i) for i in range(30)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
peaks.Bind(wx.EVT_COMBOBOX,OnPeaks)
topSizer.Add(peaks,0,WACV)
topSizer.Add((5,0),0)
peaksSizer.Add(topSizer)
G2frame.dataWindow.currentGrids = []
if data[1]['nPeaks']:
peaksSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Peak list:'),0,WACV)
rowLabels = []
for i in range(len(data[1]['peaksList'])): rowLabels.append(str(i))
colLabels = ['pos','refine','int','refine','sig','refine','gam','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
peaksTable = G2G.Table(data[1]['peaksList'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
peaksGrid = G2G.GSGrid(parent=G2frame.dataWindow)
peaksGrid.SetTable(peaksTable, True)
peaksGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
peaksGrid.Bind(wg.EVT_GRID_CELL_CHANGED,OnCellChange)
peaksGrid.AutoSizeColumns(False)
peaksSizer.Add(peaksGrid)
return peaksSizer
def BackFileSizer():
def OnBackPWDR(event):
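            # select a PWDR histogram to use as a fixed background; the choice is cleared
            # if that histogram is no longer in the tree or its point count differs from
            # the current pattern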
data[1]['background PWDR'][0] = back.GetValue()
if data[1]['background PWDR'][0]:
curHist = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data[1]['background PWDR'][0])
if not Id:
G2G.G2MessageBox(G2frame,'Histogram not found -- how did this happen?','Missing histogram')
back.SetValue('')
data[1]['background PWDR'][0] = back.GetValue()
return
bkgHist = G2frame.GPXtree.GetItemPyData(Id)
if len(bkgHist[1][0]) != len(curHist[1][0]):
                    G2G.G2MessageBox(G2frame,'Histograms have different lengths','Mismatched histograms')
back.SetValue('')
data[1]['background PWDR'][0] = back.GetValue()
return
CalcBack()
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def AfterChange(invalid,value,tc):
if invalid: return
CalcBack()
G2plt.PlotPatterns(G2frame,plotType='PWDR')
fileSizer = wx.BoxSizer(wx.VERTICAL)
fileSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Fixed background file:'),0,WACV)
if 'background PWDR' not in data[1]:
data[1]['background PWDR'] = ['',-1.,False]
backSizer = wx.BoxSizer(wx.HORIZONTAL)
Choices = ['',]+G2gd.GetGPXtreeDataNames(G2frame,['PWDR',])
Source = G2frame.GPXtree.GetItemText(G2frame.PatternId)
Choices.pop(Choices.index(Source))
back = wx.ComboBox(parent=G2frame.dataWindow,value=data[1]['background PWDR'][0],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
back.Bind(wx.EVT_COMBOBOX,OnBackPWDR)
backSizer.Add(back)
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' multiplier'),0,WACV)
backMult = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[1]['background PWDR'],1,nDig=(10,3),OnLeave=AfterChange)
backSizer.Add(backMult,0,WACV)
fileSizer.Add(backSizer)
return fileSizer
def CalcBack(PatternId=G2frame.PatternId):
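        # recompute the calculated background (pwddata[1][4]) between the fitting limits
        # from the current background description, instrument type and, when present,
        # the scaled fixed-background histogram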
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
backData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
dataType = inst['Type'][0]
insDict = {inskey:inst[inskey][1] for inskey in inst}
parmDict = {}
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(data)
parmDict.update(bakDict)
parmDict.update(insDict)
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)
xBeg = np.searchsorted(pwddata[1][0],limits[0])
xFin = np.searchsorted(pwddata[1][0],limits[1])
fixBack = backData[1]['background PWDR']
try: #typically bad grid value or no fixed bkg file
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,fixBack[0])
fixData = G2frame.GPXtree.GetItemPyData(Id)
fixedBkg = {'_fixedVary':False,'_fixedMult':fixBack[1],'_fixedValues':fixData[1][1][xBeg:xFin]}
pwddata[1][4][xBeg:xFin] = G2pwd.getBackground('',parmDict,bakType,dataType,pwddata[1][0][xBeg:xFin],fixedBkg)[0]
except:
pass
# UpdateBackground execution starts here
if len(data) < 2: #add Debye diffuse & peaks scattering here
data.append({'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[]})
if 'nPeaks' not in data[1]:
data[1].update({'nPeaks':0,'peaksList':[]})
G2frame.dataWindow.currentGrids = []
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.BackMenu)
G2frame.Bind(wx.EVT_MENU,OnBackCopy,id=G2G.wxID_BACKCOPY)
G2frame.Bind(wx.EVT_MENU,OnBackFlagCopy,id=G2G.wxID_BACKFLAGCOPY)
G2frame.Bind(wx.EVT_MENU,OnBackSave,id=G2G.wxID_BACKSAVE)
G2frame.Bind(wx.EVT_MENU,OnBackLoad,id=G2G.wxID_BACKLOAD)
G2frame.Bind(wx.EVT_MENU,OnPeaksMove,id=G2G.wxID_BACKPEAKSMOVE)
G2frame.Bind(wx.EVT_MENU,OnMakeRDF,id=G2G.wxID_MAKEBACKRDF)
G2frame.Bind(wx.EVT_MENU,OnBkgFit,id=G2frame.dataWindow.wxID_BackPts['Fit'])
G2frame.Bind(wx.EVT_MENU,OnBkgClear,id=G2frame.dataWindow.wxID_BackPts['Clear'])
BackId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Background')
Choices = ['chebyschev','cosine','Q^2 power series','Q^-2 power series','lin interpolate','inv interpolate','log interpolate']
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(BackSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(DebyeSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(PeaksSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(BackFileSizer())
G2frame.dataWindow.SetDataSize()
################################################################################
##### Limits
################################################################################
def UpdateLimitsGrid(G2frame, data,plottype):
'''respond to selection of PWDR Limits data tree item.
'''
def AfterChange(invalid,value,tc):
if invalid: return
plottype = G2frame.GPXtree.GetItemText(G2frame.PatternId)[:4]
wx.CallAfter(G2plt.PlotPatterns,G2frame,newPlot=False,plotType=plottype) #unfortunately this resets the plot width
def LimitSizer():
limits = wx.FlexGridSizer(0,3,0,5)
labels = ['Tmin','Tmax']
for i in [0,1]:
limits.Add(wx.StaticText(G2frame.dataWindow,
label=' Original {} {:.4f}'.format(labels[i],data[0][i])),0,WACV)
limits.Add(wx.StaticText(G2frame.dataWindow,label=' New: '),0,WACV)
limits.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[1],i, \
min=data[0][0],max=data[0][1],nDig=(10,4),typeHint=float,OnLeave=AfterChange))
return limits
def ExclSizer():
def OnDelExcl(event):
Obj = event.GetEventObject()
item = Indx[Obj.GetId()]
del(data[item+2])
G2plt.PlotPatterns(G2frame,newPlot=False,plotType=plottype)
wx.CallAfter(UpdateLimitsGrid,G2frame,data,plottype)
Indx = {}
excl = wx.FlexGridSizer(0,3,0,5)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' From: '),0,WACV)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' To: '),0,WACV)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' Delete?: '),0,WACV)
for Id,item in enumerate(data[2:]):
for i in [0,1]:
excl.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,i, \
min=data[0][0],max=data[0][1],nDig=(10,4),typeHint=float,OnLeave=AfterChange))
delExcl = wx.CheckBox(G2frame.dataWindow,label='')
Indx[delExcl.GetId()] = Id
delExcl.Bind(wx.EVT_CHECKBOX,OnDelExcl)
excl.Add(delExcl,0,WACV)
return excl
def OnAddExcl(event):
G2frame.ifGetExclude = True
print ('Add excluded region')
def OnLimitCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy limits from\n'+str(hst[5:])+' to...',
'Copy limits', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Limits'),copy.deepcopy(data))
finally:
dlg.Destroy()
def Draw():
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Data used in refinement'),0,WACV)
mainSizer.Add((5,5))
mainSizer.Add(LimitSizer())
if len(data)>2:
mainSizer.Add((0,5),0)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Excluded regions:'),0,WACV)
mainSizer.Add(ExclSizer())
G2frame.dataWindow.SetDataSize()
G2frame.ifGetExclude = False
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.LimitMenu)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Limits')
G2frame.Bind(wx.EVT_MENU,OnLimitCopy,id=G2G.wxID_LIMITCOPY)
G2frame.Bind(wx.EVT_MENU,OnAddExcl,id=G2G.wxID_ADDEXCLREGION)
Draw()
################################################################################
##### Instrument parameters
################################################################################
def UpdateInstrumentGrid(G2frame,data):
'''respond to selection of PWDR/SASD/REFD Instrument Parameters
data tree item.
'''
if 'Bank' not in data: #get it from name; absent for default parms selection
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
if 'Bank' in hst:
bank = int(hst.split('Bank')[1].split('_')[0])
data['Bank'] = [bank,bank,0]
else:
data['Bank'] = [1,1,0]
def keycheck(keys):
good = []
for key in keys:
if key in ['Type','Bank','U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha',
'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','Polariz.',
'Lam','Azimuth','2-theta','fltPath','difC','difA','difB','Zero','Lam1','Lam2']:
good.append(key)
return good
def updateData(inst,ref):
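        # push the edited values/refine flags from the local insVal/insRef dicts back into
        # the tree entry; each parameter there is stored as [default, value, refine flag]
        # (or just [default, value] when there is no flag)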
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[0]
for item in data:
try:
data[item] = [data[item][0],inst[item],ref[item]]
except KeyError:
try:
data[item] = [data[item][0],inst[item]]
except KeyError:
pass #skip 'Polariz.' for N-data
def RefreshInstrumentGrid(event,doAnyway=False):
if doAnyway or event.GetRow() == 1:
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'))
newpeaks = []
for peak in peaks['peaks']:
newpeaks.append(G2mth.setPeakparms(data,Inst2,peak[0],peak[2]))
peaks['peaks'] = newpeaks
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'),peaks)
def OnCalibrate(event):
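        # refine the instrument constants against the indexed, position-refined entries in
        # the Index Peak List (G2pwd.DoCalibInst) and then plot the calibration results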
Pattern = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
xye = ma.array(ma.getdata(Pattern[1]))
cw = np.diff(xye[0])
IndexPeaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List'))
if not len(IndexPeaks[0]):
G2frame.ErrorDialog('Can not calibrate','Index Peak List empty')
return
if not np.any(IndexPeaks[1]):
G2frame.ErrorDialog('Can not calibrate','Peak positions not refined')
return False
Ok = False
for peak in IndexPeaks[0]:
if peak[2] and peak[3]:
Ok = True
if not Ok:
G2frame.ErrorDialog('Can not calibrate','Index Peak List not indexed')
return
if G2pwd.DoCalibInst(IndexPeaks,data):
UpdateInstrumentGrid(G2frame,data)
XY = []
Sigs = []
for ip,peak in enumerate(IndexPeaks[0]):
if peak[2] and peak[3]:
binwid = cw[np.searchsorted(xye[0],peak[0])]
XY.append([peak[-1],peak[0],binwid])
Sigs.append(IndexPeaks[1][ip])
if len(XY):
XY = np.array(XY)
G2plt.PlotCalib(G2frame,data,XY,Sigs,newPlot=True)
else:
G2frame.ErrorDialog('Can not calibrate','Nothing selected for refinement')
def OnLoad(event):
        '''Loads instrument parameters from a G2 .instprm file
        in response to the Instrument Parameters Operations/Load Profile menu item.
        If the .instprm file contains multiple banks, each headed by "#Bank n: ...",
        only the bank matching the current histogram is loaded; nonmatching banks are skipped.
        Note that similar code is found in ReadPowderInstprm (GSASII.py).
        '''
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[0]
bank = data['Bank'][0]
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = []
newVals = []
Found = False
while S:
if S[0] == '#':
if Found:
break
if 'Bank' in S:
if bank == int(S.split(':')[0].split()[1]):
S = File.readline()
continue
else:
S = File.readline()
while S and '#Bank' not in S:
S = File.readline()
continue
else: #a non #Bank file
S = File.readline()
continue
Found = True
[item,val] = S[:-1].split(':')
newItems.append(item)
try:
newVals.append(float(val))
except ValueError:
newVals.append(val)
S = File.readline()
File.close()
if Found:
Inst,Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Instrument Parameters'))
if 'Bank' not in Inst: #patch for old .instprm files - may cause faults for TOF data
Inst['Bank'] = [1,1,0]
data = G2fil.makeInstDict(newItems,newVals,len(newVals)*[False,])
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Instrument Parameters'),[data,Inst2])
RefreshInstrumentGrid(event,doAnyway=True) #to get peaks updated
else:
G2frame.ErrorDialog('No match','Bank %d not in %s'%(bank,filename),G2frame)
UpdateInstrumentGrid(G2frame,data)
G2plt.PlotPeakWidths(G2frame)
finally:
dlg.Destroy()
def OnSave(event):
        '''Respond to the Instrument Parameters Operations/Save Profile menu
        item: writes the current parameters to a .instprm file.
        The header line is written without a "#Bank n:" prefix, so the file can be
        used at any time without a clash of bank numbers.
        '''
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
File.write("#GSAS-II instrument parameter file; do not add/delete items!\n")
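                    # each following line is written as name:value, one parameter per line,
                    # e.g. 'Lam:0.413263' (the value here is only illustrative)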
for item in data:
File.write(item+':'+str(data[item][1])+'\n')
File.close()
print ('Instrument parameters saved to: '+filename)
finally:
dlg.Destroy()
def OnSaveAll(event):
        '''Respond to the Instrument Parameters Operations/Save all Profile menu
        item: writes the instrument parameters from the selected PWDR entries, possibly
        spanning multiple banks, into a single .instprm file. Each block starts with
        "#Bank n: GSAS-II instrument..." where n is the bank number.
        '''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
histList.insert(0,hst)
saveList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Save instrument parameters from',
'Save instrument parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
saveList.append(histList[i])
finally:
dlg.Destroy()
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
for hist in saveList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in inst: #patch
bank = 1
if 'Bank' in hist:
bank = int(hist.split('Bank')[1])
inst['Bank'] = [bank,bank,0]
bank = inst['Bank'][0]
File.write("#Bank %d: GSAS-II instrument parameter file; do not add/delete items!\n"%(bank))
for item in inst:
File.write(item+':'+str(inst[item][1])+'\n')
File.close()
finally:
dlg.Destroy()
def OnReset(event):
insVal.update(insDef)
updateData(insVal,insRef)
RefreshInstrumentGrid(event,doAnyway=True) #to get peaks updated
UpdateInstrumentGrid(G2frame,data)
G2plt.PlotPeakWidths(G2frame)
def OnInstFlagCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
keys = list(data.keys())
try:
keys.remove('Source')
except ValueError:
pass
flags = dict(zip(keys,[data[key][2] for key in keys]))
instType = data['Type'][0]
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy inst ref. flags from\n'+hst[5:],
'Copy refinement flags', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
instData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in instData:
instData['Bank'] = [1,1,0]
if len(data) == len(instData) and instType == instData['Type'][0]: #don't mix data types or lam & lam1/lam2 parms!
for item in instData:
if item not in ['Source',]:
instData[item][2] = copy.copy(flags[item])
else:
print (item+' not copied - instrument parameters not commensurate')
def OnInstCopy(event):
#need fix for dictionary
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
copyData = copy.deepcopy(data)
del copyData['Azimuth'] #not to be copied!
instType = data['Type'][0]
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy inst params from\n'+hst,
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
instData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in instData:
instData['Bank'] = [1,1,0]
if len(data) == len(instData) and instType == instData['Type'][0]: #don't mix data types or lam & lam1/lam2 parms!
instData.update(copyData)
else:
print (item+' not copied - instrument parameters not commensurate')
def AfterChange(invalid,value,tc):
if invalid: return
updateData(insVal,insRef)
def NewProfile(invalid,value,tc):
if invalid: return
updateData(insVal,insRef)
G2plt.PlotPeakWidths(G2frame)
def OnItemRef(event):
Obj = event.GetEventObject()
item = RefObj[Obj.GetId()]
insRef[item] = Obj.GetValue()
updateData(insVal,insRef)
def OnCopy1Val(event):
'''Select one instrument parameter value to edit and copy to many histograms
optionally allow values to be edited in a table
'''
updateData(insVal,insRef)
G2G.SelectEdit1Var(G2frame,data,labelLst,elemKeysLst,dspLst,refFlgElem)
insVal.update({key:data[key][1] for key in instkeys})
insRef.update({key:data[key][2] for key in instkeys})
wx.CallAfter(MakeParameterWindow)
def lblWdef(lbl,dec,val):
'Label parameter showing the default value'
fmt = "%15."+str(dec)+"f"
return " " + lbl + " (" + (fmt % val).strip() + "): "
def RefineBox(item):
'Define a refine checkbox with binding'
#wid = wx.CheckBox(G2frame.dataWindow,label=' Refine? ')
wid = wx.CheckBox(G2frame.dataWindow,label='')
wid.SetValue(bool(insRef[item]))
RefObj[wid.GetId()] = item
wid.Bind(wx.EVT_CHECKBOX, OnItemRef)
return wid
def OnLamPick(event):
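        # changing the source type updates the wavelength(s): the Ka1/Ka2 pair from the
        # waves table for powder ('P') data, or the weighted mean wavelength for
        # single-crystal ('S') data (except synchrotron)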
data['Source'][1] = lamType = event.GetEventObject().GetValue()
if 'P' in insVal['Type']:
insVal['Lam1'] = waves[lamType][0]
insVal['Lam2'] = waves[lamType][1]
elif 'S' in insVal['Type'] and 'synch' not in lamType:
insVal['Lam'] = meanwaves[lamType]
updateData(insVal,insRef)
i,j= wx.__version__.split('.')[0:2]
if int(i)+int(j)/10. > 2.8:
pass # repaint crashes wxpython 2.9
wx.CallLater(100, MakeParameterWindow)
#wx.CallAfter(MakeParameterWindow)
else:
wx.CallAfter(MakeParameterWindow)
def MakeParameterWindow():
'Displays the Instrument parameters in the dataWindow frame'
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
instSizer = wx.FlexGridSizer(0,3,5,5)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
        if insVal['Bank'] is None: #patch
insVal['Bank'] = 1
text = ' Histogram Type: %s Bank: %d'%(insVal['Type'],insVal['Bank'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,text),0,WACV)
mainSizer.Add(subSizer)
labelLst[:],elemKeysLst[:],dspLst[:],refFlgElem[:] = [],[],[],[]
if 'P' in insVal['Type']: #powder data
[instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt),0,WACV) for txt in [' Name (default)',' Value','Refine?']]
if 'C' in insVal['Type']: #constant wavelength
labelLst.append('Azimuth angle')
elemKeysLst.append(['Azimuth',1])
dspLst.append([10,2])
refFlgElem.append(None)
if 'Lam1' in insVal:
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: '),0,WACV)
txt = '%7.2f'%(insVal['Azimuth'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Ka1/Ka2: '),0,WACV)
txt = u' %8.6f/%8.6f\xc5'%(insVal['Lam1'],insVal['Lam2'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
waveSizer = wx.BoxSizer(wx.HORIZONTAL)
waveSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Source type: '),0,WACV)
# PATCH?: for now at least, Source is not saved anywhere before here
if 'Source' not in data: data['Source'] = ['CuKa','?']
choice = ['TiKa','CrKa','FeKa','CoKa','CuKa','MoKa','AgKa']
lamPick = wx.ComboBox(G2frame.dataWindow,value=data['Source'][1],choices=choice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
lamPick.Bind(wx.EVT_COMBOBOX, OnLamPick)
waveSizer.Add(lamPick,0)
subSizer.Add(waveSizer,0)
mainSizer.Add(subSizer)
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef('I(L2)/I(L1)',4,insDef['I(L2)/I(L1)'])),0,WACV)
key = 'I(L2)/I(L1)'
labelLst.append(key)
elemKeysLst.append([key,1])
dspLst.append([10,4])
refFlgElem.append([key,2])
ratVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(ratVal,0)
instSizer.Add(RefineBox(key),0,WACV)
else: # single wavelength
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: '),0,WACV)
txt = '%7.2f'%(insVal['Azimuth'])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
instSizer.Add((5,5),0)
key = 'Lam'
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef[key])),0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,6),typeHint=float,OnLeave=AfterChange)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append([key,1])
dspLst.append([10,6])
instSizer.Add(waveVal,0,WACV)
refFlgElem.append([key,2])
instSizer.Add(RefineBox(key),0,WACV)
for item in ['Zero','Polariz.']:
if item in insDef:
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append([10,4])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,4,insDef[item])),0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
refFlgElem.append([item,2])
instSizer.Add(RefineBox(item),0,WACV)
for item in ['U','V','W','X','Y','Z','SH/L']:
nDig = (10,3)
if item == 'SH/L':
nDig = (10,5)
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append(nDig)
refFlgElem.append([item,2])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,nDig[1],insDef[item])),0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=nDig,typeHint=float,OnLeave=NewProfile)
instSizer.Add(itemVal,0,WACV)
instSizer.Add(RefineBox(item),0,WACV)
elif 'T' in insVal['Type']: #time of flight (neutrons)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Flight path: '),0,WACV)
txt = '%8.3f'%(insVal['fltPath'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('flight path')
elemKeysLst.append(['fltPath',1])
dspLst.append([10,2])
refFlgElem.append(None)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' 2-theta: '),0,WACV)
txt = '%7.2f'%(insVal['2-theta'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('2-theta')
elemKeysLst.append(['2-theta',1])
dspLst.append([10,2])
refFlgElem.append(None)
if 'Pdabc' in Inst2:
Items = ['sig-0','sig-1','sig-2','sig-q','X','Y','Z']
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' difC: '),0,WACV)
txt = '%8.2f'%(insVal['difC'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('difC')
elemKeysLst.append(['difC',1])
dspLst.append([10,2])
refFlgElem.append(None)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' alpha, beta: fixed by table'),0,WACV)
else:
Items = ['difC','difA','difB','Zero','alpha','beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','X','Y','Z']
mainSizer.Add((5,5),0)
mainSizer.Add(subSizer)
mainSizer.Add((5,5),0)
for item in Items:
if item == '':
instSizer.Add((5,5),0)
instSizer.Add((5,5),0)
instSizer.Add((5,5),0)
continue
nDig = (10,3)
if 'beta' in item:
nDig = (12,6)
instSizer.Add(
wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,nDig[1],insDef[item])),
0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=nDig,typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append(nDig)
refFlgElem.append([item,2])
instSizer.Add(RefineBox(item),0,WACV)
elif 'PKS' in insVal['Type']: #peak positions only
key = 'Lam'
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef[key])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,6),typeHint=float,OnLeave=AfterChange)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append([key,1])
dspLst.append([10,6])
instSizer.Add(waveVal,0,WACV)
refFlgElem.append([key,2])
for item in ['Zero',]:
if item in insDef:
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append([10,4])
instSizer.Add(
wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,4,insDef[item])),
0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
refFlgElem.append([item,2])
elif 'S' in insVal['Type']: #single crystal data
if 'C' in insVal['Type']: #constant wavelength
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef['Lam'])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,'Lam',nDig=(10,6),typeHint=float,OnLeave=AfterChange)
instSizer.Add(waveVal,0,WACV)
labelLst.append(u'Lam (\xc5)')
waveSizer = wx.BoxSizer(wx.HORIZONTAL)
waveSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Source type: '),0,WACV)
# PATCH?: for now at least, Source is not saved anywhere before here
if 'Source' not in data: data['Source'] = ['CuKa','?']
choice = ['synchrotron','TiKa','CrKa','FeKa','CoKa','CuKa','MoKa','AgKa']
lamPick = wx.ComboBox(G2frame.dataWindow,value=data['Source'][1],choices=choice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
lamPick.Bind(wx.EVT_COMBOBOX, OnLamPick)
waveSizer.Add(lamPick,0,WACV)
instSizer.Add(waveSizer,0,WACV)
elemKeysLst.append(['Lam',1])
dspLst.append([10,6])
refFlgElem.append(None)
else: #time of flight (neutrons)
pass #for now
elif insVal['Type'][0] in ['L','R',]:
if 'C' in insVal['Type']:
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef['Lam'])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,'Lam',nDig=(10,6),typeHint=float,OnLeave=AfterChange)
instSizer.Add(waveVal,0,WACV)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append(['Lam',1])
dspLst.append([10,6])
refFlgElem.append(None)
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: %7.2f'%(insVal['Azimuth'])),0,WACV)
labelLst.append('Azimuth angle')
elemKeysLst.append(['Azimuth',1])
dspLst.append([10,2])
refFlgElem.append(None)
else: #time of flight (neutrons)
pass #for now
mainSizer.Add(instSizer,0)
G2frame.dataWindow.SetDataSize()
# end of MakeParameterWindow
# beginning of UpdateInstrumentGrid code
#patch: make sure all parameter items are lists
patched = 0
for key in data:
if type(data[key]) is tuple:
data[key] = list(data[key])
patched += 1
if patched: print (patched,' instrument parameters changed from tuples')
if 'Z' not in data:
data['Z'] = [0.0,0.0,False]
#end of patch
labelLst,elemKeysLst,dspLst,refFlgElem = [],[],[],[]
instkeys = keycheck(data.keys())
if 'P' in data['Type'][0]: #powder data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = dict(zip(instkeys,[data[key][2] for key in instkeys]))
if 'NC' in data['Type'][0]:
del(insDef['Polariz.'])
del(insVal['Polariz.'])
del(insRef['Polariz.'])
elif 'S' in data['Type'][0]: #single crystal data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
elif 'L' in data['Type'][0]: #low angle data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
    elif 'R' in data['Type'][0]:                #reflectometry data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
RefObj = {}
#These from Intl. Tables C, Table 4.2.2.1, p. 177-179
waves = {'CuKa':[1.54051,1.54433],'TiKa':[2.74841,2.75207],'CrKa':[2.28962,2.29351],
'FeKa':[1.93597,1.93991],'CoKa':[1.78892,1.79278],'MoKa':[0.70926,0.713543],
'AgKa':[0.559363,0.563775]}
# meanwaves computed as (2*Ka1+Ka2)/3
meanwaves = {'CuKa':1.54178,'TiKa':2.74963,'CrKa':2.29092,'FeKa':1.93728,
'CoKa':1.79021,'MoKa':0.71069,'AgKa':0.56083}
Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[1]
G2gd.SetDataMenuBar(G2frame)
#patch
if 'P' in insVal['Type']: #powder data
if 'C' in insVal['Type']: #constant wavelength
if 'Azimuth' not in insVal:
insVal['Azimuth'] = 0.0
insDef['Azimuth'] = 0.0
insRef['Azimuth'] = False
# if 'T' in insVal['Type']:
# if 'difB' not in insVal:
# insVal['difB'] = 0.0
# insDef['difB'] = 0.0
# insRef['difB'] = False
#end of patch
if 'P' in insVal['Type']: #powder data menu commands
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.InstMenu)
G2frame.GetStatusBar().SetStatusText('NB: Azimuth is used for polarization only',1)
G2frame.Bind(wx.EVT_MENU,OnCalibrate,id=G2G.wxID_INSTCALIB)
G2frame.Bind(wx.EVT_MENU,OnLoad,id=G2G.wxID_INSTLOAD)
G2frame.Bind(wx.EVT_MENU,OnSave,id=G2G.wxID_INSTSAVE)
G2frame.Bind(wx.EVT_MENU,OnSaveAll,id=G2G.wxID_INSTSAVEALL)
G2frame.Bind(wx.EVT_MENU,OnReset,id=G2G.wxID_INSTPRMRESET)
G2frame.Bind(wx.EVT_MENU,OnInstCopy,id=G2G.wxID_INSTCOPY)
G2frame.Bind(wx.EVT_MENU,OnInstFlagCopy,id=G2G.wxID_INSTFLAGCOPY)
G2frame.Bind(wx.EVT_MENU,OnCopy1Val,id=G2G.wxID_INST1VAL)
elif 'L' in insVal['Type'] or 'R' in insVal['Type']: #SASD data menu commands
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SASDInstMenu)
G2frame.Bind(wx.EVT_MENU,OnInstCopy,id=G2G.wxID_SASDINSTCOPY)
MakeParameterWindow()
################################################################################
##### Sample parameters
################################################################################
def UpdateSampleGrid(G2frame,data):
'''respond to selection of PWDR/SASD Sample Parameters
data tree item.
'''
def OnSampleSave(event):
'''Respond to the Sample Parameters Operations/Save menu
item: writes current parameters to a .samprm file
'''
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II sample parameters file', pth, '',
'sample parameter files (*.samprm)|*.samprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .samprm
filename = os.path.splitext(filename)[0]+'.samprm'
File = open(filename,'w')
File.write("#GSAS-II sample parameter file\n")
File.write("'Type':'"+str(data['Type'])+"'\n")
File.write("'Gonio. radius':"+str(data['Gonio. radius'])+"\n")
if data.get('InstrName'):
File.write("'InstrName':'"+str(data['InstrName'])+"'\n")
File.close()
finally:
dlg.Destroy()
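# For reference, a file written by OnSampleSave looks like the following (values are assumed
# examples, not taken from any particular project); OnSampleLoad below parses these key:value lines:
#   #GSAS-II sample parameter file
#   'Type':'Debye-Scherrer'
#   'Gonio. radius':200.0
#   'InstrName':'my diffractometer'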
def OnSampleLoad(event):
'''Loads sample parameters from a G2 .samprm file
in response to the Sample Parameters-Operations/Load menu
Note that similar code is found in ReadPowderInstprm (GSASII.py)
'''
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II sample parameters file', pth, '',
'sample parameter files (*.samprm)|*.samprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = {}
while S:
if S[0] == '#':
S = File.readline()
continue
[item,val] = S[:-1].split(':')
newItems[item.strip("'")] = eval(val)
S = File.readline()
File.close()
data.update(newItems)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Sample Parameters'),data)
UpdateSampleGrid(G2frame,data)
finally:
dlg.Destroy()
def OnAllSampleLoad(event):
filename = ''
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose multihistogram metadata text file', pth, '',
'metadata file (*.*)|*.*',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = []
itemNames = []
Comments = []
while S:
if S[0] == '#':
Comments.append(S)
S = File.readline()
continue
S = S.replace(',',' ').replace('\t',' ')
Stuff = S[:-1].split()
itemNames.append(Stuff[0])
newItems.append(Stuff[1:])
S = File.readline()
File.close()
finally:
dlg.Destroy()
if not filename:
G2frame.ErrorDialog('Nothing to do','No file selected')
return
dataDict = dict(zip(itemNames,newItems))
ifany = False
Controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))
Names = [' ','Phi','Chi','Omega','Time','Temperature','Pressure']
freeNames = {}
for name in ['FreePrm1','FreePrm2','FreePrm3']:
freeNames[Controls[name]] = name
Names.append(Controls[name])
#import imp
#imp.reload(G2G)
dlg = G2G.G2ColumnIDDialog( G2frame,' Choose multihistogram metadata columns:',
'Select columns',Comments,Names,np.array(newItems).T)
try:
if dlg.ShowModal() == wx.ID_OK:
colNames,newData = dlg.GetSelection()
dataDict = dict(zip(itemNames,newData.T))
for item in colNames:
if item != ' ':
ifany = True
finally:
dlg.Destroy()
if not ifany:
G2frame.ErrorDialog('Nothing to do','No columns identified')
return
histList = [G2frame.GPXtree.GetItemText(G2frame.PatternId),]
histList += GetHistsLikeSelected(G2frame)
colIds = {}
for i,name in enumerate(colNames):
if name != ' ':
colIds[name] = i
for hist in histList:
name = hist.split()[1] #this is file name
newItems = {}
for item in colIds:
key = freeNames.get(item,item)
newItems[key] = float(dataDict[name][colIds[item]])
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(newItems)
UpdateSampleGrid(G2frame,data)
def OnSetScale(event):
if histName[:4] in ['REFD','PWDR']:
Scale = data['Scale'][0]
dlg = wx.MessageDialog(G2frame,'Rescale data by %.2f?'%(Scale),'Rescale data',wx.OK|wx.CANCEL)
try:
if dlg.ShowModal() == wx.ID_OK:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,histName)
y,w = G2frame.GPXtree.GetItemPyData(pId)[1][1:3]
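# weights are 1/sigma**2, so multiplying y by Scale multiplies sigma by Scale and hence w by 1/Scale**2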
y *= Scale
w /= Scale**2
data['Scale'][0] = 1.0
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=histName[:4],newPlot=True)
UpdateSampleGrid(G2frame,data)
return
#SASD rescaling
histList = []
item, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while item:
name = G2frame.GPXtree.GetItemText(item)
if 'SASD' in name and name != histName:
histList.append(name)
item, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
if not len(histList): #nothing to copy to!
return
dlg = wx.SingleChoiceDialog(G2frame,'Select reference histogram for scaling',
'Reference histogram',histList)
try:
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
refHist = histList[sel]
finally:
dlg.Destroy()
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))
Profile = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[1]
Data = [Profile,Limits,data]
refId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,refHist)
refSample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,refId, 'Sample Parameters'))
refLimits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,refId, 'Limits'))
refProfile = G2frame.GPXtree.GetItemPyData(refId)[1]
refData = [refProfile,refLimits,refSample]
G2sasd.SetScale(Data,refData)
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
UpdateSampleGrid(G2frame,data)
def OnRescaleAll(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
x0,y0,w0 = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[1][:3]
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
od = {'label_1':'Scaling range min','value_1':0.0,'label_2':'Scaling range max','value_2':10.}
dlg = G2G.G2MultiChoiceDialog(G2frame,
'Do scaling from\n'+str(hst[5:])+' to...','Rescale histograms', histList,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
Xmin = od['value_1']
Xmax = od['value_2']
iBeg = np.searchsorted(x0,Xmin)
iFin = np.searchsorted(x0,Xmax)
if iBeg > iFin:
wx.MessageBox('Wrong order for Xmin, Xmax','Error',style=wx.ICON_EXCLAMATION)
else:
sum0 = np.sum(y0[iBeg:iFin])
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
xi,yi,wi = G2frame.GPXtree.GetItemPyData(Id)[1][:3]
sumi = np.sum(yi[iBeg:iFin])
if sumi:
Scale = sum0/sumi
yi *= Scale
wi /= Scale**2
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=histName[:4],newPlot=True)
def OnSampleCopy(event):
histType,copyNames = SetCopyNames(histName,data['Type'],
addNames = ['Omega','Chi','Phi','Gonio. radius','InstrName'])
copyDict = {}
for parm in copyNames:
copyDict[parm] = data[parm]
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample params from\n'+str(hst[5:])+' to...',
'Copy sample parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(copy.deepcopy(copyDict))
finally:
dlg.Destroy()
def OnSampleCopySelected(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
Controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
# Assemble a list of item labels
TextTable = {key:label for key,label,dig in
SetupSampleLabels(hst,data.get('Type'),Inst['Type'][0])}
# get flexible labels
TextTable.update({key:Controls[key] for key in Controls if key.startswith('FreePrm')})
# add a few extra
TextTable.update({'Type':'Diffractometer type','InstrName':'Instrument Name',})
# Assemble a list of dict entries that would be labeled in the Sample
# params data window (drop ranId and items not used).
keyList = [i for i in data.keys() if i in TextTable]
keyText = [TextTable[i] for i in keyList]
# sort both lists together, ordered by keyText
keyText, keyList = zip(*sorted(list(zip(keyText,keyList)))) # sort lists
selectedKeys = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Select which sample parameters\nto copy',
'Select sample parameters', keyText)
try:
if dlg.ShowModal() == wx.ID_OK:
selectedKeys = [keyList[i] for i in dlg.GetSelections()]
finally:
dlg.Destroy()
if not selectedKeys: return # nothing to copy
copyDict = {}
for parm in selectedKeys:
copyDict[parm] = data[parm]
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample params from\n'+str(hst[5:])+' to...',
'Copy sample parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(copy.deepcopy(copyDict))
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=hst[:4],newPlot=False)
def OnSampleFlagCopy(event):
histType,copyNames = SetCopyNames(histName,data['Type'])
flagDict = {}
for parm in copyNames:
flagDict[parm] = data[parm][1]
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample ref. flags from\n'+str(hst[5:])+' to...',
'Copy sample flags', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
for name in copyNames:
sampleData[name][1] = copy.copy(flagDict[name])
finally:
dlg.Destroy()
def OnHistoChange():
'''Called when the histogram type is changed to refresh the window
'''
#wx.CallAfter(UpdateSampleGrid,G2frame,data)
wx.CallLater(100,UpdateSampleGrid,G2frame,data)
def SetNameVal():
inst = instNameVal.GetValue()
data['InstrName'] = inst.strip()
def OnNameVal(event):
event.Skip()
wx.CallAfter(SetNameVal)
def AfterChange(invalid,value,tc):
if invalid:
return
if tc.key == 0 and 'SASD' in histName: #a kluge for Scale!
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
elif tc.key == 'Thick':
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnMaterial(event):
Obj = event.GetEventObject()
Id = Info[Obj.GetId()]
data['Materials'][Id]['Name'] = Obj.GetValue()
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnVolFrac(invalid,value,tc):
Id = Info[tc.GetId()]
data['Materials'][not Id]['VolFrac'] = 1.-value # the two volume fractions must sum to 1
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnCopy1Val(event):
'Select one value to copy to many histograms and optionally allow values to be edited in a table'
G2G.SelectEdit1Var(G2frame,data,labelLst,elemKeysLst,dspLst,refFlgElem)
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def SearchAllComments(value,tc,*args,**kwargs):
'''Called when the label for a FreePrm is changed: the comments for all PWDR
histograms are searched for a "label=value" pair that matches the label (case
is ignored) and the values are then set to this value, if it can be converted
to a float.
'''
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
name = G2frame.GPXtree.GetItemText(Id)
if 'PWDR' in name:
Comments = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Comments'))
Sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'Sample Parameters'))
for i,item in enumerate(Comments):
itemSp = item.split('=')
if value.lower() == itemSp[0].lower():
try:
Sample[tc.key] = float(itemSp[1])
except:
print('"{}" has an invalid value in Comments from {}'
.format(item.strip(),name))
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
wx.CallLater(100,UpdateSampleGrid,G2frame,data)
######## DEBUG #######################################################
#import GSASIIpwdGUI
#reload(GSASIIpwdGUI)
#reload(G2gd)
######################################################################
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
histName = G2frame.GPXtree.GetItemText(G2frame.PatternId)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SampleMenu)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Sample Parameters')
G2frame.Bind(wx.EVT_MENU, OnSetScale, id=G2G.wxID_SETSCALE)
G2frame.Bind(wx.EVT_MENU, OnSampleCopy, id=G2G.wxID_SAMPLECOPY)
G2frame.Bind(wx.EVT_MENU, OnSampleCopySelected, id=G2G.wxID_SAMPLECOPYSOME)
G2frame.Bind(wx.EVT_MENU, OnSampleFlagCopy, id=G2G.wxID_SAMPLEFLAGCOPY)
G2frame.Bind(wx.EVT_MENU, OnSampleSave, id=G2G.wxID_SAMPLESAVE)
G2frame.Bind(wx.EVT_MENU, OnSampleLoad, id=G2G.wxID_SAMPLELOAD)
G2frame.Bind(wx.EVT_MENU, OnCopy1Val, id=G2G.wxID_SAMPLE1VAL)
G2frame.Bind(wx.EVT_MENU, OnAllSampleLoad, id=G2G.wxID_ALLSAMPLELOAD)
G2frame.Bind(wx.EVT_MENU, OnRescaleAll, id=G2G.wxID_RESCALEALL)
if histName[:4] in ['SASD','REFD','PWDR']:
G2frame.dataWindow.SetScale.Enable(True)
Controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
#patch
if 'ranId' not in data:
data['ranId'] = ran.randint(0,sys.maxsize)
if not 'Gonio. radius' in data:
data['Gonio. radius'] = 200.0
if not 'Omega' in data:
data.update({'Omega':0.0,'Chi':0.0,'Phi':0.0})
if 'Azimuth' not in data:
data['Azimuth'] = 0.0
if type(data['Temperature']) is int:
data['Temperature'] = float(data['Temperature'])
if 'Time' not in data:
data['Time'] = 0.0
if 'FreePrm1' not in Controls:
Controls['FreePrm1'] = 'Sample humidity (%)'
if 'FreePrm2' not in Controls:
Controls['FreePrm2'] = 'Sample voltage (V)'
if 'FreePrm3' not in Controls:
Controls['FreePrm3'] = 'Applied load (MN)'
if 'FreePrm1' not in data:
data['FreePrm1'] = 0.
if 'FreePrm2' not in data:
data['FreePrm2'] = 0.
if 'FreePrm3' not in data:
data['FreePrm3'] = 0.
if 'SurfRoughA' not in data and 'PWDR' in histName:
data['SurfRoughA'] = [0.,False]
data['SurfRoughB'] = [0.,False]
if 'Trans' not in data and 'SASD' in histName:
data['Trans'] = 1.0
if 'SlitLen' not in data and 'SASD' in histName:
data['SlitLen'] = 0.0
if 'Shift' not in data:
data['Shift'] = [0.0,False]
if 'Transparency' not in data:
data['Transparency'] = [0.0,False]
data['InstrName'] = data.get('InstrName','')
#patch end
labelLst,elemKeysLst,dspLst,refFlgElem = [],[],[],[]
parms = SetupSampleLabels(histName,data.get('Type'),Inst['Type'][0])
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add((-1,-1),1,WACV|wx.EXPAND)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Sample and Experimental Parameters'))
# add help button to bring up help web page
helpkey = G2frame.dataWindow.helpKey
topSizer.Add((30,-1))
topSizer.Add(G2G.HelpButton(G2frame.dataWindow,helpIndex=helpkey))
topSizer.Add((-1,-1),1,WACV|wx.EXPAND)
mainSizer.Add(topSizer,0,WACV|wx.EXPAND)
nameSizer = wx.BoxSizer(wx.HORIZONTAL)
nameSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,' Instrument Name '),0,WACV)
nameSizer.Add((-1,-1),1,WACV)
instNameVal = wx.TextCtrl(G2frame.dataWindow,wx.ID_ANY,data['InstrName'],
size=(200,-1),style=wx.TE_PROCESS_ENTER)
nameSizer.Add(instNameVal)
instNameVal.Bind(wx.EVT_CHAR,OnNameVal)
mainSizer.Add(nameSizer,0,WACV)
mainSizer.Add((5,5),0)
labelLst.append('Instrument Name')
elemKeysLst.append(['InstrName'])
dspLst.append(None)
refFlgElem.append(None)
if 'PWDR' in histName:
nameSizer = wx.BoxSizer(wx.HORIZONTAL)
nameSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,' Diffractometer type: '),
0,WACV)
if 'T' in Inst['Type'][0]:
choices = ['Debye-Scherrer',]
else:
choices = ['Debye-Scherrer','Bragg-Brentano',]
histoType = G2G.G2ChoiceButton(G2frame.dataWindow,choices,
strLoc=data,strKey='Type',
onChoice=OnHistoChange)
nameSizer.Add(histoType)
mainSizer.Add(nameSizer,0,WACV)
mainSizer.Add((5,5),0)
parmSizer = wx.FlexGridSizer(0,2,5,0)
for key,lbl,nDig in parms:
labelLst.append(lbl.strip().strip(':').strip())
dspLst.append(nDig)
if 'list' in str(type(data[key])):
parmRef = G2G.G2CheckBox(G2frame.dataWindow,' '+lbl,data[key],1)
parmSizer.Add(parmRef,0,WACV|wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[key],0,
nDig=nDig,typeHint=float,OnLeave=AfterChange)
elemKeysLst.append([key,0])
refFlgElem.append([key,1])
else:
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' '+lbl),
0,WACV|wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,key,
typeHint=float,OnLeave=AfterChange)
elemKeysLst.append([key])
refFlgElem.append(None)
parmSizer.Add(parmVal,0,WACV)
Info = {}
for key in ('FreePrm1','FreePrm2','FreePrm3'):
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,Controls,key,typeHint=str,
notBlank=False,OnLeave=SearchAllComments)
parmSizer.Add(parmVal,1,wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,key,typeHint=float)
parmSizer.Add(parmVal,0,WACV)
labelLst.append(Controls[key])
dspLst.append(None)
elemKeysLst.append([key])
refFlgElem.append(None)
mainSizer.Add(parmSizer,0)
mainSizer.Add((0,5),0)
if histName[:4] in ['SASD',]:
rho = [0.,0.]
anomrho = [0.,0.]
mu = 0.
subSizer = wx.FlexGridSizer(0,4,5,5)
Substances = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Substances'))
for Id,item in enumerate(data['Materials']):
subSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Material: '),0,WACV)
matsel = wx.ComboBox(G2frame.dataWindow,value=item['Name'],choices=list(Substances['Substances'].keys()),
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Info[matsel.GetId()] = Id
matsel.Bind(wx.EVT_COMBOBOX,OnMaterial)
subSizer.Add(matsel,0,WACV)
subSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Volume fraction: '),0,WACV)
volfrac = G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,'VolFrac',
min=0.,max=1.,nDig=(10,3),typeHint=float,OnLeave=OnVolFrac)
subSizer.Add(volfrac,0,WACV)
try:
material = Substances['Substances'][item['Name']]
except KeyError:
print('ERROR - missing substance: '+item['Name'])
material = Substances['Substances']['vacuum']
mu += item['VolFrac']*material.get('XAbsorption',0.)
rho[Id] = material['Scatt density']
anomrho[Id] = material.get('XAnom density',0.)
data['Contrast'] = [(rho[1]-rho[0])**2,(anomrho[1]-anomrho[0])**2]
mainSizer.Add(subSizer,0)
conSizer = wx.BoxSizer(wx.HORIZONTAL)
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Contrast: %10.2f '%(data['Contrast'][0])),0,WACV)
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Anom. Contrast: %10.2f '%(data['Contrast'][1])),0,WACV)
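# Beer-Lambert attenuation: the transmitted fraction through thickness t is exp(-mu*t), shown below as a check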
mut = mu*data['Thick']
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Transmission (calc): %10.3f '%(np.exp(-mut))),0,WACV)
mainSizer.Add(conSizer,0)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Indexing Peaks
################################################################################
def UpdateIndexPeaksGrid(G2frame, data):
'''respond to selection of PWDR Index Peak List data
tree item.
'''
bravaisSymb = ['Fm3m','Im3m','Pm3m','R3-H','P6/mmm','I4/mmm',
'P4/mmm','Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm','C2/m','P2/m','C1','P1']
IndexId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List')
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
limitId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits')
Limits = G2frame.GPXtree.GetItemPyData(limitId)
def RefreshIndexPeaksGrid(event):
r,c = event.GetRow(),event.GetCol()
peaks = G2frame.IndexPeaksTable.GetData()
if c == 2:
peaks[r][c] = not peaks[r][c]
G2frame.IndexPeaksTable.SetData(peaks)
G2frame.indxPeaks.ForceRefresh()
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnReload(event):
peaks = []
sigs = []
Peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'))
for ip,peak in enumerate(Peaks['peaks']):
dsp = G2lat.Pos2dsp(Inst,peak[0])
peaks.append([peak[0],peak[2],True,False,0,0,0,dsp,0.0]) #SS?
try:
sig = Peaks['sigDict']['pos'+str(ip)]
except KeyError:
sig = 0.
sigs.append(sig)
data = [peaks,sigs]
G2frame.GPXtree.SetItemPyData(IndexId,data)
UpdateIndexPeaksGrid(G2frame,data)
def OnSave(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose Index peaks csv file', pth, '',
'indexing peaks file (*.csv)|*.csv',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
filename = os.path.splitext(filename)[0]+'.csv'
File = open(filename,'w')
names = 'h,k,l,position,intensity,d-Obs,d-calc\n'
File.write(names)
fmt = '%d,%d,%d,%.4f,%.1f,%.5f,%.5f\n'
for refl in data[0]:
if refl[3]:
File.write(fmt%(refl[4],refl[5],refl[6],refl[0],refl[1],refl[7],refl[8]))
File.close()
finally:
dlg.Destroy()
def KeyEditPickGrid(event):
colList = G2frame.indxPeaks.GetSelectedCols()
data = G2frame.GPXtree.GetItemPyData(IndexId)
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
G2frame.indxPeaks.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if G2frame.IndexPeaksTable.GetColLabelValue(col) in ['use',]:
if key == 89: #'Y'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col]=True
elif key == 78: #'N'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col]=False
elif key == 83: # 'S'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col] = not data[0][row][col]
if 'PWD' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.IndPeaksMenu)
G2frame.Bind(wx.EVT_MENU, OnReload, id=G2G.wxID_INDXRELOAD)
G2frame.Bind(wx.EVT_MENU, OnSave, id=G2G.wxID_INDEXSAVE)
G2frame.dataWindow.IndexPeaks.Enable(False)
G2frame.IndexPeaksTable = []
if len(data[0]):
G2frame.dataWindow.IndexPeaks.Enable(True)
Unit = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List'))
if Unit:
if len(Unit) == 4: #patch
Unit.append({})
if len(Unit) == 5: #patch
Unit.append({})
controls,bravais,cellist,dmin,ssopt,magcells = Unit
if 'T' in Inst['Type'][0]: #TOF - use other limit!
dmin = G2lat.Pos2dsp(Inst,Limits[1][0])
else:
dmin = G2lat.Pos2dsp(Inst,Limits[1][1])
G2frame.HKL = []
if ssopt.get('Use',False):
cell = controls[6:12]
A = G2lat.cell2A(cell)
ibrav = bravaisSymb.index(controls[5])
spc = controls[13]
SGData = G2spc.SpcGroup(spc)[1]
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
Vec = ssopt['ModVec']
maxH = ssopt['maxH']
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A)
G2frame.HKL = np.array(G2frame.HKL)
data[0] = G2indx.IndexSSPeaks(data[0],G2frame.HKL)[1]
else: #select cell from table - no SS
for i,cell in enumerate(cellist):
if cell[-2]:
ibrav = cell[2]
A = G2lat.cell2A(cell[3:9])
G2frame.HKL = G2lat.GenHBravais(dmin,ibrav,A)
for hkl in G2frame.HKL:
hkl.insert(4,G2lat.Dsp2pos(Inst,hkl[3]))
G2frame.HKL = np.array(G2frame.HKL)
data[0] = G2indx.IndexPeaks(data[0],G2frame.HKL)[1]
break
rowLabels = []
for i in range(len(data[0])): rowLabels.append(str(i+1))
colLabels = ['position','intensity','use','indexed','h','k','l','d-obs','d-calc']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_FLOAT+':10,1',]+2*[wg.GRID_VALUE_BOOL,]+ \
3*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_FLOAT+':10,5',]
if len(data[0]) and len(data[0][0]) > 9:
colLabels = ['position','intensity','use','indexed','h','k','l','m','d-obs','d-calc']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_FLOAT+':10,1',]+2*[wg.GRID_VALUE_BOOL,]+ \
4*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_FLOAT+':10,5',]
G2frame.GPXtree.SetItemPyData(IndexId,data)
G2frame.IndexPeaksTable = G2G.Table(data[0],rowLabels=rowLabels,colLabels=colLabels,types=Types)
G2frame.dataWindow.currentGrids = []
G2frame.indxPeaks = G2G.GSGrid(parent=G2frame.dataWindow)
G2frame.indxPeaks.SetTable(G2frame.IndexPeaksTable, True)
G2frame.indxPeaks.SetScrollRate(10,10)
XY = []
Sigs = []
for r in range(G2frame.indxPeaks.GetNumberRows()):
for c in range(G2frame.indxPeaks.GetNumberCols()):
if c == 2:
G2frame.indxPeaks.SetReadOnly(r,c,isReadOnly=False)
else:
G2frame.indxPeaks.SetReadOnly(r,c,isReadOnly=True)
if data[0][r][2] and data[0][r][3]:
XY.append([data[0][r][-1],data[0][r][0]])
try:
sig = data[1][r]
except IndexError:
sig = 0.
Sigs.append(sig)
G2frame.indxPeaks.Bind(wg.EVT_GRID_CELL_LEFT_CLICK, RefreshIndexPeaksGrid)
G2frame.indxPeaks.Bind(wx.EVT_KEY_DOWN, KeyEditPickGrid)
G2frame.indxPeaks.AutoSizeColumns(False)
if len(XY):
XY = np.array(XY)
G2plt.PlotCalib(G2frame,Inst,XY,Sigs,newPlot=True)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(G2frame.indxPeaks,0,wx.ALL|wx.EXPAND,1)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Unit cells
################################################################################
def UpdateUnitCellsGrid(G2frame, data):
'''respond to selection of PWDR Unit Cells data tree item.
'''
G2frame.ifGetExclude = False
UnitCellsId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List')
SPGlist = G2spc.spglist
bravaisSymb = ['Fm3m','Im3m','Pm3m','R3-H','P6/mmm','I4/mmm','P4/mmm',
'Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm','I2/m','C2/m','P2/m','P1','C1']
spaceGroups = ['F m 3 m','I m 3 m','P m 3 m','R 3 m','P 6/m m m','I 4/m m m',
'P 4/m m m','F m m m','I m m m','A m m m','B m m m','C m m m','P m m m','I 2/m','C 2/m','P 2/m','P -1','C -1']
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))[1]
if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:
wave = G2mth.getWave(Inst)
dmin = G2lat.Pos2dsp(Inst,Limits[1])
else:
difC = Inst['difC'][1]
dmin = G2lat.Pos2dsp(Inst,Limits[0])
def SetLattice(controls):
ibrav = bravaisSymb.index(controls[5])
if controls[5] in ['Fm3m','Im3m','Pm3m']:
controls[7] = controls[8] = controls[6]
controls[9] = controls[10] = controls[11] = 90.
elif controls[5] in ['R3m','P6/mmm','I4/mmm','P4/mmm']:
controls[7] = controls[6]
controls[9] = controls[10] = controls[11] = 90.
if controls[5] in ['R3-H','P6/mmm']:
controls[11] = 120.
elif controls[5] in ['Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm']:
controls[9] = controls[10] = controls[11] = 90.
elif controls[5] in ['C2/m','P2/m','I2/m']:
controls[9] = controls[11] = 90. # b unique
controls[12] = G2lat.calc_V(G2lat.cell2A(controls[6:12]))
return ibrav
def OnNcNo(event):
controls[2] = NcNo.GetValue()
def OnIfX20(event):
G2frame.ifX20 = x20.GetValue()
def OnBravais(event):
Obj = event.GetEventObject()
bravais[bravList.index(Obj.GetId())] = Obj.GetValue()
# wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnZeroVar(event):
controls[0] = zeroVar.GetValue()
def OnSSopt(event):
if controls[5] in ['Fm3m','Im3m','Pm3m']:
SSopt.SetValue(False)
G2frame.ErrorDialog('Cubic lattice','Incommensurate superlattice not possible with a cubic lattice')
return
ssopt['Use'] = SSopt.GetValue()
if 'ssSymb' not in ssopt:
ssopt.update({'ssSymb':'(abg)','ModVec':[0.1,0.1,0.1],'maxH':1})
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnSelMG(event):
ssopt['ssSymb'] = selMG.GetValue()
Vec = ssopt['ModVec']
modS = G2spc.splitSSsym(ssopt['ssSymb'])[0]
ssopt['ModVec'] = G2spc.SSGModCheck(Vec,modS)[0]
print (' Selecting: '+controls[13]+ssopt['ssSymb']+ 'maxH:'+str(ssopt['maxH']))
OnHklShow(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnModVal(invalid,value,tc):
OnHklShow(tc.event)
def OnMoveMod(event):
Obj = event.GetEventObject()
ObjId = Obj.GetId()
Id,valObj = Indx[ObjId]
move = Obj.GetValue()*0.01
Obj.SetValue(0)
value = min(0.98,max(-0.98,float(valObj.GetValue())+move))
valObj.SetValue('%.4f'%(value))
ssopt['ModVec'][Id] = value
OnHklShow(event)
def OnMaxMH(event):
ssopt['maxH'] = int(maxMH.GetValue())
print (' Selecting: '+controls[13]+ssopt['ssSymb']+'maxH:'+str(ssopt['maxH']))
OnHklShow(event)
def OnButton(xpos,ypos):
modSym = ssopt['ssSymb'].split(')')[0]+')'
if modSym in ['(a0g)','(a1/2g)']:
ssopt['ModVec'][0] = xpos
ssopt['ModVec'][2] = ypos
elif modSym in ['(0bg)','(1/2bg)']:
ssopt['ModVec'][1] = xpos
ssopt['ModVec'][2] = ypos
elif modSym in ['(ab0)','(ab1/2)']:
ssopt['ModVec'][0] = xpos
ssopt['ModVec'][1] = ypos
vec = ssopt['ModVec']
print(' Trying: %s %s modulation vector = %.3f %.3f %.3f'%(controls[13],ssopt['ssSymb'],vec[0],vec[1],vec[2]))
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnFindOneMV(event):
Peaks = np.copy(peaks[0])
print (' Trying: ',controls[13],ssopt['ssSymb'], ' maxH: 1')
dlg = wx.ProgressDialog('Elapsed time','Modulation vector search',
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE)
try:
ssopt['ModVec'],result = G2indx.findMV(Peaks,controls,ssopt,Inst,dlg)
if len(result[0]) == 2:
G2plt.PlotXYZ(G2frame,result[2],1./result[3],labelX='a',labelY='g',
newPlot=True,Title='Modulation vector search',buttonHandler=OnButton)
finally:
dlg.Destroy()
OnHklShow(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnFindMV(event):
best = 1.
bestSS = ''
for ssSym in ssChoice:
ssopt['ssSymb'] = ssSym
Peaks = np.copy(peaks[0])
ssopt['ModVec'] = G2spc.SSGModCheck(ssopt['ModVec'],G2spc.splitSSsym(ssSym)[0],True)[0]
print (' Trying: '+controls[13]+ssSym+ ' maxH: 1')
ssopt['ModVec'],result = G2indx.findMV(Peaks,controls,ssopt,Inst,dlg=None)
OnHklShow(event)
if result[1] < best:
bestSS = ssSym
best = result[1]
ssopt['ssSymb'] = bestSS
ssopt['ModVec'],result = G2indx.findMV(Peaks,controls,ssopt,Inst,dlg=None)
if len(result[0]) == 2:
G2plt.PlotXYZ(G2frame,result[2],1./result[3],labelX='a',labelY='g',
newPlot=True,Title='Modulation vector search')
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnBravSel(event):
brav = bravSel.GetString(bravSel.GetSelection())
controls[5] = brav
controls[13] = SPGlist[brav][0]
ssopt['Use'] = False
wx.CallLater(100,UpdateUnitCellsGrid,G2frame,data)
def OnSpcSel(event):
controls[13] = spcSel.GetString(spcSel.GetSelection())
ssopt['SGData'] = G2spc.SpcGroup(controls[13])[1]
ssopt['Use'] = False
G2frame.dataWindow.RefineCell.Enable(True)
OnHklShow(event)
wx.CallLater(100,UpdateUnitCellsGrid,G2frame,data)
def SetCellValue(Obj,ObjId,value):
if controls[5] in ['Fm3m','Im3m','Pm3m']:
controls[6] = controls[7] = controls[8] = value
controls[9] = controls[10] = controls[11] = 90.0
Obj.SetValue(controls[6])
elif controls[5] in ['R3-H','P6/mmm','I4/mmm','P4/mmm']:
if ObjId == 0:
controls[6] = controls[7] = value
Obj.SetValue(controls[6])
else:
controls[8] = value
Obj.SetValue(controls[8])
controls[9] = controls[10] = controls[11] = 90.0
if controls[5] in ['R3-H','P6/mmm']:
controls[11] = 120.
elif controls[5] in ['Fmmm','Immm','Cmmm','Pmmm']:
controls[6+ObjId] = value
Obj.SetValue(controls[6+ObjId])
controls[9] = controls[10] = controls[11] = 90.0
elif controls[5] in ['I2/m','C2/m','P2/m']:
controls[9] = controls[11] = 90.0
if ObjId != 3:
controls[6+ObjId] = value
Obj.SetValue(controls[6+ObjId])
else:
controls[10] = value
Obj.SetValue(controls[10])
else:
controls[6+ObjId] = value
if ObjId < 3:
Obj.SetValue(controls[6+ObjId])
else:
Obj.SetValue(controls[6+ObjId])
controls[12] = G2lat.calc_V(G2lat.cell2A(controls[6:12]))
volVal.SetValue("%.3f"%(controls[12]))
def OnMoveCell(event):
Obj = event.GetEventObject()
ObjId = cellList.index(Obj.GetId())
valObj = valDict[Obj.GetId()]
inc = float(shiftChoices[shiftSel.GetSelection()][:-1])
move = Obj.GetValue() # +1 or -1
Obj.SetValue(0)
value = float(valObj.GetValue()) * (1. + move*inc/100.)
SetCellValue(valObj,ObjId//2,value)
OnHklShow(event)
def OnExportCells(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose Indexing Result csv file', pth, '',
'indexing result file (*.csv)|*.csv',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
filename = os.path.splitext(filename)[0]+'.csv'
File = open(filename,'w')
names = 'M20,X20,Bravais,a,b,c,alpha,beta,gamma,volume\n'
File.write(names)
fmt = '%.2f,%d,%s,%.4f,%.4f,%.4f,%.2f,%.2f,%.2f,%.3f\n'
for cell in cells:
File.write(fmt%(cell[0],cell[1],bravaisSymb[cell[2]], cell[3],cell[4],cell[5], cell[6],cell[7],cell[8],cell[9]))
File.close()
finally:
dlg.Destroy()
def OnCellChange(invalid,value,tc):
if invalid:
return
SetCellValue(tc,Info[tc.GetId()],value)
OnHklShow(tc.event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnHklShow(event):
PatternId = G2frame.PatternId
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Index Peak List'))
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Unit Cells List'))
# recompute dmin in case limits were changed
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))[1]
if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:
dmin = G2lat.Pos2dsp(Inst,Limits[1])
else:
dmin = G2lat.Pos2dsp(Inst,Limits[0])
cell = controls[6:12]
A = G2lat.cell2A(cell)
spc = controls[13]
SGData = ssopt.get('SGData',G2spc.SpcGroup(spc)[1])
Symb = SGData['SpGrp']
M20 = X20 = 0.
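# M20: de Wolff figure of merit; X20: number of unindexed lines (both taken over the first 20 observed lines)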
if ssopt.get('Use',False) and ssopt.get('ssSymb',''):
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
if SSGData is None:
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'][:-1])[1] #skip trailing 's' for mag.
Symb = SSGData['SSpGrp']
Vec = ssopt['ModVec']
maxH = ssopt['maxH']
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A)
if len(peaks[0]):
peaks = [G2indx.IndexSSPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #keep esds from peak fit
M20,X20 = G2indx.calc_M20SS(peaks[0],G2frame.HKL)
else:
G2frame.HKL = G2pwd.getHKLpeak(dmin,SGData,A,Inst)
if len(peaks[0]):
peaks = [G2indx.IndexPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #keep esds from peak fit
M20,X20 = G2indx.calc_M20(peaks[0],G2frame.HKL)
G2frame.HKL = np.array(G2frame.HKL)
# coding=UTF-8
"""
Description:
Author: <NAME> (<EMAIL>)
Date: 2021-06-06 18:22:16
LastEditors: <NAME> (<EMAIL>)
LastEditTime: 2021-06-06 18:22:29
"""
from typing import List
import numpy as np
import torch
from pyutils.compute import batch_diag, batch_eye_cpu
from pyutils.general import logger, profile
from scipy.stats import ortho_group
from torch.types import Device
try:
import matrix_parametrization_cuda
except ImportError as e:
logger.warning("Cannot import matrix_parametrization_cuda. Decomposers can only work on CPU mode")
__all__ = [
"RealUnitaryDecomposerBatch",
"ComplexUnitaryDecomposerBatch",
]
class RealUnitaryDecomposerBatch(object):
timer = False
def __init__(
self,
min_err: float = 1e-7,
timer: bool = False,
determine: bool = False,
alg: str = "reck",
dtype=np.float64,
) -> None:
self.min_err = min_err
self.timer = timer
self.determine = determine
assert alg.lower() in {"reck", "clements", "francis"}, logger.error(
f"Unitary decomposition algorithm can only be [reck, clements, francis], but got {alg}"
)
self.set_alg(alg)
self.dtype = dtype
def set_alg(self, alg):
assert alg.lower() in {"reck", "clements", "francis"}, logger.error(
f"Unitary decomposition algorithm can only be [reck, clements, francis], but got {alg}"
)
self.alg = alg
def build_plane_unitary(self, p, q, phi, N, transpose=True):
assert N > 0 and isinstance(N, int), "[E] Matrix size must be positive integer"
assert (
isinstance(p, int) and isinstance(q, int) and 0 <= p < q < N
), "[E] Integer value p and q must satisfy p < q"
assert isinstance(phi, float) or isinstance(phi, int), "[E] Value phi must be of type float or int"
U = np.eye(N)
c = np.cos(phi)
s = np.sin(phi)
U[p, p] = U[q, q] = c
U[q, p] = s if not transpose else -s
U[p, q] = -s if not transpose else s
return U
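# A minimal sketch of the output (illustration only): for N=3, p=0, q=1, phi=np.pi/2 and the default
# transpose=True this returns (up to floating-point noise in cos(pi/2))
#   [[ 0., 1., 0.],
#    [-1., 0., 0.],
#    [ 0., 0., 1.]]
# i.e. a Givens rotation acting in the (p, q) plane.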
def cal_phi_batch_determine(self, u1: np.ndarray, u2: np.ndarray, is_first_col=False) -> np.ndarray:
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
cond1 = u1_abs < min_err
cond2 = u2_abs < min_err
cond1_n = ~cond1
cond2_n = ~cond2
if is_first_col:
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(
cond1 & cond2_n, np.where(u2 > min_err, -0.5 * pi, 0.5 * pi), np.arctan2(-u2, u1)
),
),
)
else:
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(
cond1 & cond2_n, np.where(u2 > min_err, -0.5 * pi, 0.5 * pi), np.arctan(-u2 / u1)
),
),
)
return phi
def cal_phi_batch_nondetermine(self, u1: np.ndarray, u2: np.ndarray, is_first_col=False) -> np.ndarray:
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
cond1 = u1_abs < min_err
cond2 = u2_abs < min_err
cond1_n = ~cond1
cond2_n = ~cond2
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(cond1 & cond2_n, np.where(u2 > min_err, -0.5 * pi, 0.5 * pi), np.arctan2(-u2, u1)),
),
)
return phi
def cal_phi_determine(self, u1, u2, is_first_col=False):
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
if u1_abs < min_err and u2_abs < min_err:
phi = 0
elif u1_abs >= min_err and u2_abs < min_err:
phi = 0 if u1 > min_err else -pi
elif u1_abs < min_err and u2_abs >= min_err:
phi = -0.5 * pi if u2 > min_err else 0.5 * pi
else:
# solve the equation: u'_1n=0
if is_first_col:
phi = np.arctan2(-u2, u1) # four-quadrant arctangent
else:
phi = np.arctan(-u2 / u1)
return phi
def cal_phi_nondetermine(self, u1, u2):
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
if u1_abs < min_err and u2_abs < min_err:
phi = 0
elif u1_abs >= min_err and u2_abs < min_err:
phi = 0 if u1 > min_err else -pi
elif u1_abs < min_err and u2_abs >= min_err:
phi = -0.5 * pi if u2 > min_err else 0.5 * pi
else:
# solve the equation: u'_1n=0
phi = np.arctan2(-u2, u1) # four-quadrant arctangent
return phi
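# Worked example (illustration): for u1 = u2 = 1/sqrt(2), phi = arctan2(-u2, u1) = -pi/4; the column
# rotation that follows maps u2 -> u1*sin(phi) + u2*cos(phi) = 0, which is exactly the nullification goal.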
def decompose_kernel_batch(self, U: np.ndarray, dim, phi_list=None):
"""return U^(N-1); (phi_1,...,phi_N-2); (sigma_1,...,sigma_N-2)"""
N = U.shape[-1]
if phi_list is None:
phi_list = np.zeros(list(U.shape[:-2]) + [dim], dtype=np.float64)
calPhi_batch = self.cal_phi_batch_determine if self.determine else self.cal_phi_batch_nondetermine
for i in range(N - 1):
u1, u2 = U[..., 0, 0], U[..., 0, N - 1 - i]
phi = calPhi_batch(u1, u2, is_first_col=(i == 0))
phi_list[..., i] = phi
p, q = 0, N - i - 1
c, s = np.cos(phi)[..., np.newaxis], np.sin(phi)[..., np.newaxis]
col_p, col_q = U[..., :, p], U[..., :, q]
U[..., :, p], U[..., :, q] = col_p * c - col_q * s, col_p * s + col_q * c
return U, phi_list
def decompose_kernel_determine(self, U, phi_list):
"""return U^(N-1); (phi_1,...,phi_N-2); (sigma_1,...,sigma_N-2)"""
N = U.shape[0]
for i in range(N - 1):
u1, u2 = U[0, 0], U[0, N - 1 - i]
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
if u1_abs < min_err and u2_abs < min_err:
phi = 0
elif u1_abs >= min_err and u2_abs < min_err:
phi = 0 if u1 > min_err else -pi
elif u1_abs < min_err and u2_abs >= min_err:
phi = -0.5 * pi if u2 > min_err else 0.5 * pi
else:
# solve the equation: u'_1n=0
if i == 0:
phi = np.arctan2(-u2, u1) # four-quadrant arctangent
else:
phi = np.arctan(-u2 / u1)
phi_list[i] = phi
p, q = 0, N - i - 1
c, s = np.cos(phi), np.sin(phi)
row_p, row_q = U[:, p], U[:, q]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
U[:, p], U[:, q] = row_p_cos - row_q_sin, row_q_cos + row_p_sin
return U, phi_list
def decompose_kernel_nondetermine(self, U, phi_list):
"""return U^(N-1); (phi_1,...,phi_N-2); (sigma_1,...,sigma_N-2)"""
N = U.shape[0]
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
for i in range(N - 1):
# with TimerCtx() as t:
u1, u2 = U[0, 0], U[0, N - 1 - i]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
phi_list[i] = phi
p, q = 0, N - i - 1
c = np.cos(phi)
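# recover sin(phi) from cos(phi): phi lies in (-pi, pi], so sign(sin(phi)) == sign(phi); this avoids a second trig call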
s = (1 - c * c) ** 0.5 if phi > 0 else -((1 - c * c) ** 0.5)
row_p, row_q = U[:, p], U[:, q]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
U[:, p], U[:, q] = row_p_cos - row_q_sin, row_q_cos + row_p_sin
return U, phi_list
@profile(timer=timer)
def decompose_francis_cpu(self, U):
#### This decomposition follows the natural reflection of MZIs, so the circuit gives a reversed output.
### Francis style, 1962
N = U.shape[0]
assert N > 0 and U.shape[0] == U.shape[1], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros([N, N], dtype=self.dtype)
delta_list = np.zeros(N, dtype=self.dtype)
decompose_kernel = (
self.decompose_kernel_determine if self.determine else self.decompose_kernel_nondetermine
)
for i in range(N - 1):
U, _ = decompose_kernel(U, phi_list=phi_mat[i, :])
delta_list[i] = U[0, 0]
U = U[1:, 1:]
else:
delta_list[-1] = U[-1, -1]
return delta_list, phi_mat
@profile(timer=timer)
def decompose_francis_batch(self, U: np.ndarray):
N = U.shape[-1]
assert N > 0 and U.shape[-1] == U.shape[-2], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(U.shape, dtype=np.float64)
delta_list = np.zeros(U.shape[:-1], dtype=np.float64)
for i in range(N - 1):
U, _ = self.decompose_kernel_batch(U, dim=N, phi_list=phi_mat[..., i, :])
delta_list[..., i] = U[..., 0, 0]
U = U[..., 1:, 1:]
else:
delta_list[..., -1] = U[..., -1, -1]
return delta_list, phi_mat
def decompose_francis(self, U):
if isinstance(U, np.ndarray):
if len(U.shape) == 2:
return self.decompose_francis_cpu(U)
else:
return self.decompose_francis_batch(U)
else:
if U.is_cuda:
N = U.size(-1)
size = U.size()
U = U.view(-1, N, N).contiguous()
delta_list = torch.zeros(list(U.size())[:-1], dtype=U.dtype, device=U.device).contiguous()
phi_mat = torch.zeros_like(U).contiguous()
matrix_parametrization_cuda.decompose_francis(U, delta_list, phi_mat)
delta_list = delta_list.view(list(size)[:-1])
phi_mat = phi_mat.view(size)
return delta_list, phi_mat
else:
# decompose_francis_cpu/_batch return a (delta_list, phi_mat) tuple; convert each array separately
if U.ndim == 2:
delta_list, phi_mat = self.decompose_francis_cpu(U.cpu().numpy())
else:
delta_list, phi_mat = self.decompose_francis_batch(U.cpu().numpy())
return torch.from_numpy(delta_list), torch.from_numpy(phi_mat)
@profile(timer=timer)
def decompose_reck_cpu(self, U):
"""Reck decomposition implemented by Neurophox. Triangular mesh, input and output have no mirroring effects, i.e, [x1, ..., xn] -> Y = U x X -> [y1, ..., yn]
Rmn: [ cos(phi) -sin(phi)] -> MZI achieves counter-clock-wise rotation with phi (reconstruction, left mul)
[ sin(phi) cos(phi)]
Rmn*:[ cos(phi) sin(phi)] -> column-wise clock-wise rotation (decompose, right mul)
[-sin(phi) cos(phi)]
U = D R43 R32 R43 R21 R32 R43
"""
N = U.shape[0]
assert N > 0 and U.shape[0] == U.shape[1], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros([N, N], dtype=self.dtype) ## left upper triangular array.
"""
the bottom-left phase corresponds to the MZI at the bottom-left corner.
The decomposition ordering follows from bottom to top, from left to right.
R21 R32 R43 0
R32 R43 0 0
R43 0 0 0
0 0 0 0
"""
delta_list = np.zeros(N, dtype=self.dtype) ## D
"""
x x x 0 x x 0 0
x x x x -> x x x 0
x x x x x x x x
x x x x x x x x
"""
for i in range(N - 1):
### each outer loop deals with one off-diagonal, nullification starts from top-right
### even loop for column rotation
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = j ## row
q = N - 1 - i + j ## col
### rotate two columns such that u2 is nullified to 0
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
### col q-1 nullifies col q
u1, u2 = U[p, q - 1], U[p, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
# phi_mat[p,q] = phi
# theta_checkerboard[pairwise_index, -j - 1] = phi
phi_mat[N - i - 2, j] = phi
c, s = np.cos(phi), np.sin(phi)
## q_m1 means q-1; right multiply by R*
col_q_m1, col_q = U[p:, q - 1], U[p:, q]
col_q_m1_cos, col_q_m1_sin = col_q_m1 * c, col_q_m1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[p:, q - 1], U[p:, q] = col_q_m1_cos - col_q_sin, col_q_cos + col_q_m1_sin
delta_list = np.diag(
U
) ## only the first and last element can be 1 or -1, the rest elements are all 1. This feature can be used in fast forward/reconstruction
return delta_list, phi_mat
@profile(timer=timer)
def decompose_reck_batch(self, U):
"""Reck decomposition implemented by Neurophox. Triangular mesh, input and output have no mirroring effects, i.e, [x1, ..., xn] -> Y = U x X -> [y1, ..., yn]
Rmn: [ cos(phi) -sin(phi)] -> MZI achieves counter-clock-wise rotation with phi (reconstruction, left mul)
[ sin(phi) cos(phi)]
Rmn*:[ cos(phi) sin(phi)] -> column-wise clock-wise rotation (decompose, right mul)
[-sin(phi) cos(phi)]
U = D R43 R32 R43 R21 R32 R43
"""
N = U.shape[-1]
assert N > 0 and U.shape[-1] == U.shape[-2], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(U.shape, dtype=self.dtype) ## left upper triangular array.
"""
the bottom-left phase corresponds to the MZI at the bottom-left corner.
The decomposition ordering follows from bottom to top, from left to right.
R21 R32 R43 0
R32 R43 0 0
R43 0 0 0
0 0 0 0
"""
delta_list = np.zeros(U.shape[:-1], dtype=self.dtype) ## D
"""
x x x 0 x x 0 0
x x x x -> x x x 0
x x x x x x x x
x x x x x x x x
"""
for i in range(N - 1):
### each outer loop deals with one off-diagonal, nullification starts from top-right
### even loop for column rotation
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = j ## row
q = N - 1 - i + j ## col
### rotate two columns such that u2 is nullified to 0
### col q-1 nullifies col q
u1, u2 = U[..., p, q - 1], U[..., p, q]
phi = self.cal_phi_batch_nondetermine(u1, u2)
phi_mat[..., N - i - 2, j] = phi
c, s = np.cos(phi)[..., np.newaxis], np.sin(phi)[..., np.newaxis]
## q_m1 means q-1; right multiply by R*
col_q_m1, col_q = U[..., p:, q - 1], U[..., p:, q]
col_q_m1_cos, col_q_m1_sin = col_q_m1 * c, col_q_m1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[..., p:, q - 1], U[..., p:, q] = col_q_m1_cos - col_q_sin, col_q_cos + col_q_m1_sin
delta_list = batch_diag(U)
return delta_list, phi_mat
def decompose_reck(self, U):
if isinstance(U, np.ndarray):
if len(U.shape) == 2:
return self.decompose_reck_cpu(U)
else:
return self.decompose_reck_batch(U)
else:
if U.is_cuda:
N = U.size(-1)
size = U.size()
U = U.view(-1, N, N).contiguous()
delta_list = torch.zeros(list(U.size())[:-1], dtype=U.dtype, device=U.device).contiguous()
phi_mat = torch.zeros_like(U).contiguous()
matrix_parametrization_cuda.decompose_reck(U, delta_list, phi_mat)
delta_list = delta_list.view(list(size)[:-1])
phi_mat = phi_mat.view(size)
return delta_list, phi_mat
else:
# decompose_reck_cpu/_batch return a (delta_list, phi_mat) tuple; convert each array separately
if U.ndim == 2:
delta_list, phi_mat = self.decompose_reck_cpu(U.cpu().numpy())
else:
delta_list, phi_mat = self.decompose_reck_batch(U.cpu().numpy())
return torch.from_numpy(delta_list), torch.from_numpy(phi_mat)
@profile(timer=timer)
def decompose_clements_cpu(self, U):
"""clements Optica 2018 unitary decomposition
Tmn: [e^iphi x cos(theta) -sin(theta)]
[e^iphi x sin(theta) cos(theta)]
phi DC 2 theta DC ---
--- DC ------- DC ---
T45 T34 T23 T12 T45 T34 U T12* T34* T23* T12 = D
U=D T34 T45 T12 T23 T34 T45 T12 T23 T34 T12"""
N = U.shape[0]
assert N > 0 and U.shape[0] == U.shape[1], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(
[N, N], dtype=self.dtype
) ## theta checkerboard that maps to the real MZI mesh layout, which is efficient for parallel reconstruction col-by-col.
for i in range(N - 1):
### each outer loop deals with one off-diagonal
## even loop for column rotation
if i % 2 == 0:
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = N - 1 - j ## row
q = i - j ## col
### rotate two columns such that u2 is nullified to 0
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
u1, u2 = U[p, q + 1], U[p, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
phi = (
-phi
) ### simply convert the solved theta from T to T*; this is easier than changing the solving procedure
# phi_mat[p,q] = phi
pairwise_index = i - j
# theta_checkerboard[pairwise_index, -j - 1] = phi
phi_mat[pairwise_index, j] = phi
c, s = np.cos(phi), np.sin(phi)
## q_p1 means q+1; right multiply by T*
col_q_p1, col_q = U[: p + 1, q + 1], U[: p + 1, q]
col_q_p1_cos, col_q_p1_sin = col_q_p1 * c, col_q_p1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[: p + 1, q + 1], U[: p + 1, q] = col_q_p1_cos + col_q_sin, col_q_cos - col_q_p1_sin
else:
## odd loop for row rotation
for j in range(i + 1):
p = N - 1 - i + j
q = j
### rotate two rows such that u2 is nullified to 0
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
u1, u2 = U[p - 1, q], U[p, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
# phi_mat[p,q] = phi
pairwise_index = N + j - i - 2
# theta_checkerboard[pairwise_index, j] = phi
phi_mat[
pairwise_index, N - 1 - j
] = (
-phi
) ### from T* to T, consistent with propagation through the MZI (T); see the Clements paper, Eq. (4)
c, s = np.cos(phi), np.sin(phi)
## p_1 means p - 1; left multiply by T
row_p_1, row_p = U[p - 1, j:], U[p, j:]
row_p_1_cos, row_p_1_sin = row_p_1 * c, row_p_1 * s
row_p_cos, row_p_sin = row_p * c, row_p * s
U[p - 1, j:], U[p, j:] = row_p_1_cos - row_p_sin, row_p_cos + row_p_1_sin
delta_list = np.diag(
U
) ## only the first and last element can be 1 or -1, the rest elements are all 1. This feature can be used in fast forward/reconstruction
delta_list.setflags(write=True)
return delta_list, phi_mat
@profile(timer=timer)
def decompose_clements_batch(self, U):
N = U.shape[-1]
assert N > 0 and U.shape[-1] == U.shape[-2], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(U.shape, dtype=np.float64)
delta_list = np.zeros(U.shape[:-1], dtype=np.float64)
for i in range(N - 1):
### each outer loop deals with one off-diagonal
## even loop for column rotation
if i % 2 == 0:
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = N - 1 - j ## row
q = i - j ## col
### rotate two columns such that u2 is nullified to 0
pi = np.pi
# half_pi = np.pi / 2
min_err = self.min_err
u1, u2 = U[..., p : p + 1, q + 1], U[..., p : p + 1, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1 = u1_abs < min_err
cond2 = u2_abs < min_err
cond1_n = ~cond1
cond2_n = ~cond2
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(
cond1 & cond2_n,
np.where(u2 > min_err, -0.5 * pi, 0.5 * pi),
np.arctan2(-u2, u1),
),
),
)
phi = (
-phi
) ### simply convert the solved theta from T to T*; this is easier than changing the solving procedure
# phi_mat[p,q] = phi
pairwise_index = i - j
# theta_checkerboard[pairwise_index, -j - 1] = phi
phi_mat[..., pairwise_index, j] = phi[..., 0]
c, s = np.cos(phi), np.sin(phi)
## q_p1 means q+1; right multiply by T*
col_q_p1, col_q = U[..., : p + 1, q + 1], U[..., : p + 1, q]
col_q_p1_cos, col_q_p1_sin = col_q_p1 * c, col_q_p1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[..., : p + 1, q + 1], U[..., : p + 1, q] = (
col_q_p1_cos + col_q_sin,
col_q_cos - col_q_p1_sin,
)
else:
## odd loop for row rotation
for j in range(i + 1):
p = N - 1 - i + j
q = j
### rotate two rows such that u2 is nullified to 0
pi = np.pi
min_err = self.min_err
u1, u2 = U[..., p - 1, q : q + 1], U[..., p, q : q + 1]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1 = u1_abs < min_err
cond2 = u2_abs < min_err
cond1_n = ~cond1
cond2_n = ~cond2
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(
cond1 & cond2_n,
np.where(u2 > min_err, -0.5 * pi, 0.5 * pi),
np.arctan2(-u2, u1),
),
),
)
pairwise_index = N + j - i - 2
# theta_checkerboard[pairwise_index, j] = phi
phi_mat[..., pairwise_index, N - 1 - j] = -phi[
..., 0
] ### from T* to T, consistent with propagation through the MZI (T); see the Clements paper, Eq. (4)
c, s = np.cos(phi), np.sin(phi)
## p_1 means p - 1; left multiply by T
row_p_1, row_p = U[..., p - 1, j:], U[..., p, j:]
row_p_1_cos, row_p_1_sin = row_p_1 * c, row_p_1 * s
row_p_cos, row_p_sin = row_p * c, row_p * s
U[..., p - 1, j:], U[..., p, j:] = row_p_1_cos - row_p_sin, row_p_cos + row_p_1_sin
delta_list = batch_diag(U)
return delta_list, phi_mat
def decompose_clements(self, U):
if isinstance(U, np.ndarray):
if len(U.shape) == 2:
return self.decompose_clements_cpu(U)
else:
return self.decompose_clements_batch(U)
else:
if U.is_cuda:
N = U.size(-1)
size = U.size()
U = U.view(-1, N, N).contiguous()
delta_list = torch.zeros(list(U.size())[:-1], dtype=U.dtype, device=U.device).contiguous()
phi_mat = torch.zeros_like(U).contiguous()
matrix_parametrization_cuda.decompose_clements(U, delta_list, phi_mat)
delta_list = delta_list.view(list(size)[:-1])
phi_mat = phi_mat.view(size)
return delta_list, phi_mat
else:
# decompose_clements_cpu/_batch return a (delta_list, phi_mat) tuple; convert each array separately
if U.ndim == 2:
delta_list, phi_mat = self.decompose_clements_cpu(U.cpu().numpy())
else:
delta_list, phi_mat = self.decompose_clements_batch(U.cpu().numpy())
return torch.from_numpy(delta_list), torch.from_numpy(phi_mat)
def decompose(self, U):
if self.alg == "reck":
decompose_cpu = self.decompose_reck_cpu
decompose_batch = self.decompose_reck_batch
decompose_cuda = matrix_parametrization_cuda.decompose_reck
elif self.alg == "francis":
decompose_cpu = self.decompose_francis_cpu
decompose_batch = self.decompose_francis_batch
decompose_cuda = matrix_parametrization_cuda.decompose_francis
elif self.alg == "clements":
decompose_cpu = self.decompose_clements_cpu
decompose_batch = self.decompose_clements_batch
decompose_cuda = matrix_parametrization_cuda.decompose_clements
else:
raise NotImplementedError
if isinstance(U, np.ndarray):
if len(U.shape) == 2:
return decompose_cpu(U)
else:
return decompose_batch(U)
else:
if U.is_cuda:
N = U.size(-1)
size = U.size()
U = U.view(-1, N, N).contiguous()
delta_list = torch.zeros(list(U.size())[:-1], dtype=U.dtype, device=U.device).contiguous()
phi_mat = torch.zeros_like(U).contiguous()
decompose_cuda(U, delta_list, phi_mat)
delta_list = delta_list.view(list(size)[:-1])
phi_mat = phi_mat.view(size)
return delta_list, phi_mat
else:
if U.dim() == 2:
delta_list, phi_mat = decompose_cpu(U.cpu().numpy())
else:
delta_list, phi_mat = decompose_batch(U.cpu().numpy())
return torch.from_numpy(delta_list), torch.from_numpy(phi_mat)
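# A minimal round-trip sketch (assumed usage, kept as a comment so the class body is unchanged):
#
#   from scipy.stats import ortho_group            # already imported at module level
#   dec = RealUnitaryDecomposerBatch(alg="francis")
#   U = ortho_group.rvs(8)
#   delta_list, phi_mat = dec.decompose(U.copy())  # decompose_* mutates its input in place
#   U_rec = dec.reconstruct_francis(delta_list, phi_mat)
#   assert np.allclose(U, U_rec, atol=1e-6)        # decompose/reconstruct are intended inverses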
@profile(timer=timer)
def reconstruct_francis_cpu(self, delta_list, phi_mat):
### Francis style, 1962
N = delta_list.shape[0]
Ur = np.identity(N)
# reconstruct from right to left as in the book chapter
# count = 0
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
### cannot guarantee the phase range, so this will be slower
for i in range(N):
for j in range(N - i - 1):
c, s = phi_mat_cos[i, j], phi_mat_sin[i, j]
p = i
q = N - j - 1
row_p, row_q = Ur[p, :], Ur[q, :]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
Ur[p, :], Ur[q, :] = row_p_cos - row_q_sin, row_p_sin + row_q_cos
Ur = delta_list[:, np.newaxis] * Ur
return Ur
@profile(timer=timer)
def reconstruct_francis_batch(self, delta_list: np.ndarray, phi_mat: np.ndarray) -> np.ndarray:
N = delta_list.shape[-1]
Ur = batch_eye_cpu(N, batch_shape=delta_list.shape[:-1], dtype=delta_list.dtype)
# reconstruct from right to left as in the book chapter
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
for i in range(N):
for j in range(N - i - 1):
c, s = phi_mat_cos[..., i, j : j + 1], phi_mat_sin[..., i, j : j + 1]
p = i
q = N - j - 1
Ur[..., p, :], Ur[..., q, :] = (
Ur[..., p, :] * c - Ur[..., q, :] * s,
Ur[..., p, :] * s + Ur[..., q, :] * c,
)
Ur = delta_list[..., np.newaxis] * Ur
return Ur
def reconstruct_francis(self, delta_list, phi_mat):
if isinstance(phi_mat, np.ndarray):
if len(delta_list.shape) == 1:
return self.reconstruct_francis_cpu(delta_list, phi_mat)
else:
return self.reconstruct_francis_batch(delta_list, phi_mat)
else:
if phi_mat.is_cuda:
size = phi_mat.size()
N = phi_mat.size(-1)
delta_list = delta_list.view(-1, N).to(phi_mat.device).contiguous()
phi_mat = phi_mat.view(-1, N, N).contiguous()
U = matrix_parametrization_cuda.reconstruct_francis(delta_list, phi_mat)
U = U.view(size)
return U
else:
if phi_mat.dim() == 2:
return torch.from_numpy(
self.reconstruct_francis(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
else:
return torch.from_numpy(
self.reconstruct_francis_batch(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
@profile(timer=timer)
def reconstruct_reck_cpu(self, delta_list, phi_mat):
N = delta_list.shape[0]
Ur = np.identity(N)
### left multiply by a counter-clock-wise rotation
"""
cos, -sin
sin, cos
"""
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
## totally 2n-3 stage
for i in range(N - 1):
lower = N - 2 - i
for j in range(i + 1):
c, s = phi_mat_cos[lower, j], phi_mat_sin[lower, j]
p = N - 2 - i + j
q = p + 1
row_p, row_q = Ur[p, lower:], Ur[q, lower:]
res = (c + 1j * s) * (row_p + 1j * row_q)
Ur[p, lower:], Ur[q, lower:] = res.real, res.imag
Ur = delta_list[:, np.newaxis] * Ur
return Ur
@profile(timer=timer)
def reconstruct_reck_batch(self, delta_list, phi_mat):
N = delta_list.shape[-1]
Ur = batch_eye_cpu(N, batch_shape=delta_list.shape[:-1], dtype=delta_list.dtype)
### left multiply by a counter-clock-wise rotation
"""
cos, -sin
sin, cos
"""
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
for i in range(N - 1):
lower = N - 2 - i
for j in range(i + 1):
c, s = phi_mat_cos[..., lower, j : j + 1], phi_mat_sin[..., lower, j : j + 1]
p = N - 2 - i + j
q = p + 1
row_p, row_q = Ur[..., p, lower:], Ur[..., q, lower:]
### this rotation is implemented as a complex-number multiplication for speed
res = (c + 1j * s) * (row_p + 1j * row_q)
Ur[..., p, lower:], Ur[..., q, lower:] = res.real, res.imag
Ur = delta_list[..., np.newaxis] * Ur
return Ur
@profile(timer=timer)
def reconstruct_reck_batch_par(self, delta_list, phi_mat):
N = delta_list.shape[-1]
Ur = batch_eye_cpu(N, batch_shape=delta_list.shape[:-1], dtype=delta_list.dtype)
### left multiply by a counter-clock-wise rotation
"""
cos, -sin
sin, cos
"""
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
### 2n-3 stages
for i in range(2 * N - 3):
lower = N - 2 - i
for j in range(i + 1):
c, s = phi_mat_cos[..., lower, j : j + 1], phi_mat_sin[..., lower, j : j + 1]
p = N - 2 - i + j
q = p + 1
row_p, row_q = Ur[..., p, lower:], Ur[..., q, lower:]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
Ur[..., p, lower:], Ur[..., q, lower:] = row_p_cos - row_q_sin, row_p_sin + row_q_cos
Ur = delta_list[..., np.newaxis] * Ur
return Ur
def reconstruct_reck(self, delta_list, phi_mat):
if isinstance(phi_mat, np.ndarray):
if len(delta_list.shape) == 1:
return self.reconstruct_reck_cpu(delta_list, phi_mat)
else:
return self.reconstruct_reck_batch(delta_list, phi_mat)
else:
if phi_mat.is_cuda:
size = phi_mat.size()
N = phi_mat.size(-1)
delta_list = delta_list.view(-1, N).to(phi_mat.device).contiguous()
phi_mat = phi_mat.view(-1, N, N).contiguous()
U = matrix_parametrization_cuda.reconstruct_reck(delta_list, phi_mat)
U = U.view(size)
return U
else:
if phi_mat.dim() == 2:
return torch.from_numpy(
self.reconstruct_reck(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
else:
return torch.from_numpy(
self.reconstruct_reck_batch(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
@profile(timer=timer)
def reconstruct_clements_cpu(self, delta_list, phi_mat):
N = delta_list.shape[0]
Ur = np.identity(N)
# reconstruct column by column in parallel based on the checkerboard (phi_mat)
# count = 0
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
for i in range(N): ## N layers
max_len = 2 * (i + 1)
### in odd N, address delta_list[-1] before the first column
if i == 0 and N % 2 == 1 and delta_list[-1] < 0:
Ur[-1, :] *= -1
for j in range((i % 2), N - 1, 2):
c, s = phi_mat_cos[j, i], phi_mat_sin[j, i]
## not the entire row needs to be rotated, only a small working set is used
lower = j - i
upper = lower + max_len
lower = max(0, lower)
upper = min(upper, N)
row_p, row_q = Ur[j, lower:upper], Ur[j + 1, lower:upper]
res = (c + 1j * s) * (row_p + 1j * row_q)
Ur[j, lower:upper], Ur[j + 1, lower:upper] = res.real, res.imag
if i == 0 and N % 2 == 0 and delta_list[-1] < 0:
Ur[-1, :] *= -1
if (
i == N - 2 and N % 2 == 1 and delta_list[0] < 0
): ## consider diagonal[0]= {-1,1} before the last layer when N odd
Ur[0, :] *= -1
if N % 2 == 0 and delta_list[0] < 0: ## consider diagonal[0]= {-1,1} after the last layer when N even
Ur[0, :] *= -1
return Ur
@profile(timer=timer)
def reconstruct_clements_batch(self, delta_list, phi_mat):
N = delta_list.shape[-1]
Ur = batch_eye_cpu(N, batch_shape=delta_list.shape[:-1], dtype=delta_list.dtype)
# reconstruct column by column in parallel based on the checkerboard (phi_mat)
# count = 0
phi_mat_cos = np.cos(phi_mat)
phi_mat_sin = np.sin(phi_mat)
for i in range(N): ## N layers
max_len = 2 * (i + 1)
### in odd N, address delta_list[-1] before the first column
if i == 0 and N % 2 == 1:
Ur[..., -1, :] *= delta_list[..., -1:]
for j in range((i % 2), N - 1, 2):
## not the entire row needs to be rotated, only a small working set is used
lower = j - i
upper = lower + max_len
lower = max(0, lower)
upper = min(upper, N)
c, s = phi_mat_cos[..., j, i : i + 1], phi_mat_sin[..., j, i : i + 1]
row_p, row_q = Ur[..., j, lower:upper], Ur[..., j + 1, lower:upper]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
Ur[..., j, lower:upper], Ur[..., j + 1, lower:upper] = (
row_p_cos - row_q_sin,
row_p_sin + row_q_cos,
)
if i == 0 and N % 2 == 0:
Ur[..., -1, :] *= delta_list[..., -1:]
if i == N - 2 and N % 2 == 1: ## consider diagonal[0]= {-1,1} before the last layer when N odd
Ur[..., 0, :] *= delta_list[..., 0:1]
if N % 2 == 0: ## consider diagonal[0]= {-1,1} after the last layer when N even
Ur[..., 0, :] *= delta_list[..., 0:1]
return Ur
def reconstruct_clements(self, delta_list, phi_mat):
if isinstance(phi_mat, np.ndarray):
if len(delta_list.shape) == 1:
return self.reconstruct_clements_cpu(delta_list, phi_mat)
else:
return self.reconstruct_clements_batch(delta_list, phi_mat)
else:
if phi_mat.is_cuda:
size = phi_mat.size()
N = phi_mat.size(-1)
delta_list = delta_list.view(-1, N).to(phi_mat.device).contiguous()
phi_mat = phi_mat.view(-1, N, N).contiguous()
U = matrix_parametrization_cuda.reconstruct_clements(delta_list, phi_mat)
U = U.view(size)
return U
else:
if phi_mat.dim() == 2:
return torch.from_numpy(
self.reconstruct_clements(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
else:
return torch.from_numpy(
self.reconstruct_clements_batch(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
def reconstruct(self, delta_list, phi_mat):
if self.alg == "francis":
reconstruct_cpu = self.reconstruct_francis
reconstruct_batch = self.reconstruct_francis_batch
reconstruct_cuda = matrix_parametrization_cuda.reconstruct_francis
elif self.alg == "reck":
reconstruct_cpu = self.reconstruct_reck_cpu
reconstruct_batch = self.reconstruct_reck_batch
reconstruct_cuda = matrix_parametrization_cuda.reconstruct_reck
elif self.alg == "clements":
reconstruct_cpu = self.reconstruct_clements_cpu
reconstruct_batch = self.reconstruct_clements_batch
reconstruct_cuda = matrix_parametrization_cuda.reconstruct_clements
else:
raise NotImplementedError
if isinstance(phi_mat, np.ndarray):
if len(delta_list.shape) == 1:
return reconstruct_cpu(delta_list, phi_mat)
else:
return reconstruct_batch(delta_list, phi_mat)
else:
if phi_mat.is_cuda:
size = phi_mat.size()
N = phi_mat.size(-1)
delta_list = delta_list.view(-1, N).to(phi_mat.device).contiguous()
phi_mat = phi_mat.view(-1, N, N).contiguous()
U = reconstruct_cuda(delta_list, phi_mat)
U = U.view(size)
return U
else:
if phi_mat.ndim == 2:
return torch.from_numpy(reconstruct_cpu(delta_list.cpu().numpy(), phi_mat.cpu().numpy()))
else:
return torch.from_numpy(
reconstruct_batch(delta_list.cpu().numpy(), phi_mat.cpu().numpy())
)
def check_identity(self, M):
return (M.shape[0] == M.shape[1]) and np.allclose(M, np.eye(M.shape[0]))
def check_unitary(self, U):
M = np.dot(U, U.T)
return self.check_identity(M)
def check_equal(self, M1, M2):
return (M1.shape == M2.shape) and np.allclose(M1, M2)
def gen_random_ortho(self, N):
U = ortho_group.rvs(N)
logger.info(f"Generate random {N}*{N} unitary matrix, check unitary: {self.check_unitary(U)}")
return U
def to_degree(self, M):
return np.degrees(M)
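# Minimal illustrative sketch (hypothetical helper, not used elsewhere): the decomposition
# routines in this file repeatedly use the same Givens-rotation step -- with
# phi = arctan2(-u2, u1), rotating the pair (u1, u2) by phi sends the second entry to zero
# and the first to sqrt(u1**2 + u2**2).
def _givens_nullify_sketch():
    u1, u2 = 0.6, 0.8
    phi = np.arctan2(-u2, u1)
    c, s = np.cos(phi), np.sin(phi)
    u1_new, u2_new = u1 * c - u2 * s, u1 * s + u2 * c
    assert abs(u2_new) < 1e-12 and abs(u1_new - 1.0) < 1e-12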
class ComplexUnitaryDecomposerBatch(object):
timer = False
def __init__(
self,
min_err: float = 1e-7,
timer: bool = False,
determine: bool = False,
alg: str = "reck",
dtype=np.float64,
) -> None:
self.min_err = min_err
self.timer = timer
self.determine = determine
self.alg = alg
assert alg.lower() in {"reck", "clements", "francis"}, logger.error(
f"Unitary decomposition algorithm can only be [reck, clements, francis], but got {alg}"
)
self.dtype = dtype
def set_alg(self, alg):
assert alg.lower() in {"reck", "clements", "francis"}, logger.error(
f"Unitary decomposition algorithm can only be [reck, clements, francis], but got {alg}"
)
self.alg = alg
def build_plane_unitary(self, p, q, phi, N, transpose=True):
assert N > 0 and isinstance(N, int), "[E] Matrix size must be positive integer"
assert (
isinstance(p, int) and isinstance(q, int) and 0 <= p < q < N
), "[E] Integer value p and q must satisfy p < q"
assert isinstance(phi, float) or isinstance(phi, int), "[E] Value phi must be of type float or int"
U = np.eye(N)
c = np.cos(phi)
s = np.sin(phi)
U[p, p] = U[q, q] = c
U[q, p] = s if not transpose else -s
U[p, q] = -s if not transpose else s
return U
def cal_phi_batch_determine(self, u1: np.ndarray, u2: np.ndarray, is_first_col=False) -> np.ndarray:
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
cond1 = u1_abs < min_err
cond2 = u2_abs < min_err
cond1_n = ~cond1
cond2_n = ~cond2
if is_first_col:
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(
cond1 & cond2_n, np.where(u2 > min_err, -0.5 * pi, 0.5 * pi), np.arctan2(-u2, u1)
),
),
)
else:
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(
cond1 & cond2_n, np.where(u2 > min_err, -0.5 * pi, 0.5 * pi), np.arctan(-u2 / u1)
),
),
)
return phi
def cal_phi_batch_nondetermine(self, u1: np.ndarray, u2: np.ndarray, is_first_col=False) -> np.ndarray:
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
cond1 = u1_abs < min_err
cond2 = u2_abs < min_err
cond1_n = ~cond1
cond2_n = ~cond2
phi = np.where(
cond1 & cond2,
0,
np.where(
cond1_n & cond2,
np.where(u1 > min_err, 0, -pi),
np.where(cond1 & cond2_n, np.where(u2 > min_err, -0.5 * pi, 0.5 * pi), np.arctan2(-u2, u1)),
),
)
return phi
def cal_phi_determine(self, u1, u2, is_first_col=False):
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
if u1_abs < min_err and u2_abs < min_err:
phi = 0
elif u1_abs >= min_err and u2_abs < min_err:
phi = 0 if u1 > min_err else -pi
elif u1_abs < min_err and u2_abs >= min_err:
phi = -0.5 * pi if u2 > min_err else 0.5 * pi
else:
# solve the equation: u'_1n=0
if is_first_col:
phi = np.arctan2(-u2, u1) # 4 quadrant4
else:
phi = np.arctan(-u2 / u1)
return phi
def cal_phi_nondetermine(self, u1, u2):
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
if u1_abs < min_err and u2_abs < min_err:
phi = 0
elif u1_abs >= min_err and u2_abs < min_err:
phi = 0 if u1 > min_err else -pi
elif u1_abs < min_err and u2_abs >= min_err:
phi = -0.5 * pi if u2 > min_err else 0.5 * pi
else:
# solve the equation: u'_1n=0
phi = np.arctan2(-u2, u1) # 4 quadrant4
return phi
def decompose_kernel_batch(self, U: np.ndarray, dim, phi_list=None):
"""return U^(N-1); (phi_1,...,phi_N-2); (sigma_1,...,sigma_N-2)"""
N = U.shape[-1]
if phi_list is None:
phi_list = np.zeros(list(U.shape[:-2]) + [dim], dtype=np.float64)
calPhi_batch = self.cal_phi_batch_determine if self.determine else self.cal_phi_batch_nondetermine
for i in range(N - 1):
u1, u2 = U[..., 0, 0], U[..., 0, N - 1 - i]
phi = calPhi_batch(u1, u2, is_first_col=(i == 0))
phi_list[..., i] = phi
p, q = 0, N - i - 1
c, s = np.cos(phi)[..., np.newaxis], np.sin(phi)[..., np.newaxis]
col_p, col_q = U[..., :, p], U[..., :, q]
U[..., :, p], U[..., :, q] = col_p * c - col_q * s, col_p * s + col_q * c
return U, phi_list
def decompose_kernel_determine(self, U, phi_list):
"""return U^(N-1); (phi_1,...,phi_N-2); (sigma_1,...,sigma_N-2)"""
N = U.shape[0]
for i in range(N - 1):
# with TimerCtx() as t:
u1, u2 = U[0, 0], U[0, N - 1 - i]
pi = np.pi
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
min_err = self.min_err
if u1_abs < min_err and u2_abs < min_err:
phi = 0
elif u1_abs >= min_err and u2_abs < min_err:
phi = 0 if u1 > min_err else -pi
elif u1_abs < min_err and u2_abs >= min_err:
phi = -0.5 * pi if u2 > min_err else 0.5 * pi
else:
# solve the equation: u'_1n=0
if i == 0:
phi = np.arctan2(-u2, u1) # 4 quadrant4
else:
phi = np.arctan(-u2 / u1)
phi_list[i] = phi
p, q = 0, N - i - 1
c, s = np.cos(phi), np.sin(phi)
row_p, row_q = U[:, p], U[:, q]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
U[:, p], U[:, q] = row_p_cos - row_q_sin, row_q_cos + row_p_sin
return U, phi_list
def decompose_kernel_nondetermine(self, U, phi_list):
"""return U^(N-1); (phi_1,...,phi_N-2); (sigma_1,...,sigma_N-2)"""
N = U.shape[0]
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
for i in range(N - 1):
u1, u2 = U[0, 0], U[0, N - 1 - i]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
phi_list[i] = phi
p, q = 0, N - i - 1
c = np.cos(phi)
s = (1 - c * c) ** 0.5 if phi > 0 else -((1 - c * c) ** 0.5)
row_p, row_q = U[:, p], U[:, q]
row_p_cos, row_p_sin = row_p * c, row_p * s
row_q_cos, row_q_sin = row_q * c, row_q * s
U[:, p], U[:, q] = row_p_cos - row_q_sin, row_q_cos + row_p_sin
return U, phi_list
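# Worked check of the sign trick above: phi always lies in [-pi, pi], where sin(phi) never
# has the opposite sign of phi, so the sign-corrected sqrt(1 - cos(phi)**2) equals sin(phi).
# For example, phi = -3.0 gives cos(phi) ~ -0.98999, sqrt(1 - cos(phi)**2) ~ 0.14112, and the
# negative branch flips the sign, yielding -0.14112 ~ sin(-3.0).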
@profile(timer=timer)
def decompose_francis_cpu(self, U):
#### This decomposition follows the natural reflection of MZIs, so the circuit will give a reversed output.
### Francis style, 1962
N = U.shape[0]
assert N > 0 and U.shape[0] == U.shape[1], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros([N, N], dtype=self.dtype)
delta_list = np.zeros(N, dtype=self.dtype)
decompose_kernel = (
self.decompose_kernel_determine if self.determine else self.decompose_kernel_nondetermine
)
for i in range(N - 1):
U, _ = decompose_kernel(U, phi_list=phi_mat[i, :])
delta_list[i] = U[0, 0]
U = U[1:, 1:]
else:
delta_list[-1] = U[-1, -1]
return delta_list, phi_mat
@profile(timer=timer)
def decompose_francis_batch(self, U: np.ndarray):
N = U.shape[-1]
assert N > 0 and U.shape[-1] == U.shape[-2], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(U.shape, dtype=np.float64)
delta_list = np.zeros(U.shape[:-1], dtype=np.float64)
for i in range(N - 1):
U, _ = self.decompose_kernel_batch(U, dim=N, phi_list=phi_mat[..., i, :])
delta_list[..., i] = U[..., 0, 0]
U = U[..., 1:, 1:]
else:
delta_list[..., -1] = U[..., -1, -1]
return delta_list, phi_mat
def decompose_francis(self, U):
if isinstance(U, np.ndarray):
if len(U.shape) == 2:
return self.decompose_francis_cpu(U)
else:
return self.decompose_francis_batch(U)
else:
if U.is_cuda:
N = U.size(-1)
size = U.size()
U = U.view(-1, N, N).contiguous()
delta_list = torch.zeros(list(U.size())[:-1], dtype=U.dtype, device=U.device).contiguous()
phi_mat = torch.zeros_like(U).contiguous()
matrix_parametrization_cuda.decompose_francis(U, delta_list, phi_mat)
delta_list = delta_list.view(list(size)[:-1])
phi_mat = phi_mat.view(size)
return delta_list, phi_mat
else:
if U.dim() == 2:
delta_list, phi_mat = self.decompose_francis_cpu(U.cpu().numpy())
else:
delta_list, phi_mat = self.decompose_francis_batch(U.cpu().numpy())
return torch.from_numpy(delta_list), torch.from_numpy(phi_mat)
@profile(timer=timer)
def decompose_reck_cpu(self, U):
"""Reck decomposition implemented by Neurophox. Triangular mesh, input and output have no mirroring effects, i.e, [x1, ..., xn] -> Y = U x X -> [y1, ..., yn]
Rmn: [ cos(phi) -sin(phi)] -> MZI achieves counter-clock-wise rotation with phi (reconstruction, left mul)
[ sin(phi) cos(phi)]
Rmn*:[ cos(phi) sin(phi)] -> column-wise clock-wise rotation (decompose, right mul)
[-sin(phi) cos(phi)]
U = D R43 R32 R43 R21 R32 R43
"""
N = U.shape[0]
assert N > 0 and U.shape[0] == U.shape[1], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros([N, N, 4], dtype=self.dtype) ## phase shifter, theta_t, theta_l, omega_p, omega_w
"""
the bottom-left phase corresponds to the MZI at the bottom-left corner.
The decomposition ordering follows from bottom to top, from left to right.
R21 R32 R43 0
R32 R43 0 0
R43 0 0 0
0 0 0 0
"""
"""
x x x 0 x x 0 0
x x x x -> x x x 0
x x x x x x x x
x x x x x x x x
"""
for i in range(N - 1):
### each outer loop deals with one off-diagonal, nullification starts from top-right
### even loop for column rotation
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = j ## row
q = N - 1 - i + j ## col
### rotate two columns such that u2 is nullified to 0
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
### col q-1 nullifies col q
u1, u2 = U[p, q - 1], U[p, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
if phi < -pi / 2:
phi += 2 * np.pi ## [-pi/2, 3pi/2]
phi_mat[N - i - 2, j, 0] = np.pi / 2 ## this absorbs the global phase theta_tot
phi_mat[N - i - 2, j, 1] = 3 * np.pi / 2
phi_mat[N - i - 2, j, 2] = 1.5 * np.pi - phi
phi_mat[N - i - 2, j, 3] = 0.5 * np.pi + phi
c, s = np.cos(phi), np.sin(phi)
## q_m1 means q-1; right multiply by R*
col_q_m1, col_q = U[p:, q - 1], U[p:, q]
col_q_m1_cos, col_q_m1_sin = col_q_m1 * c, col_q_m1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[p:, q - 1], U[p:, q] = col_q_m1_cos - col_q_sin, col_q_cos + col_q_m1_sin
delta_list = np.angle(
np.diag(U)
) ## only the first and last diagonal entries can be 1 or -1; the rest are all 1. This feature can be used for fast forward/reconstruction
return delta_list, phi_mat
@profile(timer=timer)
def decompose_reck_batch(self, U):
"""Reck decomposition implemented by Neurophox. Triangular mesh, input and output have no mirroring effects, i.e, [x1, ..., xn] -> Y = U x X -> [y1, ..., yn]
Rmn: [ cos(phi) -sin(phi)] -> MZI achieves counter-clock-wise rotation with phi (reconstruction, left mul)
[ sin(phi) cos(phi)]
Rmn*:[ cos(phi) sin(phi)] -> column-wise clock-wise rotation (decompose, right mul)
[-sin(phi) cos(phi)]
U = D R43 R32 R43 R21 R32 R43
U is real matrix
"""
N = U.shape[-1]
assert N > 0 and U.shape[-1] == U.shape[-2], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(list(U.shape) + [4], dtype=self.dtype) ## left upper triangular array.
"""
the bottom-left phase corresponds to the MZI at the bottom-left corner.
The decomposition ordering follows from bottom to top, from left to right.
R21 R32 R43 0
R32 R43 0 0
R43 0 0 0
0 0 0 0
"""
"""
x x x 0 x x 0 0
x x x x -> x x x 0
x x x x x x x x
x x x x x x x x
"""
for i in range(N - 1):
### each outer loop deals with one off-diagonal, nullification starts from top-right
### even loop for column rotation
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = j ## row
q = N - 1 - i + j ## col
### rotate two columns such that u2 is nullified to 0
### col q-1 nullifies col q
u1, u2 = U[..., p, q - 1], U[..., p, q]
phi = self.cal_phi_batch_nondetermine(u1, u2)
phi[phi < -np.pi / 2] += 2 * np.pi ## [-pi/2, 3pi/2]
phi_mat[..., N - i - 2, j, 0] = np.pi / 2 ## this absorbs the global phase theta_tot
phi_mat[..., N - i - 2, j, 1] = 3 * np.pi / 2
phi_mat[..., N - i - 2, j, 2] = 1.5 * np.pi - phi
phi_mat[..., N - i - 2, j, 3] = 0.5 * np.pi + phi
c, s = np.cos(phi)[..., np.newaxis], np.sin(phi)[..., np.newaxis]
## q_m1 means q-1; right multiply by R*
col_q_m1, col_q = U[..., p:, q - 1], U[..., p:, q]
col_q_m1_cos, col_q_m1_sin = col_q_m1 * c, col_q_m1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[..., p:, q - 1], U[..., p:, q] = col_q_m1_cos - col_q_sin, col_q_cos + col_q_m1_sin
delta_list = np.angle(batch_diag(U))
return delta_list, phi_mat
def decompose_reck(self, U):
if isinstance(U, np.ndarray):
if len(U.shape) == 2:
return self.decompose_reck_cpu(U)
else:
return self.decompose_reck_batch(U)
else:
if U.is_cuda:
N = U.size(-1)
size = U.size()
U = U.view(-1, N, N).contiguous()
delta_list = torch.zeros(list(U.size())[:-1], dtype=U.dtype, device=U.device).contiguous()
phi_mat = torch.zeros_like(U).contiguous()
matrix_parametrization_cuda.decompose_reck(U, delta_list, phi_mat)
delta_list = delta_list.view(list(size)[:-1])
phi_mat = phi_mat.view(size)
return delta_list, phi_mat
else:
if U.dim() == 2:
delta_list, phi_mat = self.decompose_reck_cpu(U.cpu().numpy())
else:
delta_list, phi_mat = self.decompose_reck_batch(U.cpu().numpy())
return torch.from_numpy(delta_list), torch.from_numpy(phi_mat)
@profile(timer=timer)
def decompose_clements_cpu(self, U):
"""clements Optica 2018 unitary decomposition
Tmn: [e^iphi x cos(theta) -sin(theta)]
[e^iphi x sin(theta) cos(theta)]
phi DC 2 theta DC ---
--- DC ------- DC ---
T45 T34 T23 T12 T45 T34 U T12* T34* T23* T12 = D
U=D T34 T45 T12 T23 T34 T45 T12 T23 T34 T12"""
N = U.shape[0]
assert N > 0 and U.shape[0] == U.shape[1], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(
[N, N, 4], dtype=self.dtype
) ## theta checkerboard that maps to the real MZI mesh layout, which is efficient for parallel reconstruction col-by-col.
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
for i in range(N - 1):
### each outer loop deals with one off-diagonal
## even loop for column rotation
if i % 2 == 0:
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = N - 1 - j ## row
q = i - j ## col
### rotate two columns such that u2 is nullified to 0
u1, u2 = U[p, q + 1], U[p, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
phi = (
-phi
) ### simply convert the solved theta from T to T*, it is easier than changing the solving procedure
if phi < -pi / 2:
phi += 2 * pi
# phi_mat[p,q] = phi
pairwise_index = i - j
# theta_checkerboard[pairwise_index, -j - 1] = phi
# phi_mat[pairwise_index, j] = phi
phi_mat[pairwise_index, j, 0] = np.pi / 2 ## this absorbs the global phase theta_tot
phi_mat[pairwise_index, j, 1] = 3 * np.pi / 2
phi_mat[pairwise_index, j, 2] = 1.5 * np.pi - phi
phi_mat[pairwise_index, j, 3] = 0.5 * np.pi + phi
c, s = np.cos(phi), np.sin(phi)
## q_p1 means q+1; right multiply by T*
col_q_p1, col_q = U[: p + 1, q + 1], U[: p + 1, q]
col_q_p1_cos, col_q_p1_sin = col_q_p1 * c, col_q_p1 * s
col_q_cos, col_q_sin = col_q * c, col_q * s
U[: p + 1, q + 1], U[: p + 1, q] = col_q_p1_cos + col_q_sin, col_q_cos - col_q_p1_sin
else:
## odd loop for row rotation
for j in range(i + 1):
p = N - 1 - i + j
q = j
### rotate two rows such that u2 is nullified to 0
pi = np.pi
half_pi = np.pi / 2
min_err = self.min_err
u1, u2 = U[p - 1, q], U[p, q]
u1_abs, u2_abs = np.abs(u1), np.abs(u2)
cond1, cond2 = u1_abs >= min_err, u2_abs >= min_err
if cond1 & cond2:
phi = np.arctan2(-u2, u1)
elif ~cond1 & cond2:
phi = -half_pi if u2 > min_err else half_pi
elif cond1 & ~cond2:
phi = 0 if u1 > min_err else -pi
else:
phi = 0
phi = -phi
if phi < -pi / 2:
phi += 2 * pi
pairwise_index = N + j - i - 2
# theta_checkerboard[pairwise_index, j] = phi
# phi_mat[pairwise_index, N - 1 - j] = phi ### from T* to T, consistent with propagation through the MZI (T); see Clements paper Eq. (4)
phi_mat[pairwise_index, N - 1 - j, 0] = (
np.pi / 2
) ## this absorbs the global phase theta_tot
phi_mat[pairwise_index, N - 1 - j, 1] = 3 * np.pi / 2
phi_mat[pairwise_index, N - 1 - j, 2] = 1.5 * np.pi - phi
phi_mat[pairwise_index, N - 1 - j, 3] = 0.5 * np.pi + phi
c, s = np.cos(phi), np.sin(phi)
## p_1 means p - 1; left multiply by T
row_p_1, row_p = U[p - 1, j:], U[p, j:]
row_p_1_cos, row_p_1_sin = row_p_1 * c, row_p_1 * s
row_p_cos, row_p_sin = row_p * c, row_p * s
U[p - 1, j:], U[p, j:] = row_p_1_cos + row_p_sin, row_p_cos - row_p_1_sin
delta_list = np.angle(np.diag(U))
### efficiently absorb delta_list into theta_t and theta_l and move delta_list to the last phase shifter column
### since U is real matrix, only delta_list[0] and delta_list[-1] can be -1.
if N % 2 == 1:
phi_mat[0, -1, 0] += delta_list[0]
delta_list[0] = 0
phi_mat[N - 2, 1, 1] += delta_list[-1]
delta_list[-1] = 0
else:
phi_mat[N - 2, 2, 1] += delta_list[-1]
delta_list[-1] = 0
return delta_list, phi_mat
@profile(timer=timer)
def decompose_clements_batch(self, U):
N = U.shape[-1]
assert N > 0 and U.shape[-1] == U.shape[-2], "[E] Input matrix must be square and N > 0"
phi_mat = np.zeros(list(U.shape) + [4], dtype=np.float64)
for i in range(N - 1):
### each outer loop deals with one off-diagonal
## even loop for column rotation
if i % 2 == 0:
for j in range(i + 1):
### let p, q be the indices for the nullified '0'
p = N - 1 - j ## row
q = i - j ## col
### rotate two columns such that u2 is nullified to 0
pi = np.pi
min_err = self.min_err
u1, u2 = U[..., p : p + 1, q + 1], U[..., p : p + 1, q]
pi = np.pi
u1_abs, u2_abs =
|
np.abs(u1)
|
numpy.abs
|
import numpy as np
def cycle_GL(N):
""" Generates a graph Laplacian for a cycle graph
N: int (number of agents)
-> NxN numpy array (representing the graph Laplacian)
"""
#Check user input types
assert isinstance(N, int), "In the cycle_GL function, the number of nodes (N) must be an integer. Recieved type %r." % type(N).__name__
#Check user input ranges/sizes
assert N > 0, "In the cycle_GL function, the number of nodes (N) must be positive. Received %r." % N
ones = np.ones(N-1)
L = 2*np.identity(N) - np.diag(ones, 1) - np.diag(ones, -1)
L[N-1, 0] = -1
L[0, N-1] = -1
return L
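# Small illustrative check (hypothetical helper): for N = 4 the cycle Laplacian has 2 on the
# diagonal, -1 for each of a node's two neighbours, and every row sums to zero.
def _cycle_GL_sketch():
    L = cycle_GL(4)
    assert L.shape == (4, 4)
    assert np.allclose(np.diag(L), 2.0)
    assert np.allclose(L.sum(axis=1), 0.0)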
def lineGL(N):
""" Generates a graph Laplacian for a line graph
N: int (number of agents)
-> NxN numpy array (representing the graph Laplacian)
"""
#Check user input types
assert isinstance(N, int), "In the lineGL function, the number of nodes (N) must be an integer. Recieved type %r." % type(N).__name__
#Check user input ranges/sizes
assert N > 0, "In the lineGL function, the number of nodes (N) must be positive. Received %r." % N
ones = np.ones(N-1)
L = 2*
|
np.identity(N)
|
numpy.identity
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
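# A minimal usage sketch (hypothetical helper) of the Sequence defined above, with the same
# settings as setUp: each item is a (features, labels) batch of fixed shape.
def _test_sequence_sketch():
  seq = TestSequence(batch_size=5, feature_shape=10)
  x, y = seq[0]
  assert x.shape == (5, 10) and y.shape == (5,)
  assert len(seq) == 10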
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
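  # The numbers above follow from 50 samples with batch_size=4: math.ceil(50 / 4) == 13
  # batches in total, and the final partial batch holds 50 - 12 * 4 == 2 samples.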
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
input_c = keras.Input(shape=(1,), name='input_b')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np =
|
np.random.random((10, 3))
|
numpy.random.random
|
# coding=utf-8
import os
import sys
import numpy as np
from lcg_random import lcg_rand
import ncs
from easydict import EasyDict as edict
import time
import pdb
from pai_pyhdfs import *
#-----------------------init---------------------------#
# model files
proto='./models/lenet300100/lenet_train_test.prototxt'
# based on the network used in DS paper, 97.72 accuracy
#weights='/home/gitProject/Dynamic-Network-Surgery/models/lenet300100/caffe_lenet300100_original.caffemodel'
# based on the network used in IPR, 97.73 accuracy
weights='./models/lenet300100/lenet300100_iter_10000.caffemodel'
solver_path='./models/lenet300100/lenet_solver.prototxt'
es_method='ncs'
origin_proto_name = './models/lenet300100/lenet_origin.prototxt'
parallel_file_name = './tmp_model.caffemodel'
acc_constrain=0.08
niter = 30001
# stop pruning iteration count
prune_stop_iter = 15000
# the list of layer names
layer_name = ['ip1','ip2','ip3']
# the dict of layer names to its arrary indices
layer_inds = {'ip1':0, 'ip2':1, 'ip3':2}
# the dict of crates for each layer
crates = {'ip1':0.001, 'ip2':0.001, 'ip3':0.001}
# the list of the crates
crates_list = [0.001, 0.001, 0.001]
# the gamma for each layer
gamma = {'ip1':0.0002, 'ip2':0.0002, 'ip3':0.0002}
gamma_star = 0.0002
ncs_stepsize = 50
# random seed for numpy.random
#seed= 981118 # for 112x compression with acc_constrain=0.3
#seed=961449 # for 113.5x compression with acc_constrain=0.08
seed= np.random.randint(1000000)
np.random.seed([seed])
# the dict to store intermedia results
es_cache = {}
#retrieval_tag=[]
r_count=0
work_path="/shared/work/"
#-----------------------init over-----------------------#
# definition of many auxiliary methods
# run the network on its dataset
def test_net(thenet, _start='mnist', _count=1):
'''
thenet: the object of network
_start: the layer to start from
_count: the number of batches to run
'''
scores = 0
for i in range(_count):
thenet.forward(start=_start)
scores += thenet.blobs['accuracy'].data
return scores/_count
# Set the crates of each layer; the pruning will happen on the next forward pass
def apply_prune(thenet, _crates):
'''
thenet: the model to be pruned
_crates: the list of crates for layers
'''
for _id in range(len(layer_name)):
if _crates[_id] < 0:
continue
layer_id = layer_name[_id]
mask0 = thenet.params[layer_id][2].data.ravel()[0]
if mask0 == 0:
thenet.params[layer_id][2].data.ravel()[0] = -_crates[_id]
# elif mask0 == 1:
else:
thenet.params[layer_id][2].data.ravel()[0] = 1+_crates[_id]
# else:
# pdb.set_trace()
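# Note: apply_prune does not zero weights directly; it encodes the requested crate into the
# first element of each layer's mask blob (negated when that element was 0, offset by 1
# otherwise), presumably so the pruning-aware layer can decode it and apply the pruning
# during the next forward pass.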
# calculate the sparsity of a network model
def get_sparsity(thenet):
'''
thenet: the network for checking
'''
remain = 0
total = 0
for layer_id in layer_name:
remain += len(np.where(thenet.params[layer_id][2].data != 0)[0])
remain += len(np.where(thenet.params[layer_id][3].data != 0)[0])
total += thenet.params[layer_id][0].data.size
total += thenet.params[layer_id][1].data.size
#return total*1./(100.*remain)
return remain*1./total
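# Illustrative reading of get_sparsity: it returns the fraction of weights still unmasked,
# e.g. 1,000 surviving entries out of 100,000 parameters gives 0.01; the commented-out
# return above is the corresponding compression rate divided by 100.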
def get_all(n):
'''
This function gathers all the results stored in the "fitX.npy" files and stacks them into an array.
The result files are deleted after being read.
In this program there will be exactly 3 result files.
:return: [array_of_solutions,array_of_fits]
'''
def wait_hdfs_files(filepath,hdfs_path="10.20.37.175",port=9000):
flag=True
hdfs_client=pyhdfs.HdfsClient(hdfs_path,port)
count=n
res=[]
X=[]
while count!=0:
files=hdfs_client.listdir(filepath)
for k in files:
#print('in the for loop.')
if k.startswith('fit'):
tmp_files=hdfs_client.listdir(filepath)
while k in tmp_files:
try:
tmp=hdfs_load('/shared/work/',k,delete=False)
#time.sleep(1)
hdfs_client.delete('/shared/work/'+k)
tmp_files=hdfs_client.listdir(filepath)
except:
tmp_files=hdfs_client.listdir(filepath)
tmp_x=tmp[0]
tmp_fit=tmp[1]
res.append(tmp_fit)
X.append(tmp_x)
count-=1
print("all the results recevied!")
return np.array([X,res])
return wait_hdfs_files('/shared/work/')
def set_solutions(solutions):
'''
This function is used to split the solutions into 3 files and save them into the work path as .npy files.
:param solutions:
:return:
'''
print("solutions",solutions,"len:",len(solutions))
count=0
for solution in solutions:
fn='solution_'+str(np.random.randint(0,9999999))+'.npy'
np.save(fn,solution)
try:
hdfs_set_file('./','/shared/work/',fn)
except:
pass
try:
os.remove(fn)
except:
pass
count+=1
print('All the solutions have been set!')
def NCSloop(tmp_crates,tmp_ind,accuracy_):
'''
This loop gets the parameters from LoopTest1 and uses them to start an NCS loop.
The result will be contained in a file named 'crates_list.npy' in the work path.
LoopTest1.py will use this file to apply pruning to the solver net.
:param tmp_crates:
:param tmp_ind:
:param accuracy_: in accuracy.npy file
:return: create crates_list.npy
'''
the_input_batch=hdfs_load('/shared/work/','data.npy')
es = {}
if es_method == 'ncs':
__C = edict()
__C.parameters = {'reset_xl_to_pop':False,
'init_value':tmp_crates,
'stepsize':ncs_stepsize,
'bounds':[0.0, 10.],
'ftarget':0,
'tmax':1600,
'popsize':10,
'best_k':1}
es = ncs.NCS(__C.parameters)
print('***************NCS initialization***************')
tmp_x_ = np.array(crates_list)
tmp_input_x = tmp_crates
for _ii in range(len(tmp_ind)):
tmp_x_[layer_inds[tmp_ind[_ii]]] = tmp_input_x[_ii]
set_solutions([tmp_x_])
_,tmp_fit = get_all(len([tmp_x_]))
print('all fitness gotten.')
es.set_initFitness(es.popsize*tmp_fit)
print('fit:{}'.format(tmp_fit))
print('***************NCS initialization***************')
count=0
while not es.stop():
print("now in the es loop.")
count+=1
if count==15:
break
x = es.ask()
X = []
for x_ in x:
tmp_x_ = np.array(crates_list)
for _ii in range(len(tmp_ind)):
tmp_x_[layer_inds[tmp_ind[_ii]]] = x_[_ii]
X.append(tmp_x_)
set_solutions(X)
X_arrange,fit=get_all(len(X))
X = []
for x_ in X_arrange:
tmp_x_ = np.array(len(tmp_ind)*[0.])
for _ii in range(len(tmp_ind)):
tmp_x_[_ii]= x_[layer_inds[tmp_ind[_ii]]]
X.append(tmp_x_)
es.tell(X, fit)
for _ii in range(len(tmp_ind)):
crates_list[layer_inds[tmp_ind[_ii]]] = es.result()[0][_ii]
for c_i in range(len(crates_list)):
crates[layer_name[c_i]] = crates_list[c_i]
#es_cache[itr]={'compression':-es.result()[1], 'crates':crates_list[:]}
_tmp_c = np.array(len(crates_list)*[-1.])
for t_name in tmp_ind:
_tmp_c[layer_inds[t_name]] = crates[t_name]
|
np.save('crates_list.npy',crates_list)
|
numpy.save
|
import cv2
import os
from os import listdir
from os.path import isfile, join
from sys import stdout
import psycopg2
import pickle
import numpy as np
import math
from PIL import Image
from math import floor
import hashlib
import random
from multiprocessing.dummy import Pool as ThreadPool
from sklearn.utils import shuffle
# generating the accurate geo-coordinates for a specific time point
def gen_coord(line, time):
percentage = None
for idx, timestamp in enumerate(line["time"]):
if time < timestamp:
index = idx
percentage = (time - line["time"][index - 1]) * 1.0 / (timestamp - line["time"][index - 1])
break
if percentage is None:
return None
else:
coord_x = (line["points"][index][0] - line["points"][index - 1][0]) * percentage + line["points"][index - 1][0]
coord_y = (line["points"][index][1] - line["points"][index - 1][1]) * percentage + line["points"][index - 1][1]
return (coord_x, coord_y)
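# Worked example (hypothetical data only): gen_coord linearly interpolates along the recorded
# route, so with timestamps [0, 10] and points [(0, 0), (4, 2)], time 5 lands halfway.
def _gen_coord_sketch():
    line = {"time": [0, 10], "points": [(0.0, 0.0), (4.0, 2.0)]}
    assert gen_coord(line, 5) == (2.0, 1.0)
    assert gen_coord(line, 11) is None  # past the last timestamp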
# function to randomly crop images from the original in order to perform data augmentation
def random_crop(image, min_area_per, max_area_per, total_cnt, w_h_ratio_min, w_h_ratio_max):
width, height, channels = image.shape
augmented_images = []
augmented_images_infos = []
# for the original image
infos = [True, None, None, None, None]
augmented_images.append(image)
augmented_images_infos.append(infos)
for cnt in range(total_cnt):
w_h_ratio = random.uniform(w_h_ratio_min, w_h_ratio_max)
area = random.uniform(min_area_per, max_area_per)
w_new = int(floor(math.sqrt(height * width * area * w_h_ratio)))
h_new = int(floor(w_new / w_h_ratio))
# print(width, w_new, height, h_new)
random_left_shift = random.randint(0, int((width - w_new))) # Note: randint() is from uniform distribution.
random_down_shift = random.randint(0, int((height - h_new)))
new_image = image[random_left_shift : w_new + random_left_shift, random_down_shift: h_new + random_down_shift, :]
# print(new_image.shape)
centerpoint_x = random_left_shift / 2 + w_new
centerpoint_y = random_down_shift / 2 + h_new
original = False
infos = [original, w_h_ratio, area, centerpoint_x, centerpoint_y]
augmented_images.append(new_image)
augmented_images_infos.append(infos)
return {"augmented_images": augmented_images, "augmented_images_infos": augmented_images_infos}
#preset settings
frame_interval = 50
augmentation_split = 8
# select area for interpolation
conn_string = "host='localhost' dbname='indoor_position' user='postgres' password='<PASSWORD>' port='5432'"
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''select * from penn_station.areas'''
cur.execute(query)
areas = cur.fetchall()
cur.close()
conn.commit()
# select route id and its corresponding time list
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''select id, field_3 from penn_station.routes'''
cur.execute(query)
results = cur.fetchall()
cur.close()
conn.commit()
records = {}
for item in results:
records[item[0]] = {}
records[item[0]]["time"] = eval(item[1])
records[item[0]]["points"] = []
# select coordinates of these routes
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''select id, (ST_DumpPoints(geom)).path, ST_X(((ST_DumpPoints(geom)).geom)), ST_Y(((ST_DumpPoints(geom)).geom)) from penn_station.routes'''
cur.execute(query)
results = cur.fetchall()
cur.close()
conn.commit()
for item in results:
records[item[0]]["points"].append((item[2], item[3]))
# print(records)
print("--- parameters loaded...\n--- begin categorizing")
# for points in results:
place_title = "penn_station"
root_dir = os.path.join(os.getcwd(), "..", "data", place_title)
left_dir = os.path.join(root_dir, "1")
forward_dir = os.path.join(root_dir, "3")
right_dir = os.path.join(root_dir, "4")
back_dir = os.path.join(root_dir, "5")
output_dir = os.path.join(root_dir, "extracted_%sms" % str(frame_interval))
left_files = [join(left_dir, f) for f in listdir(left_dir) if isfile(join(left_dir, f))]
forward_files = [join(forward_dir, f) for f in listdir(forward_dir) if isfile(join(forward_dir, f))]
right_files = [join(right_dir, f) for f in listdir(right_dir) if isfile(join(right_dir, f))]
back_files = [join(back_dir, f) for f in listdir(back_dir) if isfile(join(back_dir, f))]
if not len(left_files) == len(forward_files) == len(right_files) == len(back_files):
print("!!! file count not consistent")
# print(left_files, forward_files, right_files, back_files, top_files)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
os.chdir(output_dir)
all_files = [left_files, forward_files, right_files, back_files]
global total_cnt
global fail_cnt
global lookup_table
total_cnt = 0
fail_cnt = 0
lookup_table = {"2-1": 0, "2-3": 1, "2-4": 2, "2-5": 3, "2-6": 4, "2-8-2": 5, "2-9": 6, "2-10": 7}
# create table to store image infos
# conn = psycopg2.connect(conn_string)
# cur = conn.cursor()
# query = '''
# drop table if exists penn_station.image_lookup_%sms; create table penn_station.image_lookup_%sms
# (
# image_name text,
# id integer,
# spec_id text,
# path text,
# lat double precision,
# lon double precision,
# original boolean,
# w_h_ratio text,
# area text,
# centerpoint_x text,
# centerpoint_y text
# );
# drop table if exists penn_station.missing; create table penn_station.missing
# (
# lat double precision,
# lon double precision
# )
# ''' % (str(frame_interval), str(frame_interval))
# # print(query)
# cur.execute(query)
# cur.close()
# conn.commit()
# print("--- table penn_station.image_lookup_%sms created" % (str(frame_interval)))
all_files = [left_files, forward_files, right_files, back_files]
def extract(input_direction):
global total_cnt
global fail_cnt
global lookup_table
cam_id = input_direction[0]
direction = input_direction[1]
# for cam_id, direction in enumerate(all_files):
for clip_id in range(len(direction)):
print("------ begin extracting from cam_id: %s, clip_id %s" % (cam_id, clip_id) )
vidcap = cv2.VideoCapture(direction[clip_id])
# success, image = vidcap.read()
count = 0
# if not success:
# print("video reading error for %s" % direction[clip_id])
# continue
while True:
current_line = records[lookup_table[os.path.splitext((os.path.basename(direction[clip_id])))[0]]]
# use this one for opencv 2-- cv2.CAP_PROP_POS_MSEC was removed from opencv 3.0
vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * frame_interval))
# use this one for opencv 3
# post_frame = cap.get(1)
current_time = count * 1.0 * frame_interval / 1000
coord = gen_coord(current_line, current_time)
# print(current_line, count * 1.0 * frame_interval / 1000)
if coord is not None:
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''select id from penn_station.areas a where ST_Intersects(ST_PointFromText('POINT(%s %s)', 4326), ST_Transform(a.geom, 4326)) is True''' % (coord[0], coord[1])
# print(query)
cur.execute(query)
if not cur.rowcount == 0:
area_id = cur.fetchone()[0]
success, image = vidcap.read()
cur.close()
conn.commit()
if (image is not None):
#print(count * frame_interval)
spec_id = str(area_id) + str(cam_id)
# filename = "%s_%s_%s.png" % (spec_id, str(cam_id), count)
crop_result = random_crop(image, 0.3, 0.6, augmentation_split, 0.8, 1.2)
for crop_item in range(augmentation_split):
# inserting informations below
original_ins, w_h_ratio_ins, area_ins, centerpoint_x_ins, centerpoint_y_ins = crop_result["augmented_images_infos"][crop_item]
image_ins = crop_result["augmented_images"][crop_item]
h = hashlib.new('ripemd160')
h.update(str(image_ins))
image_name = h.hexdigest()
filename_level_1 = image_name[:2]
filename_level_2 = image_name[2:4]
image_dir = os.path.join(os.getcwd(), filename_level_1, filename_level_2)
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# save frame as png file
image_ins = cv2.resize(image_ins, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(os.path.join(image_dir, image_name + ".png"), image_ins)
# inserting image infos into database
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''insert into penn_station.image_lookup_%sms values('%s', %s, '%s', '%s', %s, %s, %s, '%s', '%s', '%s', '%s')''' % (str(frame_interval), image_name , area_id, spec_id, os.path.join(image_dir, image_name + ".png"), coord[0], coord[1], original_ins, w_h_ratio_ins, area_ins, centerpoint_x_ins, centerpoint_y_ins)
# print(query)
cur.execute(query)
stdout.write("\rcam_id %s, area_id %s, saved cnt %s, not_in_any_area cnt %s, clip_id %s" % (str(cam_id), area_id, total_cnt - fail_cnt, fail_cnt, clip_id))
stdout.flush()
cur.close()
conn.commit()
else:
print("\n" + str(count * frame_interval) + "disrupted")
count += 1
#total_cnt += 1
#fail_cnt += 1
#continue
#print(success, image)
#print("\nend of one video")
continue
else:
cur.close()
conn.commit()
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''insert into penn_station.missing values(%s, %s)''' % (coord[0], coord[1])
# print(query)
cur.execute(query)
cur.close()
conn.commit()
stdout.write("\rcam_id %s, saved cnt %s, not_in_any_area cnt %s, clip_id %s" % (str(cam_id), total_cnt - fail_cnt, fail_cnt, clip_id))
stdout.flush()
fail_cnt += 1
else:
print("\nbreak because coord is None")
break
if cv2.waitKey(10) == 27: # exit if Escape is hit
break
count += 1
total_cnt += 1
# pool = ThreadPool(processes = 4)
# pool.map(extract, [[cam_id, direction] for cam_id, direction in enumerate(all_files)])
# pool.close()
# pool.join()
# select specific area name & count from image look up table
conn = psycopg2.connect(conn_string)
cur = conn.cursor()
query = '''select spec_id, count(spec_id) as cnt1 from penn_station.image_lookup_%sms group by spec_id having count(spec_id) > 100; ''' % str(frame_interval)
cur.execute(query)
results = cur.fetchall()
cur.close()
conn.commit()
print(cur.rowcount)
spec_cat = {}
# for shitty classifier code
# for spec_id in results:
# conn = psycopg2.connect(conn_string)
# cur = conn.cursor()
# query = '''select * from penn_station.image_lookup_%sms where spec_id = '%s'; ''' % (str(frame_interval), spec_id[0])
# # print(query)
# cur.execute(query)
# images = cur.fetchall()
# output_dir = os.path.join(os.getcwd(), '..', 'categories', spec_id[0])
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
# spec_cat[spec_id[0]] = []
# for image in images:
# resized_image = cv2.resize(np.array(Image.open(image[3])), dsize=(200, 200), interpolation=cv2.INTER_CUBIC)
# cv2.imwrite(os.path.join(output_dir, os.path.basename(image[3])), resized_image)
# cur.close()
# conn.commit()
# for bolei's code
per_train = 0.8
per_val = 0.09
per_test = 0.1
data_train = np.array([], dtype=np.uint8).reshape(0, 150528)
label_train = np.array([], dtype=np.uint8)
data_val =
|
np.array([], dtype=np.uint8)
|
numpy.array
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved."""
import numpy as np
from dataset.dataset_loader import Dataset
from inference_models.inference_torch import InferenceTorch
from inference_models.__inference_utils import compute_percent_correct
from codi.nlp_trainer import NLPTrainer
import torch
import time
from inference_models.__inference_utils import epoch_time
"""
This script is designed for the one step retraining setup.
For each step, the inference model is reset to its initial state so that we measure the improvement only due to the
points added for a particular threshold.
"""
def main():
start_time = time.time()
dataset_name, hyper_yaml = 'trec', 'yaml_hyper/trec_hyper.yaml'
dataset_loading = Dataset(dataset_name, hyper_yaml)
dataset_loading.dataset_to_torch()
print('***TREC Dataset loaded***')
inference_train_iterator, inference_val_iterator, inference_test_iterator = dataset_loading.get_inference_iterator()
text, _ = dataset_loading.get_text_label()
vocab_size = len(text.vocab)
pad_idx = text.vocab.stoi[text.pad_token]
inference_yaml = 'trec.yaml'
inference_model = InferenceTorch(dataset_name, hyper_yaml, vocab_size, pad_idx, dataset_loading.get_device())
inference_model.load_from_yaml(inference_yaml)
initial_acc, initial_f1 = inference_model.train_model(text, inference_train_iterator, inference_val_iterator,
inference_test_iterator)
print('***Inference Model trained, now inferring labels.***')
unlabelled_iterator = dataset_loading.get_unlabelled_iterator()
inference_model.infer_labels(unlabelled_iterator)
print('Percent correct on unlabelled dataset prediction ',
compute_percent_correct(dataset_loading.get_unlabelled_dataset().examples))
print('***Unlabelled dataset processed, now processing CoDi dataset.***')
codi_labelled_iterator = dataset_loading.get_codi_iterator()
inference_model.infer_labels(codi_labelled_iterator)
print('***CoDi labelled dataset processed.***')
codi_trainer = NLPTrainer(yaml_model_path='codi/mlp_codi.yaml', yaml_train_path='codi/nlp_trainer.yaml')
codi_trainer.create_labelled_dataset(codi_labelled_iterator)
codi_trainer.train()
print('***CoDi model trained.***')
_ = codi_trainer.create_unlabelled_dataset(unlabelled_iterator)
print('***Prediction done.***')
# Beginning of retraining experiments
unlabelled_dataset_original = dataset_loading.get_unlabelled_dataset()
inference_train_original = dataset_loading.get_inference_dataset()
number_thresholds = codi_trainer.train_params['filtering']['nb_thresh']
initial_accuracies = np.array([initial_acc, initial_f1])
retraining_accuracies = np.zeros(number_thresholds)
percent_correct_array = np.zeros(number_thresholds)
size_indices_array = np.zeros(number_thresholds)
retraining_god_accuracies =
|
np.zeros(number_thresholds)
|
numpy.zeros
|
'''
Here we consider a controller trained for the reacher environment in
OpenAI Gym. The controller was taken from the baselines. The controller is
based on DDPG.
'''
import gym
import numpy as np
from baselines.ddpg.ddpg import DDPG
from baselines.ddpg.noise import *
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.common import set_global_seeds
import baselines.common.tf_util as U
from mpi4py import MPI
from collections import deque
def train_return(env, param_noise, actor, critic, memory,nb_epochs=250, nb_epoch_cycles=20, reward_scale=1.,
render=False,normalize_returns=False, normalize_observations=True, critic_l2_reg=1e-2, actor_lr=1e-4,
critic_lr=1e-3,
action_noise=None, popart=False, gamma=0.99, clip_norm=None,nb_train_steps=50, nb_rollout_steps=2048,
batch_size=64,tau=0.01, param_noise_adaption_interval=50):
rank = MPI.COMM_WORLD.Get_rank()
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
max_action = env.action_space.high
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns,
normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
# Set up logging stuff only for a single worker.
episode_rewards_history = deque(maxlen=100)
#with U.single_threaded_session() as sess:
# Prepare everything.
agent.initialize(sess)
sess.graph.finalize()
agent.reset()
obs = env.reset()
episode_reward = 0.
episode_step = 0
episodes = 0
t = 0
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
print('epoch number:', epoch)
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
assert action.shape == env.action_space.shape
# Execute next action.
if rank == 0 and render:
env.render()
assert max_action.shape == action.shape
new_obs, r, done, info = env.step(
max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done)
obs = new_obs
if done:
# Episode done.
epoch_episode_rewards.append(episode_reward)
episode_rewards_history.append(episode_reward)
epoch_episode_steps.append(episode_step)
episode_reward = 0.
episode_step = 0
epoch_episodes += 1
episodes += 1
agent.reset()
obs = env.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t % param_noise_adaption_interval == 0:
distance = agent.adapt_param_noise()
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
return agent
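# Module-level setup: seed everything, create the environment and TF session, then build the
# parameter noise, replay memory, critic and actor, and train the agent with train_return() above.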
seed = 2146337346
set_global_seeds(seed)
env = gym.make("Reacher-v1")
env.seed(seed)
sess = U.make_session(num_cpu=1).__enter__()
nb_actions = env.action_space.shape[-1]
layer_norm=True
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(0.2), desired_action_stddev=float(0.2))
memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
critic = Critic(layer_norm=layer_norm)
actor = Actor(nb_actions, layer_norm=layer_norm)
agent = train_return(env=env,actor=actor, critic=critic, memory=memory, param_noise=param_noise)
max_action = env.action_space.high
def compute_traj(max_steps,early=False,done_early=False,**kwargs):
env.reset()
# This sets the init_qpos
if 'init_state' in kwargs:
env.env.init_qpos = kwargs['init_state']
# This sets the goal
if 'goal' in kwargs:
env.env.goal = kwargs['goal']
# This is the init_qvel
if 'init_velocity' in kwargs:
env.env.init_qvel = kwargs['init_velocity']
    # State perturbation (default to zero so the arrays below are always defined)
    state_per = kwargs.get('state_per', np.zeros_like(env.env.init_qpos))
    # Velocity perturbation (default to zero so the arrays below are always defined)
    vel_per = kwargs.get('vel_per', np.zeros_like(env.env.init_qvel))
qpos = state_per+env.env.init_qpos
qvel = vel_per+env.env.init_qvel
qpos[-2:] = env.env.goal
qvel[-2:] = 0.
env.env.set_state(qpos,qvel)
ob = env.env._get_obs()
traj = [ob]
reward = 0
iters = 0
closest = np.inf
total_theta1 = 0.
total_theta2 = 0.
pt1 = np.arccos(ob[0]) if ob[2] > 0 else np.pi + np.arccos(ob[0])
pt2 = np.arccos(ob[1]) if ob[3] > 0 else np.pi +np.arccos(ob[1])
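    # In Reacher the observation stores cos(theta) in ob[0:2] and sin(theta) in ob[2:4];
    # pt1/pt2 recover (approximately unwrapped) joint angles so cumulative rotation can be tracked.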
for _ in range(max_steps):
action, _ = agent.pi(ob, apply_noise=False, compute_Q=True)
ob, r, done, additional_data = env.step(max_action * action)
if early and np.linalg.norm(env.env.get_body_com("fingertip")\
-env.env.get_body_com("target")) < 0.1:
break
nt1 = np.arccos(ob[0]) if ob[2] > 0 else np.pi + np.arccos(ob[0])
nt2 = np.arccos(ob[1]) if ob[3] > 0 else np.pi + np.arccos(ob[1])
total_theta1 += nt1 - pt1
total_theta2 += nt2 - pt2
pt1 = nt1
pt2 = nt2
        if -additional_data['reward_dist'] < closest:
closest = -additional_data['reward_dist']
if done_early and done:
break
reward += r
traj.append(ob)
iters+=1.
    additional_data = {}  # reuse the name for the summary dict returned to the caller
additional_data['reward']=reward
additional_data['iters'] = iters
additional_data['closest'] = closest
additional_data['tot_theta1'] = np.abs(total_theta1/(2*np.pi))
additional_data['tot_theta2'] = np.abs(total_theta2/(2*np.pi))
return traj, additional_data
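# Example usage (hypothetical values), rolling the trained policy for 200 steps from a
# perturbed start; perturbation shapes follow env.env.init_qpos / init_qvel (4 entries each in Reacher):
#   traj, info = compute_traj(200, goal=np.array([0.1, -0.05]),
#                             state_per=np.zeros(4), vel_per=np.zeros(4))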
# ------------------------------------------------------------------------------
from active_testing import pred_node, max_node, min_node, test_module
from active_testing.utils import sample_from
rand_nums = [3547645943,
3250606528,
2906727341,
772456798,
2103980956,
2264249721,
1171067901,
3643734338,
854527104,
260127400,
578423204,
3152488971,
261317259,
2798623267,
3165387405]
bounds = [(-0.2, 0.2)] * 2 # Bounds on the goal
bounds.append((-0.1, 0.1)) # Bounds on the state perturbations
bounds.append((-0.1, 0.1)) # Bounds on the state perturbations
bounds.append((-0.005, 0.005)) # Bounds on the velocity perturbations
bounds.append((-0.005, 0.005)) # Bounds on the velocity perturbations
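# Each sampled point x0 is a 6-vector: [goal_x, goal_y, qpos perturbation (2), qvel perturbation (2)],
# which sut() below unpacks into the kwargs of compute_traj().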
def sut(max_steps,x0,early=False, done_early=False):
goal = np.array(x0[0:2])
state_per = np.zeros(4)
state_per[0:2] += x0[2:4]
vel_per = np.zeros(4)
vel_per[0:2] += x0[4:6]
return compute_traj(max_steps,early, done_early,goal=goal, state_per=state_per,
vel_per=vel_per)
# Requirement 1: Find the initial state, goal state that minimizes the reward
# We need only one node for the reward. The reward is a smooth function
# given that the closed loop system is deterministic
smooth_details_r1 = []
random_details_r1 = []
# This set assumes random sampling and checking
for r in rand_nums:
|
np.random.seed(r)
|
numpy.random.seed
|
import pytest
import numpy as np
from numpy import array as ar
from irmetrics.io import ensure_inputs
# Identity shortcut
_id = ar([[1]])
_K = 20
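# Each case below pairs an input y_pred with the 2-D [n_queries, n_items] array that
# ensure_inputs is expected to produce (the second tuple element is the expected value).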
@pytest.mark.parametrize("y_pred, y_pred_ex", [
(1, _id), # scalar -> [1, 1]
([1], _id), # [1] -> [1, 1]
([1, 2, 3, 4], ar([[1, 2, 3, 4]])), # [n] -> [1, n]
([[1]], _id), # [1, 1] -> [1, 1]
([[1, 2]], ar([[1, 2]])), # [1, 2] -> [1, 2]
([[1], [2]],
|
ar([[1], [2]])
|
numpy.array
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
# Secondary Structure
COIL = ['S', 'T', ' ', '_'] # ' ' == '_'
HELIX = ['H', 'G', 'I']
STRAND = ['E', 'B']
SS8toSS3 = {'S': 'C', 'T': 'C', ' ': 'C', '_': 'C',
'H': 'H', 'G': 'H', 'I': 'H', 'E': 'E', 'B': 'E'}
# SS: C, H, E
SS3_dict = {'C': 0, 'H': 1, 'E': 2}
SS8_dict = {'S': 0, 'T': 1, ' ': 2, '_': 2,
'H': 3, 'G': 4, 'I': 5, 'E': 6, 'B': 7}
SS8_str = 'ST_HGIEB'
SS3_str = 'CHE'
SA3_str = 'BIE'
# SA: B, I, E
SA3_dict = {'B': 0, 'I': 1, 'E': 2}
def read_results_ss(target, seq, pred_dir):
Ypred_ss8_file = pred_dir + '/Ypred_ss8.npy'
Ypred_ss8 = np.load(Ypred_ss8_file)
num_res = Ypred_ss8.shape[0]
Ypred_ss3 = np.zeros([num_res, 3], dtype=np.float32)
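    # Collapse the 8-state probabilities to 3 states following SS8toSS3:
    # coil = S + T + '_' (cols 0-2), helix = H + G + I (cols 3-5), strand = E + B (cols 6-7)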
Ypred_ss3[:, 0] = Ypred_ss8[:, 0] + Ypred_ss8[:, 1] + Ypred_ss8[:, 2]
Ypred_ss3[:, 1] = Ypred_ss8[:, 3] + Ypred_ss8[:, 4] + Ypred_ss8[:, 5]
Ypred_ss3[:, 2] = Ypred_ss8[:, 6] + Ypred_ss8[:, 7]
predfile = pred_dir + '/' + target+'.ss8'
fp_ss8 = open(predfile, 'w')
    line_out = '#residue_idx AA SS8 S T _ H G I E B \n'
fp_ss8.write(line_out)
for i in range(0, num_res):
j = Ypred_ss8[i].argmax()
ss8 = SS8_str[j]
line_out = '%4d %s %s' % (i+1, seq[i], ss8)
for j in range(0, 8):
line_out += ' %6.4f' % (Ypred_ss8[i][j])
line_out += '\n'
fp_ss8.write(line_out)
fp_ss8.close()
predfile = pred_dir + '/' + target+'.ss3'
fp_ss3 = open(predfile, 'w')
    line_out = '#residue_idx AA SS3 C H E \n'
fp_ss3.write(line_out)
for i in range(0, num_res):
j = Ypred_ss3[i].argmax()
ss3 = SS3_str[j]
line_out = '%4d %s %s' % (i+1, seq[i], ss3)
for j in range(0, 3):
line_out += ' %6.4f' % (Ypred_ss3[i][j])
line_out += '\n'
fp_ss3.write(line_out)
fp_ss3.close()
return Ypred_ss8, Ypred_ss3
def read_results_sa(target, seq, pred_dir):
Ypred_sa3_file = pred_dir + '/Ypred_sa3.npy'
Ypred_rsa_file = pred_dir + '/Ypred_rsa.npy'
Ypred_sa3 = np.load(Ypred_sa3_file)
Ypred_rsa = np.load(Ypred_rsa_file)
num_res = Ypred_sa3.shape[0]
predfile = pred_dir + '/' + target+'.sa'
fp_sa = open(predfile, 'w')
    line_out = '#residue_idx AA RSA SA3 B I E \n'
fp_sa.write(line_out)
for i in range(0, num_res):
j = Ypred_sa3[i].argmax()
sa3 = SA3_str[j]
line_out = '%4d %s %6.4f %s' % (i+1, seq[i], Ypred_rsa[i][0], sa3)
for j in range(0, 3):
line_out += ' %6.4f' % (Ypred_sa3[i][j])
line_out += '\n'
fp_sa.write(line_out)
fp_sa.close()
return Ypred_sa3, Ypred_rsa
def read_results_dom(target, seq, pred_dir, conf_cut=1.3, NC=40):
Ypred_dom_file = pred_dir + '/Ypred_dom.npy'
Ypred_dom =
|
np.load(Ypred_dom_file)
|
numpy.load
|
'''
Tests for bipartitepandas
DATE: March 2021
'''
import pytest
import numpy as np
import pandas as pd
import bipartitepandas as bpd
import pickle
###################################
##### Tests for BipartiteBase #####
###################################
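# The tests below follow a common pattern: build a list of worker-year dicts, turn it into a
# DataFrame, wrap it in bpd.BipartiteLong, then clean_data() and (optionally) convert with
# get_collapsed_long() / get_es() before checking stayers and movers row by row.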
def test_refactor_1():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_2():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2. Time has jumps.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_3():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 2
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 2
def test_refactor_4():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1 -> 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 3})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_5():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 1 -> 0
# Time 1 -> 2 -> 4
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_6():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 2 -> 3 -> 5
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 5})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_7():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_8():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 0 -> 1
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 0
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_9():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 0
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_10():
# 1 mover between firms 0 and 1, 1 between firms 1 and 2, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_11():
    # 1 mover across firms 0 -> 1 -> 2 -> 3, 1 mover between firms 1 and 2, and 1 stayer at firm 2.
# Check going to event study and back to long, for data where movers have extended periods where they stay at the same firm
worker_data = []
# Firm 0 -> 1 -> 2 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 3})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 4})
worker_data.append({'i': 0, 'j': 2, 'y': 0.75, 't': 5})
worker_data.append({'i': 0, 'j': 3, 'y': 1.5, 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df).clean_data().get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 0
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 0.5
assert stayers.iloc[0]['y2'] == 0.5
assert stayers.iloc[0]['t1'] == 4
assert stayers.iloc[0]['t2'] == 4
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 1.
assert stayers.iloc[1]['y2'] == 1.
assert stayers.iloc[1]['t1'] == 1
assert stayers.iloc[1]['t2'] == 1
assert stayers.iloc[2]['i'] == 2
assert stayers.iloc[2]['j1'] == 2
assert stayers.iloc[2]['j2'] == 2
assert stayers.iloc[2]['y1'] == 1.
assert stayers.iloc[2]['y2'] == 1.
assert stayers.iloc[2]['t1'] == 2
assert stayers.iloc[2]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2.
assert movers.iloc[0]['y2'] == 1.
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1.
assert movers.iloc[1]['y2'] == 0.5
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 3
assert movers.iloc[2]['i'] == 0
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 3
assert movers.iloc[2]['y1'] == 0.75
assert movers.iloc[2]['y2'] == 1.5
assert movers.iloc[2]['t1'] == 5
assert movers.iloc[2]['t2'] == 6
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j1'] == 1
assert movers.iloc[3]['j2'] == 2
assert movers.iloc[3]['y1'] == 1.
assert movers.iloc[3]['y2'] == 1.
assert movers.iloc[3]['t1'] == 1
assert movers.iloc[3]['t2'] == 2
bdf = bdf.get_long()
for row in range(len(bdf)):
df_row = df.iloc[row]
bdf_row = bdf.iloc[row]
for col in ['i', 'j', 'y', 't']:
assert df_row[col] == bdf_row[col]
def test_refactor_12():
# Check going to event study and back to long
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
assert len(bdf) == len(bdf.get_es().get_long())
def test_contiguous_fids_11():
# Check contiguous_ids() with firm ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 3, 'y': 1., 't': 2})
# Firm 3 -> 3
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_wids_12():
# Check contiguous_ids() with worker ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_cids_13():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 2})
# Firm 1 -> 2
# Cluster 2 -> 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 1})
# Firm 2 -> 2
# Cluster 1 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
def test_contiguous_cids_14():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 2 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Firm 1 -> 2
# Cluster 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Firm 2 -> 2
# Cluster 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es().original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['original_g1'] == 2
assert movers.iloc[0]['original_g2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['original_g1'] == 1
assert movers.iloc[1]['original_g2'] == 2
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['original_g1'] == 2
assert stayers.iloc[0]['original_g2'] == 2
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
def test_col_dict_15():
# Check that col_dict works properly.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)]).rename({'j': 'firm', 'i': 'worker'}, axis=1)
bdf = bpd.BipartiteLong(data=df, col_dict={'j': 'firm', 'i': 'worker'})
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_worker_year_unique_16_1():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum', and 'mean' options, where options should not have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 3
# Time 1 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_2():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum' and 'mean' options, where options should have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df.copy())
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_3():
# Workers with multiple jobs in the same year, keep the highest paying, with collapsed long format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. Using collapsed long data.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
# Firm 2 -> 1
# Time 1 -> 1
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLongCollapsed(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 1
assert stayers.iloc[0]['y'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t1'] == 1
assert movers.iloc[2]['t2'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t1'] == 2
assert movers.iloc[3]['t2'] == 2
def test_worker_year_unique_16_4():
# Workers with multiple jobs in the same year, keep the highest paying, with event study format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. NOTE: because of how data converts from event study to long (it only shifts period 2 (e.g. j2, y2) for the last row, as it assumes observations zigzag), it will only correct duplicates for period 1
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j1': 0, 'j2': 1, 'y1': 2., 'y2': 1., 't1': 1, 't2': 2})
# Worker 1
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.5, 'y2': 1.5, 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.75, 'y2': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 2, 'j2': 1, 'y1': 1., 'y2': 2., 't1': 1, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 't1': 1, 't2': 1, 'y1': 1., 'y2': 1.})
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 'y1': 1., 'y2': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 1, 't2': 1})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 2, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteEventStudy(data=df.copy(), include_id_reference_dict=True)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how})).original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['original_i'] == 3
assert stayers.iloc[0]['j1'] == 1
assert stayers.iloc[0]['j2'] == 1
assert stayers.iloc[0]['y1'] == 1.5
assert stayers.iloc[0]['y2'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 1
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['original_i'] == 3
assert stayers.iloc[1]['j1'] == 1
assert stayers.iloc[1]['j2'] == 1
assert stayers.iloc[1]['y1'] == 1.5
assert stayers.iloc[1]['y2'] == 1.5
assert stayers.iloc[1]['t1'] == 2
assert stayers.iloc[1]['t2'] == 2
assert movers.iloc[0]['original_i'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['original_i'] == 1
assert movers.iloc[1]['i'] == 1
if how == 'max':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
elif how == 'sum':
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['y1'] == 1.25
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y2'] == 2.5
elif how == 'mean':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
assert movers.iloc[1]['t1'] == 1
assert movers.iloc[1]['t2'] == 2
def test_string_ids_17():
# String worker and firm ids.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_general_methods_18():
# Test some general methods, like n_workers/n_firms/n_clusters, included_cols(), drop(), and rename().
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Worker 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
assert bdf.n_workers() == 3
assert bdf.n_firms() == 3
assert bdf.n_clusters() == 2
correct_cols = True
all_cols = bdf._included_cols()
for col in ['i', 'j', 'y', 't', 'g']:
if col not in all_cols:
correct_cols = False
break
assert correct_cols
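    # Dropping a single event-study sub-column ('g1') should be refused, leaving both halves in
    # place; dropping the general column name ('g') removes both 'g1' and 'g2'.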
bdf.drop('g1', axis=1, inplace=True)
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.drop('g', axis=1, inplace=True)
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
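    # Renaming the required 'i' column should leave 'i' in the frame; renaming the optional 'g'
    # column (re-added manually below) should drop its 'g1'/'g2' halves.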
bdf.rename({'i': 'w'})
assert 'i' in bdf.columns
bdf['g1'] = 1
bdf['g2'] = 1
bdf.col_dict['g1'] = 'g1'
bdf.col_dict['g2'] = 'g2'
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.rename({'g': 'r'})
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
def test_save_19():
# Make sure changing attributes in a saved version does not overwrite values in the original.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Long
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed long
bdf = bdf.gen_m(copy=False).get_long().get_collapsed_long()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
def test_id_reference_dict_20():
# String worker and firm ids, link with id_reference_dict.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
id_reference_dict = bdf.id_reference_dict
merge_df = bdf.merge(id_reference_dict['i'], how='left', left_on='i', right_on='adjusted_ids_1').rename({'original_ids': 'original_i'})
merge_df = merge_df.merge(id_reference_dict['j'], how='left', left_on='j', right_on='adjusted_ids_1').rename({'original_ids': 'original_j'})
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_22():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_23():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method where there are multiple steps of references.
worker_data = []
# Worker 'a'
# Firm a -> b -> c turns into 0 -> 1 -> 2 turns into 0 -> 1
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
worker_data.append({'i': 'a', 'j': 'c', 'y': 1.5, 't': 3})
# Worker 'b'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'd', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'c', 'y': 0.5, 't': 2})
# Worker 'd'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf[bdf['j'] > 0]
bdf = bdf.clean_data(bpd.clean_params({'connectedness': None}))
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'b'
assert movers.iloc[0]['y'] == 1
assert movers.iloc[0]['t'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'c'
assert movers.iloc[1]['y'] == 1.5
assert movers.iloc[1]['t'] == 3
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 0
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'd'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 0
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'd'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_fill_time_24_1():
    # Test the .fill_periods() method (fill in missing time periods) for long format, with no data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df['m'] == 0]
movers = new_df[new_df['m'] == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
def test_fill_time_24_2():
    # Test the .fill_periods() method (fill in missing time periods) for long format, with 1 row of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == 2
assert movers.iloc[4]['y'] == 1
def test_fill_time_24_3():
    # Test the .fill_periods() method (fill in missing time periods) for long format, with 2 rows of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 4
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 4})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == - 1
assert np.isnan(movers.iloc[4]['y'])
assert np.isnan(movers.iloc[4]['m'])
assert movers.iloc[5]['i'] == 1
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
def test_uncollapse_25():
# Convert from collapsed long to long format.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLongCollapsed(data=df).uncollapse()
assert bdf.iloc[0]['i'] == 0
assert bdf.iloc[0]['j'] == 0
assert bdf.iloc[0]['y'] == 2
assert bdf.iloc[0]['t'] == 1
assert bdf.iloc[1]['i'] == 0
assert bdf.iloc[1]['j'] == 1
assert bdf.iloc[1]['y'] == 1
assert bdf.iloc[1]['t'] == 2
assert bdf.iloc[2]['i'] == 1
assert bdf.iloc[2]['j'] == 1
assert bdf.iloc[2]['y'] == 1
assert bdf.iloc[2]['t'] == 1
assert bdf.iloc[3]['i'] == 1
assert bdf.iloc[3]['j'] == 1
assert bdf.iloc[3]['y'] == 1
assert bdf.iloc[3]['t'] == 2
assert bdf.iloc[4]['i'] == 1
assert bdf.iloc[4]['j'] == 2
assert bdf.iloc[4]['y'] == 1
assert bdf.iloc[4]['t'] == 2
assert bdf.iloc[5]['i'] == 1
assert bdf.iloc[5]['j'] == 2
assert bdf.iloc[5]['y'] == 1.5
assert bdf.iloc[5]['t'] == 2
assert bdf.iloc[6]['i'] == 1
assert bdf.iloc[6]['j'] == 3
assert bdf.iloc[6]['y'] == 0.5
assert bdf.iloc[6]['t'] == 2
assert bdf.iloc[7]['i'] == 3
assert bdf.iloc[7]['j'] == 2
assert bdf.iloc[7]['y'] == 1
assert bdf.iloc[7]['t'] == 1
assert bdf.iloc[8]['i'] == 3
assert bdf.iloc[8]['j'] == 2
assert bdf.iloc[8]['y'] == 1
assert bdf.iloc[8]['t'] == 2
assert bdf.iloc[9]['i'] == 3
assert bdf.iloc[9]['j'] == 1
assert bdf.iloc[9]['y'] == 1.5
assert bdf.iloc[9]['t'] == 1
assert bdf.iloc[10]['i'] == 3
assert bdf.iloc[10]['j'] == 1
assert bdf.iloc[10]['y'] == 1.5
assert bdf.iloc[10]['t'] == 2
def test_keep_ids_26():
# Keep only given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_keep = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().keep_ids('j', ids_to_keep).get_long()
assert set(bdf_keep['j']) == set(ids_to_keep)
# Make sure long and es give same results
bdf_keep2 = bdf.keep_ids('j', ids_to_keep)
assert len(bdf_keep) == len(bdf_keep2)
def test_drop_ids_27():
# Drop given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_drop = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().drop_ids('j', ids_to_drop).get_long()
assert set(bdf_keep['j']) == set(all_fids).difference(set(ids_to_drop))
# Make sure long and es give same results
bdf_keep2 = bdf.drop_ids('j', ids_to_drop)
assert len(bdf_keep) == len(bdf_keep2)
def test_min_obs_firms_28_1():
# List only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_firms_28_2():
# List only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_frame_29_1():
# Keep only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
new_frame.reset_index(drop=True, inplace=True)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_obs_frame_29_2():
# Keep only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_workers_firms_30():
# List only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng':
|
np.random.default_rng(1234)
|
numpy.random.default_rng
|
#
# cam_hole_filling_analysis.py
#
# Description: Under construction
import netCDF4
import numpy as np
import matplotlib.pyplot as plt
outdir = "./"
out_filename_tavg = "clip_tend_tavg"
out_filename_zavg = "clip_tend_zavg"
out_filetype =".png"
l_show_plots = False
### Function Definitions ###
#----------------------------------------------------------
def get_2d_profile(nc_file, var_name):
#
    # Reads a 2d profile (time,lev) from a SCAM data file
#
#----------------------------------------------------------
import numpy as np
var = nc_file.variables[var_name]
var = np.squeeze(var)
return var
#----------------------------------------------------------
#----------------------------------------------------------
def plot_z_profile(ax, data_label, data, lev):
    #
    # Plots a (time-averaged) vertical profile against the level coordinate,
    # together with a zero reference line.
    #
#----------------------------------------------------------
import numpy as np
ax.plot(data,lev,'b-',[0,0],[0,np.max(lev)],'r--')
ax.set_xlabel(data_label)
ax.set_ylabel('Altitude [m]')
#----------------------------------------------------------
#----------------------------------------------------------
def plot_t_profile(title, data_label, data, out_filepath):
#
    # Plots a time series of a vertically averaged quantity and saves the
    # resulting figure to out_filepath.
#
#----------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
fig.text(.5,.95,title,horizontalalignment='center',)
    plt.plot(range(len(data)), data)
plt.ylabel(data_label)
plt.xlabel('Time [days]')
fig.savefig(out_filepath)
plt.close()
#----------------------------------------------------------
#----------------------------------------------------------
def calc_invers_dzt(zm):
    #
    # Computes the inverse grid-cell height 1/dz for every (time, level)
    # cell from the interface altitudes zm.
    #
#----------------------------------------------------------
invers_dzt = [[0 for x in range(nlev)] for x in range(ntimes)]
for i in range(ntimes):
for j in range(nlev):
invers_dzt[i][j] = 1./(zm[i,j+1]-zm[i,j])
#end for j in range(1,len(zm))
#end for i in range(1,len(times))
invers_dzt = np.array(invers_dzt)
return invers_dzt
#----------------------------------------------------------
#----------------------------------------------------------
def vertical_integral(field, rho_ds, invrs_dz):
    #
    # Computes the density-weighted vertical integral
    # sum( field * rho_ds / invrs_dz ) over the level dimension.
    #
    #----------------------------------------------------------
    import numpy as np
    vertical_integral = np.sum(field * rho_ds / invrs_dz)
return vertical_integral
#----------------------------------------------------------
#----------------------------------------------------------
def vertical_average(field, rho_ds, invrs_dz):
    #
    # Computes the density-weighted vertical average of field at every
    # time step: sum(field*rho_ds/dz) / sum(rho_ds/dz), per time.
    #
    #----------------------------------------------------------
    import numpy as np
    field_zavg = [0 for x in range(ntimes)]
    dummy_one = [1 for x in range(nlev)]
    for i in range(1,ntimes):
        numer = vertical_integral(field[i,:], rho_ds[i,:], invrs_dz[i,:])
        denom = vertical_integral(dummy_one, rho_ds[i,:], invrs_dz[i,:])
field_zavg[i] = numer/denom
# end for i in range(1,ntimes)
field_zavg = np.array(field_zavg)
return field_zavg
#----------------------------------------------------------
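#----------------------------------------------------------
# Minimal sketch (illustrative only, with synthetic values that are not taken
# from the CAM file): for a single time step, the density-weighted vertical
# mean computed by vertical_average() reduces to
#   sum(field*rho/dz) / sum(rho/dz).
def _example_density_weighted_average():
    field = np.array([1.0, 2.0, 3.0])        # profile values on 3 levels
    rho_ds = np.array([1.2, 1.0, 0.8])       # dry static density per level
    invrs_dz = np.array([0.01, 0.01, 0.02])  # inverse grid-cell heights
    numer = np.sum(field * rho_ds / invrs_dz)
    denom = np.sum(rho_ds / invrs_dz)
    return numer / denom                     # density/thickness weighted mean
#----------------------------------------------------------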
### Main script starts here ###
# CAM data file
nc_file_path = './camclubb707_L30_T1200.cam.h0.0001-01-01-00000.nc'
nc = netCDF4.Dataset(nc_file_path)
# Grid cell altitudes
lev = nc.variables['lev']
nlev = len(lev)
times = nc.variables['time']
ntimes = len(times)
# Read the density profile and calculate the inverse of each grid cell height (for averaging over z)
rho = get_2d_profile(nc,'RHO_DS_HF')
tmp = [[0 for x in range(nlev)] for x in range(ntimes)]
for i in range(ntimes):
for j in range(nlev):
tmp[i][j] = rho[i,j+1]
#end for
#end for
rho = np.array(tmp)
zm = get_2d_profile(nc,'ZM_HF')
invers_dzt = calc_invers_dzt(zm)
ice_clip_tend = get_2d_profile(nc,'INEGCLPTEND')
ice_clip_tend_tavg = np.average(ice_clip_tend, axis=0)
liq_clip_tend = get_2d_profile(nc,'LNEGCLPTEND')
liq_clip_tend_tavg = np.average(liq_clip_tend, axis=0)
vap_clip_tend = get_2d_profile(nc,'VNEGCLPTEND')
vap_clip_tend_tavg = np.average(vap_clip_tend, axis=0)
# Plot profiles
title='CAM-CLUBB-SILHS clipping budgets'
fig = plt.figure()
fig.text(.5,.95,title,horizontalalignment='center',)
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
ax4 = plt.subplot2grid((2, 2), (1, 1))
plot_z_profile(ax1,'INEGCLPTEND', ice_clip_tend_tavg,lev)
plot_z_profile(ax2,'LNEGCLPTEND', liq_clip_tend_tavg,lev)
plot_z_profile(ax3,'VNEGCLPTEND', vap_clip_tend_tavg,lev)
plot_z_profile(ax4,'Sum', ice_clip_tend_tavg+liq_clip_tend_tavg+vap_clip_tend_tavg,lev)
fig.savefig(outdir+out_filename_tavg+out_filetype)
plt.close()
# Plot timelines of height averages
levavg_sum = vertical_average(ice_clip_tend+liq_clip_tend+vap_clip_tend, rho, invers_dzt)
levavg_vap = vertical_average(vap_clip_tend, rho, invers_dzt)
levavg_liq = vertical_average(liq_clip_tend, rho, invers_dzt)
levavg_ice = vertical_average(ice_clip_tend, rho, invers_dzt)
print("---- Total ----")
print("Sum: "+str(np.sum(levavg_sum)))
print("Vap: "+str(np.sum(levavg_vap)))
print("Liq: "+str(np.sum(levavg_liq)))
print("Ice: "+str(np.sum(levavg_ice)))
title = 'Sum of Vapor/Ice/Liquid mixing ratio tendencies due to hole filling (total)'
label = 'Mixing ratios (Sum)'
out_filepath = outdir+out_filename_zavg+"_sum"+out_filetype
plot_t_profile(title, label, levavg_sum, out_filepath)
title = 'Vapor mixing ratio tendencies due to hole filling (total)'
label = 'Vapor mixing ratio'
out_filepath = outdir+out_filename_zavg+"_vap"+out_filetype
plot_t_profile(title, label, levavg_vap, out_filepath)
title = 'Cloud liquid mixing ratio tendencies due to hole filling (total)'
label = 'Cloud liquid mixing ratio'
out_filepath = outdir+out_filename_zavg+"_liq"+out_filetype
plot_t_profile(title, label, levavg_liq, out_filepath)
title = 'Cloud ice mixing ratio tendencies due to hole filling (total)'
label = 'Cloud ice mixing ratio'
out_filepath = outdir+out_filename_zavg+"_ice"+out_filetype
plot_t_profile(title, label, levavg_ice, out_filepath)
# Plot profiles of clipping tendencies due to vertical hole filling
ice_clip_tend_vhf = get_2d_profile(nc,'INEGCLPTEND_VHF')
ice_clip_tend_vhf_tavg = np.average(ice_clip_tend_vhf, axis=0)
liq_clip_tend_vhf = get_2d_profile(nc,'LNEGCLPTEND_VHF')
liq_clip_tend_vhf_tavg = np.average(liq_clip_tend_vhf, axis=0)
fig = plt.figure()
fig.text(.5,.95,title,horizontalalignment='center',)
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
plot_z_profile(ax1,'INEGCLPTEND_VHF', ice_clip_tend_vhf_tavg,lev)
plot_z_profile(ax2,'LNEGCLPTEND_VHF', liq_clip_tend_vhf_tavg,lev)
plot_z_profile(ax3,'Ice+Liq VHF', ice_clip_tend_vhf_tavg+liq_clip_tend_vhf_tavg,lev)
fig.savefig(outdir+out_filename_tavg+"_vhf"+out_filetype)
plt.close()
levavg_sum_vhf = vertical_average(ice_clip_tend_vhf+liq_clip_tend_vhf, rho, invers_dzt)
levavg_liq_vhf = vertical_average(liq_clip_tend_vhf, rho, invers_dzt)
levavg_ice_vhf = vertical_average(ice_clip_tend_vhf, rho, invers_dzt)
print("---- VertHF ----")
print("Sum: "+str(np.sum(levavg_sum_vhf)))
print("Liq: "+str(np.sum(levavg_liq_vhf)))
print("Ice: "+str(np.sum(levavg_ice_vhf)))
title = 'Sum of Vapor/Ice/Liquid mixing ratio tendencies due to vertical hole filling'
label = 'Mixing ratios (Sum)'
out_filepath = outdir+out_filename_zavg+"_sum_vhf"+out_filetype
plot_t_profile(title, label, levavg_sum_vhf, out_filepath)
title = 'Cloud liquid mixing ratio tendencies due to vertical hole filling'
label = 'Cloud liquid mixing ratio'
out_filepath = outdir+out_filename_zavg+"_liq_vhf"+out_filetype
plot_t_profile(title, label, levavg_liq_vhf, out_filepath)
title = 'Cloud ice mixing ratio tendencies due to vertical hole filling'
label = 'Cloud ice mixing ratio'
out_filepath = outdir+out_filename_zavg+"_ice_vhf"+out_filetype
plot_t_profile(title, label, levavg_ice_vhf, out_filepath)
# Plot profiles of clipping tendencies due to water vapor hole filling
ice_clip_tend_whf = get_2d_profile(nc,'INEGCLPTEND_WHF')
ice_clip_tend_whf_tavg = np.average(ice_clip_tend_whf, axis=0)
liq_clip_tend_whf = get_2d_profile(nc,'LNEGCLPTEND_WHF')
liq_clip_tend_whf_tavg = np.average(liq_clip_tend_whf, axis=0)
fig = plt.figure()
fig.text(.5,.95,title,horizontalalignment='center',)
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
plot_z_profile(ax1,'INEGCLPTEND_WHF', ice_clip_tend_whf_tavg,lev)
plot_z_profile(ax2,'LNEGCLPTEND_WHF', liq_clip_tend_whf_tavg,lev)
plot_z_profile(ax3,'Ice+Liq WHF', ice_clip_tend_whf_tavg+liq_clip_tend_whf_tavg,lev)
fig.savefig(outdir+out_filename_tavg+"_whf"+out_filetype)
plt.close()
levavg_sum_whf = vertical_average(ice_clip_tend_whf+liq_clip_tend_whf, rho, invers_dzt)
levavg_liq_whf = vertical_average(liq_clip_tend_whf, rho, invers_dzt)
levavg_ice_whf = vertical_average(ice_clip_tend_whf, rho, invers_dzt)
print("---- WVHF ----")
print("Sum: "+str(np.sum(levavg_sum_whf)))
print("Liq: "+str(np.sum(levavg_liq_whf)))
print("Ice: "+str(np.sum(levavg_ice_whf)))
title = 'Sum of Vapor/Ice/Liquid mixing ratio tendencies due to water vapor hole filling'
label = 'Mixing ratios (Sum)'
out_filepath = outdir+out_filename_zavg+"_sum_whf"+out_filetype
plot_t_profile(title, label, levavg_sum_whf, out_filepath)
title = 'Cloud liquid mixing ratio tendencies due to water vapor hole filling'
label = 'Cloud liquid mixing ratio'
out_filepath = outdir+out_filename_zavg+"_liq_whf"+out_filetype
plot_t_profile(title, label, levavg_liq_whf, out_filepath)
title = 'Cloud ice mixing ratio tendencies due to water vapor hole filling'
label = 'Cloud ice mixing ratio'
out_filepath = outdir+out_filename_zavg+"_ice_whf"+out_filetype
plot_t_profile(title, label, levavg_ice_whf, out_filepath)
# Plot profiles of clipping tendencies due to clipping
ice_clip_tend_clp = get_2d_profile(nc,'INEGCLPTEND_CLP')
ice_clip_tend_clp_tavg = np.average(ice_clip_tend_clp, axis=0)
liq_clip_tend_clp = get_2d_profile(nc,'LNEGCLPTEND_CLP')
liq_clip_tend_clp_tavg = np.average(liq_clip_tend_clp, axis=0)
fig = plt.figure()
fig.text(.5,.95,title,horizontalalignment='center',)
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
plot_z_profile(ax1,'INEGCLPTEND_CLP', ice_clip_tend_clp_tavg,lev)
plot_z_profile(ax2,'LNEGCLPTEND_CLP', liq_clip_tend_clp_tavg,lev)
plot_z_profile(ax3,'Ice+Liq CLP', ice_clip_tend_clp_tavg+liq_clip_tend_clp_tavg,lev)
fig.savefig(outdir+out_filename_tavg+"_clp"+out_filetype)
plt.close()
levavg_sum_clp = vertical_average(ice_clip_tend_clp+liq_clip_tend_clp, rho, invers_dzt)
levavg_liq_clp = vertical_average(liq_clip_tend_clp, rho, invers_dzt)
levavg_ice_clp = vertical_average(ice_clip_tend_clp, rho, invers_dzt)
print("---- CLIP ----")
print("Sum: "+str(np.sum(levavg_sum_clp)))
print("Liq: "+str(np.sum(levavg_liq_clp)))
print("Ice: "+str(
|
np.sum(levavg_ice_clp)
|
numpy.sum
|
import random
import math
import itertools
import numpy as np
from cvxpy import *
def _solve(M, omega):
n_1 = M.shape[0]
n_2 = M.shape[1]
#X_ = Semidef(n_1 + n_2)
X_ = Variable((n_1 + n_2, n_1 + n_2), PSD=True)
objective = Minimize(trace(X_))
#constraints = [(X_) == (X_.T)] # add symmetric constraint.
constraints=[]
for i, j in omega:
constr = (X_[i, j + n_1] == M[i, j])
constraints.append(constr)
problem = Problem(objective, constraints)
problem.solve(solver=CVXOPT)
print("STATUS :", problem.status)
print("OPTIMAL VALUE:", problem.value)
X0 = X_.value
# check optimizer's solution is symmetric.
print("|X0-X0.T|_F :", np.linalg.norm(np.subtract(X0, X0.T), "fro"))
return X_.value[:n_1, n_1:]
def _generate_omega(n_1, n_2, m):
return random.sample([(i, j) for i in range(n_1) for j in range(n_2)], m)
def _get_mask_matrix(n_1, n_2, omega):
"""
If we observed entry (i, j) of matrix M, the entry of mask matrix is 1,
Otherwise 0.
"""
mask = np.zeros((n_1, n_2), dtype=np.int8)
for i, j in omega:
mask[i, j] = 1
return mask
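# Minimal usage sketch (illustrative only; _mask_example is a hypothetical
# helper, not part of the original script): for a 2x3 matrix with observed
# entries (0, 1) and (1, 2), _get_mask_matrix returns
#   [[0, 1, 0],
#    [0, 0, 1]]
def _mask_example():
    omega = [(0, 1), (1, 2)]
    mask = _get_mask_matrix(2, 3, omega)
    assert mask.tolist() == [[0, 1, 0], [0, 0, 1]]
    return mask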
def _get_abs_max_from_matrix(M):
return np.max(np.absolute(M))
def main(n_1, n_2, r, m):
print("#row of M :", n_1)
print("#column of M :", n_2)
print("#sample :", m)
L = np.random.randn(n_1, r)
R = np.random.randn(n_2, r)
M = np.dot(L, R.T)
M_abs_max = _get_abs_max_from_matrix(M)
# print("RANK of M :", np.linalg.matrix_rank(M))
print("|M|_* :", np.linalg.norm(M, "nuc"))
M_rank = np.linalg.matrix_rank(M)
print("RANK of M :", M_rank)
U, S, V_T = np.linalg.svd(M, full_matrices=False)
print(S)
omega = _generate_omega(n_1, n_2, m)
mask = _get_mask_matrix(n_1, n_2, omega)
    # M_ is a training copy of M with the unobserved entries overwritten,
    # so the solver only sees the sampled entries in omega.
    # This block should not affect the result; it is defensive programming.
# M_ = np.random.uniform(-M_abs_max, M_abs_max, M.shape)
M_ = M.copy()
np.place(M_, 1 - mask, M_abs_max * M_abs_max)
X = _solve(M_, omega)
X_rank = np.linalg.matrix_rank(X)
print("RANK of X :", X_rank)
print("|X|_* :", np.linalg.norm(X, "nuc"))
U, S, V_T = np.linalg.svd(X, full_matrices=False)
print(S)
E = np.subtract(M, X)
E_train = E.copy()
|
np.place(E_train, 1 - mask, 0)
|
numpy.place
|
"""
main page
"""
import os
os.chdir("F:/01Algorithms/HAPI/Web_application")
# import sys
# sys.path.append("HAPI/function")
import gdal
# to add the gdal data environment variable
gdal.SetConfigOption("GDAL_DATA","E:/ProgramData/Conda3/Library/share/gdal")
# to add the proj data environment variable
gdal.SetConfigOption("PROJ_data","E:/ProgramData/Conda3/Library/share/epsg_csv")
#%% Library
import numpy as np
import pandas as pd
import datetime as dt
from math import pi
from io import StringIO
from io import BytesIO
import base64
import geopandas as gpd
from shapely.geometry import Polygon
from fiona.crs import from_epsg
import osr
from collections import OrderedDict
#import pysal as ps
#from datetime import datetime,date
#import time
# bokeh
from bokeh.layouts import layout, widgetbox, row, column, gridplot #,Widget
from bokeh.models.widgets import (Panel, Button, TextInput, Div, Tabs ,Slider,Select ,
RadioButtonGroup, #DataTable, DateFormatter, TableColumn,
# DateRangeSlider, DateFormatter, DataTable, TableColumn #,DatePicker, NumberFormatter
)
from bokeh.models import (ColumnDataSource, CustomJS, GMapPlot,GMapOptions,
LinearAxis, Range1d, HoverTool, PanTool, WheelZoomTool,
BoxSelectTool, ColorBar,LogColorMapper,ResetTool,BoxZoomTool, #SaveTool,
CrosshairTool,
# NumeralTickFormatter, #PrintfTickFormatter, #BoxSelectionOverlay
Circle, Square, Title, Legend) #, Slider,GeoJSONDataSource, PreviewSaveTool,
from bokeh.plotting import figure#, gmap
from bokeh.io import curdoc , show
#from bokeh.palettes import YlOrRd6 as palette
#from bokeh.palettes import RdYlGn10 as palette
from bokeh.models.glyphs import Patches #, Line, Circle
#from bokeh.core import templates
#from bokeh.resources import CDN
#from bokeh.embed import components, autoload_static, autoload_server
#from bokeh.plotting import save
# functions
#import DHBV_functions
import Hapi.raster as Raster
import Hapi.vector as Vector
#import WeirdFn
import Hapi.weirdFn as WeirdFn
import Hapi.distparameters as DP
#import DistParameters
from Hapi.run import RunHAPIwithLake
# import Hapi.Wrapper as Wrapper
import Hapi.performancecriteria as PC
#import plotting_functions as plf
import Hapi.java_functions as Java
import Hapi.inputs as IN
import Hapi.statisticaltools as st
#from inputs import Inputs
GoogleFile = open("HAPI/static/Google API.txt")
GoogleAPI = GoogleFile.read()
#%% Run the model
# Read the input data
data_file= 'HAPI/static/input_data/' # path to the input data folder
s=dt.datetime(2012,6,14,19,0,0)
e=dt.datetime(2013,12,23,0,0,0)
index=pd.date_range(s,e,freq="1H")
lake_data=pd.DataFrame(index=index)
# Read data from the output file
lake_data['et']=np.loadtxt(data_file+"lake/" + "et.txt")
lake_data['t']=
|
np.loadtxt(data_file+"lake/" + "temp.txt")
|
numpy.loadtxt
|
import pickle
import string
from argparse import ArgumentParser
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import numpy.linalg as LA
import prody
import torch
from Bio import SeqIO
from einops import repeat
from sidechainnet.utils.measure import get_seq_coords_and_angles
from sidechainnet.utils.sequence import ProteinVocabulary
from torch.utils.data import DataLoader, Dataset
from alphafold2_pytorch.constants import DISTOGRAM_BUCKETS
from tqdm import tqdm
try:
import pytorch_lightning as pl
LightningDataModule = pl.LightningDataModule
except ImportError:
LightningDataModule = object
CACHE_PATH = Path("~/.cache/alphafold2_pytorch").expanduser()
DATA_DIR = CACHE_PATH / "trrosetta" / "trrosetta"
URL = "http://s3.amazonaws.com/proteindata/data_pytorch/trrosetta.tar.gz"
REMOVE_KEYS = dict.fromkeys(string.ascii_lowercase)
REMOVE_KEYS["."] = None
REMOVE_KEYS["*"] = None
translation = str.maketrans(REMOVE_KEYS)
DEFAULT_VOCAB = ProteinVocabulary()
def default_tokenize(seq: str) -> List[int]:
return [DEFAULT_VOCAB[ch] for ch in seq]
def read_fasta(filename: str) -> List[Tuple[str, str]]:
def remove_insertions(sequence: str) -> str:
return sequence.translate(translation)
return [
(record.description, remove_insertions(str(record.seq)))
for record in SeqIO.parse(filename, "fasta")
]
def read_pdb(pdb: str):
ag = prody.parsePDB(pdb)
for chain in ag.iterChains():
angles, coords, seq = get_seq_coords_and_angles(chain)
return angles, coords, seq
def download_file(url, filename=None, root=CACHE_PATH):
import os
import urllib
root.mkdir(exist_ok=True, parents=True)
filename = filename or os.path.basename(url)
download_target = root / filename
download_target_tmp = root / f"tmp.{filename}"
if download_target.exists() and not download_target.is_file():
raise RuntimeError(f"{download_target} exists and is not a regular file")
if download_target.is_file():
return download_target
with urllib.request.urlopen(url) as source, open(
download_target_tmp, "wb"
) as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
download_target_tmp.rename(download_target)
return download_target
def get_or_download(url: str = URL):
"""
download and extract trrosetta data
"""
import tarfile
file = CACHE_PATH / "trrosetta.tar.gz"
dir = CACHE_PATH / "trrosetta"
dir_temp = CACHE_PATH / "trrosetta_tmp"
if dir.is_dir():
print(f"Load cached data from {dir}")
return dir
if not file.is_file():
print(f"Cache not found, download from {url} to {file}")
download_file(url)
print(f"Extract data from {file} to {dir}")
with tarfile.open(file, "r:gz") as tar:
tar.extractall(dir_temp)
dir_temp.rename(dir)
return dir
def pad_sequences(sequences, constant_value=0, dtype=None) -> np.ndarray:
batch_size = len(sequences)
shape = [batch_size] + np.max([seq.shape for seq in sequences], 0).tolist()
if dtype is None:
dtype = sequences[0].dtype
if isinstance(sequences[0], np.ndarray):
array = np.full(shape, constant_value, dtype=dtype)
elif isinstance(sequences[0], torch.Tensor):
array = torch.full(shape, constant_value, dtype=dtype)
for arr, seq in zip(array, sequences):
arrslice = tuple(slice(dim) for dim in seq.shape)
arr[arrslice] = seq
return array
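# Minimal usage sketch (illustrative only; _pad_sequences_example is a
# hypothetical helper, not part of the original module): two ragged token
# arrays padded to a common length with the default pad value 0.
def _pad_sequences_example():
    seqs = [np.array([3, 1, 4]), np.array([1, 5])]
    padded = pad_sequences(seqs)
    assert padded.tolist() == [[3, 1, 4], [1, 5, 0]]
    return padded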
class TrRosettaDataset(Dataset):
def __init__(
self,
data_dir: Path,
list_path: Path,
tokenize: Callable[[str], List[int]],
seq_pad_value: int = 20,
random_sample_msa: bool = False,
max_seq_len: int = 300,
max_msa_num: int = 300,
overwrite: bool = False,
):
self.data_dir = data_dir
self.file_list: List[Path] = self.read_file_list(data_dir, list_path)
self.tokenize = tokenize
self.seq_pad_value = seq_pad_value
self.random_sample_msa = random_sample_msa
self.max_seq_len = max_seq_len
self.max_msa_num = max_msa_num
self.overwrite = overwrite
def __len__(self) -> int:
return len(self.file_list)
def read_file_list(self, data_dir: Path, list_path: Path):
file_glob = (data_dir / "npz").glob("*.npz")
files = set(list_path.read_text().split())
if len(files) == 0:
raise ValueError("Passed an empty split file set")
file_list = [f for f in file_glob if f.name in files]
if len(file_list) != len(files):
num_missing = len(files) - len(file_list)
raise FileNotFoundError(
f"{num_missing} specified split files not found in directory"
)
return file_list
def has_cache(self, index):
if self.overwrite:
return False
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
return path.is_file()
def write_cache(self, index, data):
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
path.parent.mkdir(exist_ok=True, parents=True)
with open(path, "wb") as file:
pickle.dump(data, file)
def read_cache(self, index):
path = (self.data_dir / "cache" / self.file_list[index].stem).with_suffix(
".pkl"
)
with open(path, "rb") as file:
return pickle.load(file)
def __getitem__(self, index):
if self.has_cache(index):
item = self.read_cache(index)
else:
id = self.file_list[index].stem
pdb_path = self.data_dir / "pdb" / f"{id}.pdb"
msa_path = self.data_dir / "a3m" / f"{id}.a3m"
_, msa = zip(*read_fasta(str(msa_path)))
msa = np.array([np.array(list(seq)) for seq in msa])
angles, coords, seq = read_pdb(str(pdb_path))
seq = np.array(list(seq))
coords = coords.reshape((coords.shape[0] // 14, 14, 3))
dist = self.get_bucketed_distance(seq, coords, subset="ca")
item = {
"id": id,
"seq": seq,
"msa": msa,
"coords": coords,
"angles": angles,
"dist": dist
}
self.write_cache(index, item)
item["msa"] = self.sample(item["msa"], self.max_msa_num, self.random_sample_msa)
item = self.crop(item, self.max_seq_len)
return item
def calc_cb(self, coord):
N = coord[0]
CA = coord[1]
C = coord[2]
b = CA - N
c = C - CA
a =
|
np.cross(b, c)
|
numpy.cross
|
# --- built in ---
import os
import sys
import time
import math
import logging
import functools
# --- 3rd party ---
import numpy as np
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
# --- my module ---
__all__ = [
'ToyMLP',
'Energy',
'Trainer',
]
# --- primitives ---
class Swish(nn.Module):
def __init__(self, dim=-1):
"""Swish activ bootleg from
https://github.com/wgrathwohl/LSD/blob/master/networks.py#L299
Args:
dim (int, optional): input/output dimension. Defaults to -1.
"""
super().__init__()
if dim > 0:
self.beta = nn.Parameter(torch.ones((dim,)))
else:
self.beta = torch.ones((1,))
def forward(self, x):
if len(x.size()) == 2:
return x * torch.sigmoid(self.beta[None, :] * x)
else:
return x * torch.sigmoid(self.beta[None, :, None, None] * x)
class ToyMLP(nn.Module):
def __init__(
self,
input_dim=2,
output_dim=1,
units=[300, 300],
swish=True,
dropout=False
):
"""Toy MLP from
https://github.com/ermongroup/ncsn/blob/master/runners/toy_runner.py#L198
Args:
input_dim (int, optional): input dimensions. Defaults to 2.
output_dim (int, optional): output dimensions. Defaults to 1.
units (list, optional): hidden units. Defaults to [300, 300].
swish (bool, optional): use swish as activation function. Set False to use
soft plus instead. Defaults to True.
dropout (bool, optional): use dropout layers. Defaults to False.
"""
super().__init__()
layers = []
in_dim = input_dim
for out_dim in units:
layers.extend([
nn.Linear(in_dim, out_dim),
Swish(out_dim) if swish else nn.Softplus(),
nn.Dropout(.5) if dropout else nn.Identity()
])
in_dim = out_dim
layers.append(nn.Linear(in_dim, output_dim))
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
# --- energy model ---
class Energy(nn.Module):
def __init__(self, net):
"""A simple energy model
Args:
net (nn.Module): An energy function, the output shape of
the energy function should be (b, 1). The score is
computed by grad(-E(x))
"""
super().__init__()
self.net = net
def forward(self, x):
return self.net(x)
def score(self, x, sigma=None):
x = x.requires_grad_()
logp = -self.net(x).sum()
return torch.autograd.grad(logp, x, create_graph=True)[0]
def save(self, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
torch.save(self.state_dict(), path)
def load(self, path):
self.load_state_dict(torch.load(path))
return self
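# Minimal sketch (illustrative only; _energy_score_example is a hypothetical
# helper, not part of the original module): computing the score
# s(x) = -dE(x)/dx for a small batch with an untrained ToyMLP energy.
def _energy_score_example():
    energy = Energy(ToyMLP(input_dim=2, output_dim=1))
    x = torch.randn(4, 2)
    s = energy.score(x)          # score has the same shape as x: (4, 2)
    assert s.shape == x.shape
    return s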
class Trainer():
def __init__(
self,
model,
learning_rate = 1e-3,
clipnorm = 100.,
n_slices = 1,
loss_type = 'ssm-vr',
noise_type = 'gaussian',
device = 'cuda'
):
"""Energy based model trainer
Args:
model (nn.Module): energy-based model
            learning_rate (float, optional): learning rate. Defaults to 1e-3.
clipnorm (float, optional): gradient clip. Defaults to 100..
n_slices (int, optional): number of slices for sliced score matching loss.
Defaults to 1.
loss_type (str, optional): type of loss. Can be 'ssm-vr', 'ssm', 'deen',
'dsm'. Defaults to 'ssm-vr'.
noise_type (str, optional): type of noise. Can be 'radermacher', 'sphere'
                or 'gaussian'. Defaults to 'gaussian'.
device (str, optional): torch device. Defaults to 'cuda'.
"""
self.model = model
self.learning_rate = learning_rate
self.clipnorm = clipnorm
self.n_slices = n_slices
self.loss_type = loss_type.lower()
self.noise_type = noise_type.lower()
self.device = device
self.model = self.model.to(device=self.device)
# setup optimizer
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
self.num_gradsteps = 0
self.num_epochs = 0
self.progress = 0
self.tb_writer = None
def ssm_loss(self, x, v):
"""SSM loss from
Sliced Score Matching: A Scalable Approach to Density and Score Estimation
The loss is computed as
s = -dE(x)/dx
loss = vT*(ds/dx)*v + 1/2*(vT*s)^2
Args:
x (torch.Tensor): input samples
v (torch.Tensor): sampled noises
Returns:
SSM loss
"""
x = x.unsqueeze(0).expand(self.n_slices, *x.shape) # (n_slices, b, ...)
x = x.contiguous().view(-1, *x.shape[2:]) # (n_slices*b, ...)
x = x.requires_grad_()
score = self.model.score(x) # (n_slices*b, ...)
sv = torch.sum(score * v) # ()
loss1 = torch.sum(score * v, dim=-1) ** 2 * 0.5 # (n_slices*b,)
gsv = torch.autograd.grad(sv, x, create_graph=True)[0] # (n_slices*b, ...)
loss2 = torch.sum(v * gsv, dim=-1) # (n_slices*b,)
loss = (loss1 + loss2).mean() # ()
return loss
def ssm_vr_loss(self, x, v):
"""SSM-VR (variance reduction) loss from
Sliced Score Matching: A Scalable Approach to Density and Score Estimation
The loss is computed as
s = -dE(x)/dx
loss = vT*(ds/dx)*v + 1/2*||s||^2
Args:
x (torch.Tensor): input samples
v (torch.Tensor): sampled noises
Returns:
SSM-VR loss
"""
x = x.unsqueeze(0).expand(self.n_slices, *x.shape) # (n_slices, b, ...)
x = x.contiguous().view(-1, *x.shape[2:]) # (n_slices*b, ...)
x = x.requires_grad_()
score = self.model.score(x) # (n_slices*b, ...)
sv = torch.sum(score * v) # ()
loss1 = torch.norm(score, dim=-1) ** 2 * 0.5 # (n_slices*b,)
gsv = torch.autograd.grad(sv, x, create_graph=True)[0] # (n_slices*b, ...)
loss2 = torch.sum(v*gsv, dim=-1) # (n_slices*b,)
loss = (loss1 + loss2).mean() # ()
return loss
def deen_loss(self, x, v, sigma=0.1):
"""DEEN loss from
Deep Energy Estimator Networks
The loss is computed as
x_ = x + v # noisy samples
s = -dE(x_)/dx_
loss = 1/2*||x - x_ + sigma^2*s||^2
Args:
x (torch.Tensor): input samples
v (torch.Tensor): sampled noises
            sigma (float, optional): noise scale. Defaults to 0.1.
Returns:
DEEN loss
"""
x = x.requires_grad_()
v = v * sigma
x_ = x + v
s = sigma ** 2 * self.model.score(x_)
loss = torch.norm(s+v, dim=-1)**2
loss = loss.mean()/2.
return loss
def dsm_loss(self, x, v, sigma=0.1):
"""DSM loss from
A Connection Between Score Matching
and Denoising Autoencoders
The loss is computed as
x_ = x + v # noisy samples
s = -dE(x_)/dx_
loss = 1/2*||s + (x-x_)/sigma^2||^2
Args:
x (torch.Tensor): input samples
v (torch.Tensor): sampled noises
sigma (float, optional): noise scale. Defaults to 0.1.
Returns:
DSM loss
"""
x = x.requires_grad_()
v = v * sigma
x_ = x + v
s = self.model.score(x_)
loss = torch.norm(s + v/(sigma**2), dim=-1)**2
loss = loss.mean()/2.
return loss
def get_random_noise(self, x, n_slices=None):
"""Sampling random noises
Args:
x (torch.Tensor): input samples
n_slices (int, optional): number of slices. Defaults to None.
Returns:
torch.Tensor: sampled noises
"""
if n_slices is None:
v = torch.randn_like(x, device=self.device)
else:
v = torch.randn((n_slices,)+x.shape, dtype=x.dtype, device=self.device)
v = v.view(-1, *v.shape[2:]) # (n_slices*b, 2)
if self.noise_type == 'radermacher':
v = v.sign()
elif self.noise_type == 'sphere':
v = v/torch.norm(v, dim=-1, keepdim=True) *
|
np.sqrt(v.shape[-1])
|
numpy.sqrt
|
import itertools
import os.path
import numpy as np
import pytest
import janus.fft.serial as fft
import janus.material.elastic.linear.isotropic as material
from numpy.testing import assert_allclose
from janus.green import truncated
from janus.green import filtered
# Default material constants
MU = 0.75
NU = 0.3
# All tests are performed with discrete Green operators based on these grids
GRID_SIZES = ([(8, 8), (8, 16), (16, 8), (4, 4, 4)]
+ list(itertools.permutations((4, 8, 16))))
# The value of one unit in the last place, for the float64 format.
ULP = np.finfo(np.float64).eps
def multi_indices(n):
"""Return the list of all multi-indices within the specified bounds.
Return the list of multi-indices ``[b[0], ..., b[dim - 1]]`` such that
``0 <= b[i] < n[i]`` for all i.
"""
iterables = [range(ni) for ni in n]
return [np.asarray(b, dtype=np.intc)
for b in itertools.product(*iterables)]
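# Small illustrative check (not one of the collected tests; _multi_indices_example
# is a hypothetical helper): multi_indices enumerates every frequency in
# row-major order, e.g. for n = (2, 2) it yields [0, 0], [0, 1], [1, 0], [1, 1].
def _multi_indices_example():
    b = multi_indices((2, 2))
    assert [x.tolist() for x in b] == [[0, 0], [0, 1], [1, 0], [1, 1]]
    return b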
class AbstractTestDiscreteGreenOperator:
def pytest_generate_tests(self, metafunc):
if metafunc.function.__name__ == 'test_init_invalid_params':
g2 = material.create(MU, NU, 2).green_operator()
g3 = material.create(MU, NU, 3).green_operator()
params = [(g3, (9, 9), 1., None),
(g2, (-1, 9), 1., None),
(g2, (9, -1), 1., None),
(g2, (9, 9), -1., None),
(g2, (9, 9), 1., fft.create_real((8, 9))),
(g2, (9, 9), 1., fft.create_real((9, 8))),
(g2, (9, 9, 9), 1., None),
(g3, (-1, 9, 9), 1., None),
(g3, (9, -1, 9), 1., None),
(g3, (9, 9, -1), 1., None),
(g3, (9, 9, 9), -1., None)]
metafunc.parametrize('greenc, n, h, transform', params)
elif metafunc.function.__name__ == 'test_to_memoryview':
params = [(n, flag) for n in GRID_SIZES for flag in [0, 1]]
metafunc.parametrize('n, flag', params)
elif metafunc.function.__name__ == 'test_to_memoryview_invalid_params':
g2 = self.greend(material.create(MU, NU, 2).green_operator(),
(8, 16), 1.0)
g3 = self.greend(material.create(MU, NU, 3).green_operator(),
(8, 16, 32), 1.0)
params = [(g2, (g2.oshape[-1], g2.ishape[-1] + 1)),
(g2, (g2.oshape[-1] + 1, g2.ishape[-1])),
(g3, (g3.oshape[-1], g3.ishape[-1] + 1)),
(g3, (g3.oshape[-1] + 1, g3.ishape[-1])),]
metafunc.parametrize('greend, out_shape', params)
elif metafunc.function.__name__ == 'test_apply_by_freq':
x = [np.array([0.3, -0.4, 0.5]),
np.array([0.1, -0.2, 0.3, -0.4, 0.5, -0.6])]
params = [(n, x[len(n) - 2], flag) for n in GRID_SIZES
for flag in range(3)]
metafunc.parametrize('n, x, flag', params)
elif metafunc.function.__name__ == 'test_apply_by_freq_invalid_params':
g2 = self.greend(material.create(MU, NU, 2).green_operator(),
(8, 16), 1.0)
g3 = self.greend(material.create(MU, NU, 3).green_operator(),
(8, 16, 32), 1.0)
params = [(g2, g2.ishape[-1], g2.oshape[-1] + 1),
(g2, g2.ishape[-1] + 1, g2.oshape[-1]),
(g3, g3.ishape[-1], g3.oshape[-1] + 1),
(g3, g3.ishape[-1] + 1, g3.oshape[-1]),]
metafunc.parametrize('greend, x_size, y_size', params)
elif metafunc.function.__name__ == 'test_apply':
if self.operates_in_place:
flags = [0, 1, 2]
else:
flags = [0, 2]
params = [i + (j,) for i in self.params_test_apply() for j in flags]
metafunc.parametrize('path_to_ref, rtol, flag', params)
elif metafunc.function.__name__ == 'test_apply_invalid_params':
g = self.greend(material.create(MU, NU, 2).green_operator(),
(8, 16), 1.0)
i0, i1, i2 = g.ishape
o0, o1, o2 = g.oshape
params2 = [(g, (i0 + 1, i1, i2), (o0, o1, o2)),
(g, (i0, i1 + 1, i2), (o0, o1, o2)),
(g, (i0, i1, i2 + 1), (o0, o1, o2)),
(g, (i0, i1, i2), (o0 + 1, o1, o2)),
(g, (i0, i1, i2), (o0, o1 + 1, o2)),
(g, (i0, i1, i2), (o0, o1, o2 + 1))]
g = self.greend(material.create(MU, NU, 3).green_operator(),
(8, 16, 32), 1.0)
i0, i1, i2, i3 = g.ishape
o0, o1, o2, o3 = g.oshape
params3 = [(g, (i0 + 1, i1, i2, i3), (o0, o1, o2, o3)),
(g, (i0, i1 + 1, i2, i3), (o0, o1, o2, o3)),
(g, (i0, i1, i2 + 1, i3), (o0, o1, o2, o3)),
(g, (i0, i1, i2, i3 + 1), (o0, o1, o2, o3)),
(g, (i0, i1, i2, i3), (o0 + 1, o1, o2, o3)),
(g, (i0, i1, i2, i3), (o0, o1 + 1, o2, o3)),
(g, (i0, i1, i2, i3), (o0, o1, o2 + 1, o3)),
(g, (i0, i1, i2, i3), (o0, o1, o2, o3 + 1))]
metafunc.parametrize('greend, x_shape, y_shape', params2 + params3)
def test_init_invalid_params(self, greenc, n, h, transform):
with pytest.raises(ValueError):
self.greend(greenc, n, h, transform)
def test_to_memoryview(self, n, flag):
dim = len(n)
greenc = material.create(MU, NU, dim).green_operator()
greend = self.greend(greenc, n, 1.0)
k = np.empty((dim,), dtype=np.float64)
for b in multi_indices(n):
expected = self.to_memoryview_expected(greenc, n, b)
greend.set_frequency(b)
if flag == 0:
actual = greend.to_memoryview()
elif flag == 1:
base = np.empty_like(expected)
actual = greend.to_memoryview(base)
assert actual.base is base
else:
raise ValueError
|
assert_allclose(actual, expected, ULP, ULP)
|
numpy.testing.assert_allclose
|
#!/usr/bin/env python
"""
Contains class PerfData and its subclasses, which are objects for collecting and computing model performance metrics
and predictions
"""
import sklearn.metrics
import deepchem as dc
import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score, confusion_matrix, average_precision_score, precision_score, recall_score
from sklearn.metrics import accuracy_score, matthews_corrcoef, cohen_kappa_score, log_loss, balanced_accuracy_score
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from atomsci.ddm.pipeline import transformations as trans
# ******************************************************************************************************************************
def rms_error(y_real, y_pred):
""" Calculates the root mean squared error. Score function used for model selection.
Args:
y_real (np.array): Array of ground truth values
y_pred (np.array): Array of predicted values
Returns:
        (float): root mean squared error of the input
"""
return np.sqrt(mean_squared_error(y_real, y_pred))
# ---------------------------------------------
def negative_predictive_value(y_real, y_pred):
"""
Computes negative predictive value of a binary classification model: NPV = TN/(TN+FN).
Args:
y_real (np.array): Array of ground truth values
y_pred (np.array): Array of predicted values
Returns:
(float): The negative predictive value
"""
TN = sum((y_pred == 0) & (y_real == 0))
FN = sum((y_pred == 0) & (y_real == 1))
if TN + FN == 0:
return 0.0
else:
return float(TN)/float(TN+FN)
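# ---------------------------------------------
# Worked sketch (illustrative values only; _npv_example is a hypothetical
# helper, not part of the original module): with
#   y_real = [0, 0, 1, 1, 0] and y_pred = [0, 1, 0, 0, 0]
# the predicted negatives are indices 0, 2, 3, 4, of which two are true
# negatives (0, 4) and two are false negatives (2, 3), so NPV = 2/4 = 0.5.
def _npv_example():
    y_real = np.array([0, 0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0, 0])
    return negative_predictive_value(y_real, y_pred)   # 0.5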
# ******************************************************************************************************************************
# params.model_choice_score_type must be a key in one of the dictionaries below:
regr_score_func = dict(r2 = r2_score, mae = mean_absolute_error, rmse = rms_error)
classif_score_func = dict(roc_auc = roc_auc_score, precision = precision_score, ppv = precision_score, recall = recall_score,
npv = negative_predictive_value, cross_entropy = log_loss, accuracy = accuracy_score, bal_accuracy = balanced_accuracy_score,
avg_precision = average_precision_score, mcc = matthews_corrcoef, kappa = cohen_kappa_score)
# The following score types are loss functions, meaning the result must be sign flipped so we can maximize it in model selection
loss_funcs = {'mae', 'rmse', 'cross_entropy'}
# The following score types for classification require predicted class probabilities rather than class labels as input
uses_class_probs = {'roc_auc', 'avg_precision', 'cross_entropy'}
# The following classification score types have an 'average' parameter to control how multilabel scores are combined
has_average_param = {'roc_auc', 'avg_precision', 'precision', 'recall'}
# The following classification score types allow the value 'binary' for the 'average' parameter to make them report scores
# for class 1 only
binary_average_param = {'precision', 'recall'}
# The following classification score types only support binary classifiers
binary_class_only = {'npv'}
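# Minimal sketch (illustrative only; _model_choice_sign_example is a
# hypothetical helper) of the sign convention noted above: loss-type scores
# such as 'rmse' are negated so that a larger model choice score is always
# better during model selection.
def _model_choice_sign_example(y_real, y_pred, score_type='rmse'):
    score = regr_score_func[score_type](y_real, y_pred)
    if score_type in loss_funcs:
        score = -score
    return score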
# ******************************************************************************************************************************
def create_perf_data(prediction_type, model_dataset, transformers, subset, **kwargs):
"""Factory function that creates the right kind of PerfData object for the given subset,
prediction_type (classification or regression) and split strategy (k-fold or train/valid/test).
Args:
prediction_type (str): classification or regression.
model_dataset (ModelDataset): Object representing the full dataset.
transformers (list): A list of transformer objects.
subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset of dataset for tracking predictions
**kwargs: Additional PerfData subclass arguments
Returns:
PerfData object
Raises:
ValueError: if split_strategy not in ['train_valid_test','k_fold_cv']
ValueError: prediction_type not in ['regression','classification']
"""
if subset == 'full':
split_strategy = 'train_valid_test'
else:
split_strategy = model_dataset.params.split_strategy
if prediction_type == 'regression':
if subset == 'full' or split_strategy == 'train_valid_test':
# Called simple because no need to track compound IDs across multiple training folds
return SimpleRegressionPerfData(model_dataset, transformers, subset, **kwargs)
elif split_strategy == 'k_fold_cv':
return KFoldRegressionPerfData(model_dataset, transformers, subset, **kwargs)
else:
raise ValueError('Unknown split_strategy %s' % split_strategy)
elif prediction_type == 'classification':
if subset == 'full' or split_strategy == 'train_valid_test':
return SimpleClassificationPerfData(model_dataset, transformers, subset, **kwargs)
elif split_strategy == 'k_fold_cv':
return KFoldClassificationPerfData(model_dataset, transformers, subset, **kwargs)
else:
raise ValueError('Unknown split_strategy %s' % split_strategy)
elif prediction_type == "hybrid":
return SimpleHybridPerfData(model_dataset, transformers, subset, **kwargs)
else:
raise ValueError('Unknown prediction type %s' % prediction_type)
# ****************************************************************************************
class PerfData(object):
"""Class with methods for accumulating prediction data over multiple cross-validation folds
and computing performance metrics after all folds have been run. Abstract class with
concrete subclasses for classification and regression models.
"""
# ****************************************************************************************
def __init__(self, model_dataset, subset):
"""Initialize any attributes that are common to all PerfData subclasses
"""
# ****************************************************************************************
def accumulate_preds(self, predicted_vals, ids, pred_stds=None):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def get_pred_values(self):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def get_real_values(self, ids=None):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def get_weights(self, ids=None):
"""Returns the dataset response weights as an (ncmpds, ntasks) array
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def compute_perf_metrics(self, per_task=False):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def get_prediction_results(self):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def _reshape_preds(self, predicted_vals):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
class RegressionPerfData(PerfData):
"""Class with methods for accumulating regression model prediction data over multiple
cross-validation folds and computing performance metrics after all folds have been run.
Abstract class with concrete subclasses for different split strategies.
Attributes:
set in __init__
num_tasks (int): Set to None, the number of tasks
num_cmpds (int): Set to None, the number of compounds
"""
# ****************************************************************************************
# class RegressionPerfData
def __init__(self, model_dataset, subset):
"""Initialize any attributes that are common to all RegressionPerfData subclasses.
Side effects:
num_tasks (int) is set as a RegressionPerfData attribute
            num_cmpds (int) is set as a RegressionPerfData attribute
"""
        # The code below is to document the attributes that methods in this class expect the
# subclasses to define. Subclasses don't actually call this superclass method.
self.num_tasks = None
self.num_cmpds = None
self.perf_metrics = []
self.model_score = None
self.weights = None
# ****************************************************************************************
def accumulate_preds(self, predicted_vals, ids, pred_stds=None):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def get_pred_values(self):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def compute_perf_metrics(self, per_task=False):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
# class RegressionPerfData
def model_choice_score(self, score_type='r2'):
"""Computes a score function based on the accumulated predicted values, to be used for selecting
the best training epoch and other hyperparameters.
Args:
score_type (str): The name of the scoring metric to be used, e.g. 'r2',
'neg_mean_squared_error', 'neg_mean_absolute_error', etc.; see
https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
and sklearn.metrics.SCORERS.keys() for a complete list of options.
Larger values of the score function indicate better models.
Returns:
score (float): A score function value. For multitask models, this will be averaged
over tasks.
"""
ids, pred_vals, stds = self.get_pred_values()
real_vals = self.get_real_values(ids)
weights = self.get_weights(ids)
scores = []
for i in range(self.num_tasks):
nzrows = np.where(weights[:,i] != 0)[0]
task_real_vals = np.squeeze(real_vals[nzrows,i])
task_pred_vals = np.squeeze(pred_vals[nzrows,i])
scores.append(regr_score_func[score_type](task_real_vals, task_pred_vals))
self.model_score = float(np.mean(scores))
if score_type in loss_funcs:
self.model_score = -self.model_score
return self.model_score
# ****************************************************************************************
# class RegressionPerfData
def get_prediction_results(self):
"""Returns a dictionary of performance metrics for a regression model.
The dictionary values should contain only primitive Python types, so that it can
be easily JSONified.
Returns:
pred_results (dict): dictionary of performance metrics for a regression model.
"""
pred_results = {}
# Get the mean and SD of R^2 scores over folds. If only single fold training was done, the SD will be None.
r2_means, r2_stds = self.compute_perf_metrics(per_task=True)
pred_results['r2_score'] = float(np.mean(r2_means))
if r2_stds is not None:
pred_results['r2_std'] = float(np.sqrt(np.mean(r2_stds ** 2)))
if self.num_tasks > 1:
pred_results['task_r2_scores'] = r2_means.tolist()
if r2_stds is not None:
pred_results['task_r2_stds'] = r2_stds.tolist()
# Compute some other performance metrics. We do these differently than R^2, in that we compute the
# metrics from the average predicted values, rather than computing them separately for each fold
# and then averaging the metrics. If people start asking for SDs of MAE and RMSE scores over folds,
# we'll change the code to compute all metrics the same way.
(ids, pred_vals, pred_stds) = self.get_pred_values()
real_vals = self.get_real_values(ids)
weights = self.get_weights(ids)
mae_scores = []
rms_scores = []
response_means = []
response_stds = []
# Iterate over tasks, call score funcs directly on weight masked values
for i in range(self.num_tasks):
nzrows = np.where(weights[:,i] != 0)[0]
task_real_vals = np.squeeze(real_vals[nzrows,i])
task_pred_vals = np.squeeze(pred_vals[nzrows,i])
mae_scores.append(regr_score_func['mae'](task_real_vals, task_pred_vals))
rms_scores.append(regr_score_func['rmse'](task_real_vals, task_pred_vals))
response_means.append(task_real_vals.mean().tolist())
response_stds.append(task_real_vals.std().tolist())
pred_results['mae_score'] = float(np.mean(mae_scores))
if self.num_tasks > 1:
pred_results['task_mae_scores'] = mae_scores
pred_results['rms_score'] = float(np.mean(rms_scores))
if self.num_tasks > 1:
pred_results['task_rms_scores'] = rms_scores
# Add model choice score if one was computed
if self.model_score is not None:
pred_results['model_choice_score'] = self.model_score
pred_results['num_compounds'] = self.num_cmpds
pred_results['mean_response_vals'] = response_means
pred_results['std_response_vals'] = response_stds
return pred_results
# ****************************************************************************************
# class RegressionPerfData
def _reshape_preds(self, predicted_vals):
"""Reshape an array of regression model predictions to a standard (ncmpds, ntasks)
format. Checks that the task dimension matches what we expect for the dataset.
Args:
predicted_vals (np.array): array of regression model predictions.
Returns:
predicted_vals (np.array): reshaped array
Raises:
ValueError: if the dimensions of the predicted value do not match the dimensions of num_tasks for
RegressionPerfData
"""
# For regression models, predicted_vals can be 1D, 2D or 3D array depending on the type of
# underlying DeepChem model.
dim = len(predicted_vals.shape)
ncmpds = predicted_vals.shape[0]
if dim == 1:
# Single task model
predicted_vals = predicted_vals.reshape((ncmpds,1))
ntasks = 1
else:
ntasks = predicted_vals.shape[1]
if ntasks != self.num_tasks:
raise ValueError("Predicted value dimensions don't match num_tasks for RegressionPerfData")
if dim == 3:
# FCNet models generate predictions with an extra dimension, possibly for the number of
# classes, which is always 1 for regression models.
predicted_vals = predicted_vals.reshape((ncmpds,ntasks))
return predicted_vals
# ****************************************************************************************
# ****************************************************************************************
class HybridPerfData(PerfData):
"""Class with methods for accumulating regression model prediction data over multiple
cross-validation folds and computing performance metrics after all folds have been run.
Abstract class with concrete subclasses for different split strategies.
Attributes:
set in __init__
num_tasks (int): Set to None, the number of tasks
num_cmpds (int): Set to None, the number of compounds
"""
# ****************************************************************************************
# class HybridPerfData
def __init__(self, model_dataset, subset):
"""Initialize any attributes that are common to all HybridPerfData subclasses.
Side effects:
num_tasks (int) is set as a HybridPerfData attribute
            num_cmpds (int) is set as a HybridPerfData attribute
"""
        # The code below is to document the attributes that methods in this class expect the
# subclasses to define. Subclasses don't actually call this superclass method.
self.num_tasks = 2
self.num_cmpds = None
self.perf_metrics = []
self.model_score = None
self.weights = None
# ****************************************************************************************
def accumulate_preds(self, predicted_vals, ids, pred_stds=None):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def get_pred_values(self):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
def compute_perf_metrics(self, per_task=False):
"""
Raises:
NotImplementedError: The method is implemented by subclasses
"""
raise NotImplementedError
# ****************************************************************************************
# class HybridPerfData
def model_choice_score(self, score_type='r2'):
"""Computes a score function based on the accumulated predicted values, to be used for selecting
the best training epoch and other hyperparameters.
Args:
score_type (str): The name of the scoring metric to be used, e.g. 'r2', 'mae', 'rmse'
Returns:
score (float): A score function value. For multitask models, this will be averaged
over tasks.
"""
ids, pred_vals, stds = self.get_pred_values()
real_vals = self.get_real_values(ids)
weights = self.get_weights(ids)
scores = []
pos_ki = np.where(np.isnan(real_vals[:, 1]))[0]
pos_bind = np.where(~np.isnan(real_vals[:, 1]))[0]
# score for pKi/IC50
nzrows = np.where(weights[:, 0] != 0)[0]
rowki = np.intersect1d(nzrows, pos_ki)
rowbind = np.intersect1d(nzrows, pos_bind)
ki_real_vals = np.squeeze(real_vals[rowki,0])
ki_pred_vals = np.squeeze(pred_vals[rowki,0])
bind_real_vals = np.squeeze(real_vals[rowbind,0])
bind_pred_vals = np.squeeze(pred_vals[rowbind,0])
if len(rowki) > 0:
scores.append(regr_score_func[score_type](ki_real_vals, ki_pred_vals))
if len(rowbind) > 0:
scores.append(regr_score_func[score_type](bind_real_vals, bind_pred_vals))
else:
                # if all values are dose-response activities, reuse the score computed above.
scores.append(scores[0])
elif len(rowbind) > 0:
# all values are single concentration activities.
scores.append(regr_score_func[score_type](bind_real_vals, bind_pred_vals))
scores.append(scores[0])
self.model_score = float(np.mean(scores))
if score_type in loss_funcs:
self.model_score = -self.model_score
return self.model_score
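# --- Editor's illustrative sketch (standalone; regr_score_func and loss_funcs are assumed to be
# module-level lookup tables, so the names below are hypothetical stand-ins, not the real ones) ---
# model_choice_score dispatches on a metric-name dict and negates loss-type metrics so that
# "higher is better" holds uniformly during model selection. Roughly:
import numpy as np
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
example_regr_score_func = {
    'r2': r2_score,
    'mae': mean_absolute_error,
    'rmse': lambda yt, yp: np.sqrt(mean_squared_error(yt, yp)),
}
example_loss_funcs = {'mae', 'rmse'}          # lower-is-better metrics get negated
y_true, y_pred = np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2])
score = example_regr_score_func['rmse'](y_true, y_pred)
if 'rmse' in example_loss_funcs:
    score = -score                            # now higher is better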
# ****************************************************************************************
# class HybridPerfData
def get_prediction_results(self):
"""Returns a dictionary of performance metrics for a regression model.
The dictionary values should contain only primitive Python types, so that it can
be easily JSONified.
Returns:
pred_results (dict): dictionary of performance metrics for a regression model.
"""
pred_results = {}
# Get the mean and SD of R^2 scores over folds. If only single fold training was done, the SD will be None.
r2_means, r2_stds = self.compute_perf_metrics(per_task=True)
pred_results['r2_score'] = float(np.mean(r2_means))
if r2_stds is not None:
pred_results['r2_std'] = float(np.sqrt(np.mean(r2_stds ** 2)))
if self.num_tasks > 1:
pred_results['task_r2_scores'] = r2_means.tolist()
if r2_stds is not None:
pred_results['task_r2_stds'] = r2_stds.tolist()
# Compute some other performance metrics. We do these differently than R^2, in that we compute the
# metrics from the average predicted values, rather than computing them separately for each fold
# and then averaging the metrics. If people start asking for SDs of MAE and RMSE scores over folds,
# we'll change the code to compute all metrics the same way.
(ids, pred_vals, pred_stds) = self.get_pred_values()
real_vals = self.get_real_values(ids)
weights = self.get_weights(ids)
mae_scores = []
rms_scores = []
response_means = []
response_stds = []
pos_ki = np.where(np.isnan(real_vals[:, 1]))[0]
pos_bind = np.where(~np.isnan(real_vals[:, 1]))[0]
# score for pKi/IC50
nzrows = np.where(weights[:, 0] != 0)[0]
rowki = np.intersect1d(nzrows, pos_ki)
rowbind = np.intersect1d(nzrows, pos_bind)
ki_real_vals = np.squeeze(real_vals[rowki,0])
ki_pred_vals = np.squeeze(pred_vals[rowki,0])
bind_real_vals = np.squeeze(real_vals[rowbind,0])
bind_pred_vals = np.squeeze(pred_vals[rowbind,0])
if len(rowki) > 0:
mae_scores.append(regr_score_func['mae'](ki_real_vals, ki_pred_vals))
rms_scores.append(regr_score_func['rmse'](ki_real_vals, ki_pred_vals))
if len(rowbind) > 0:
mae_scores.append(regr_score_func['mae'](bind_real_vals, bind_pred_vals))
rms_scores.append(regr_score_func['rmse'](bind_real_vals, bind_pred_vals))
else:
                # if all values are dose-response activities, reuse the MAE and RMSE computed above.
mae_scores.append(mae_scores[0])
rms_scores.append(rms_scores[0])
elif len(rowbind) > 0:
# all values are single concentration activities.
mae_scores.append(regr_score_func['mae'](bind_real_vals, bind_pred_vals))
rms_scores.append(regr_score_func['rmse'](bind_real_vals, bind_pred_vals))
mae_scores.append(mae_scores[0])
rms_scores.append(rms_scores[0])
response_means.append(ki_real_vals.mean().tolist())
response_stds.append(ki_real_vals.std().tolist())
response_means.append(bind_real_vals.mean().tolist())
response_stds.append(bind_real_vals.std().tolist())
pred_results['mae_score'] = float(np.mean(mae_scores))
if self.num_tasks > 1:
pred_results['task_mae_scores'] = mae_scores
pred_results['rms_score'] = float(
|
np.mean(rms_scores)
|
numpy.mean
|
#!/usr/local/bin/python3
# Author: <NAME> (https://github.com/linzebing)
from datetime import datetime, date
import math
import numpy as np
import time
import sys
import requests
import re
from ortools.linear_solver import pywraplp
if len(sys.argv) == 1:
symbols = ['TMF', 'UPRO']
else:
symbols = sys.argv[1].split(',')
for i in range(len(symbols)):
symbols[i] = symbols[i].strip().upper()
num_trading_days_per_year = 252
window_size = 20
date_format = "%Y-%m-%d"
end_timestamp = int(time.time())
start_timestamp = int(end_timestamp - (1.4 * (window_size + 1) + 4) * 86400)
def get_volatility_and_performance(symbol,cookie,crumb):
download_url = "https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history&crumb={}".format(symbol, start_timestamp, end_timestamp,crumb)
lines = requests.get(download_url, cookies={'B': cookie}).text.strip().split('\n')
# print(cookie)
# print(crumb)
# print(lines)
assert lines[0].split(',')[0] == 'Date'
assert lines[0].split(',')[4] == 'Close'
prices = []
for line in lines[1:]:
prices.append(float(line.split(',')[4]))
prices.reverse()
volatilities_in_window = []
for i in range(window_size):
volatilities_in_window.append(math.log(prices[i] / prices[i+1]))
most_recent_date = datetime.strptime(lines[-1].split(',')[0], date_format).date()
assert (date.today() - most_recent_date).days <= 4, "today is {}, most recent trading day is {}".format(date.today(), most_recent_date)
return np.std(volatilities_in_window, ddof = 1) * np.sqrt(num_trading_days_per_year), prices[0] / prices[window_size] - 1.0, prices[0]
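# --- Editor's illustrative sketch (standalone): the annualization used above, on made-up prices.
# Daily log-return volatility is scaled to an annual figure by sqrt(252 trading days). ---
import math
import numpy as np
prices_example = [101.0, 100.5, 100.0, 99.0, 98.5]          # most recent price first, as above
log_returns = [math.log(prices_example[i] / prices_example[i + 1])
               for i in range(len(prices_example) - 1)]
annualized_vol = np.std(log_returns, ddof=1) * np.sqrt(252)
window_return = prices_example[0] / prices_example[-1] - 1.0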
def get_cookie():
url = 'https://finance.yahoo.com/quote/VOO/history?p=VOO'
r = requests.get(url)
txt = r.text
cookie = r.cookies['B']
pattern = re.compile('.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}')
for line in txt.splitlines():
m = pattern.match(line)
if m is not None:
crumb = m.groupdict()['crumb']
return cookie,crumb
def create_model(epsilon=0.01):
alpha[0]/alpha[1]
data={}
data['constraint_coeffs']=[
[current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1],current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1]],
[current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1],current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1]],
[current_prices[0],current_prices[1],current_prices[0],current_prices[1]],
[current_prices[0],current_prices[1],0,0],
[0,0,current_prices[0],current_prices[1]]
]
data['lb']=[-np.inf, 0,0,0,0]
data['ub']=[0, np.inf,S,S_Tax,S_IRA]
data['obj_coeffs']=[current_prices[0],current_prices[1],current_prices[0],current_prices[1]]
data['xub']=[np.floor(S_Tax/current_prices[0]),np.floor(S_Tax/current_prices[1]),
|
np.floor(S_IRA/current_prices[0])
|
numpy.floor
|
# coding: utf-8
# In[1]:
import sklearn.mixture as mix
import scipy as sp
import numpy as np
import copy
'''
num: number of data points
dim: feature dimensionality of the data
state = {'FEATURE', 'LABEL', 'CLUSTER', 'SCORE', 'GM'}: dictionary
feature: list of the selected features
label: labels assigned when the data are clustered
clusters: number of clusters to split the data into, determined by Bouman's algorithm
score: evaluation score
'''
def scale(data):
num = data.shape[0]
dim = data.shape[1]
    # Compute the mean and standard deviation of each attribute
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
    # Standardize the data for each attribute
data = np.array([[(data[j,i]-mu[i])/sigma[i] for i in range(dim)] for j in range(num)])
return data
def score_likelihood(data, GM):
    '''
    Return a higher-is-better likelihood-based score.
    Note that BIC and AIC are lower-is-better, while score (the plain log-likelihood) is higher-is-better.
    Inputs
    data: data to analyze
    GM: Gaussian mixture model
    Output
    score: evaluation score (ML criterion)
    '''
score = -GM.bic(data) # score?
return score
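# --- Editor's illustrative sketch (standalone): the sign convention on a toy mixture. BIC from
# sklearn's GaussianMixture is lower-is-better, so negating it gives the higher-is-better score
# returned by score_likelihood. ---
import numpy as np
import sklearn.mixture as mix
rng = np.random.RandomState(0)
toy = np.concatenate([rng.normal(-3, 1, (100, 2)), rng.normal(3, 1, (100, 2))])
gm = mix.GaussianMixture(n_components=2, random_state=0).fit(toy)
toy_score = -gm.bic(toy)     # higher is better, comparable across candidate models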
def score_ss(data, label, clusters): # evaluation function for clustering results
    '''
    Compute the between-class scatter Sb and the within-class scatter Sw
    and return trace(Sw^{-1} Sb) as the score.
    Inputs
    data: data to analyze
    label: cluster labels of the data
    clusters: number of clusters
    Output
    score: evaluation score (SS criterion)
    '''
num = data.shape[0]
dim = data.shape[1]
    Sw = np.zeros((dim,dim)) # within-class covariance matrix
    Sb = np.zeros((dim,dim)) # between-class covariance matrix
    # dataset split into one subset per cluster
subdata = np.array([data[label[:]==i, :] for i in range(clusters)])
    # probability of data falling into each cluster (fraction of the data)
pi = np.array([subdata[i].shape[0]/num for i in range(clusters)])
for i in range(clusters):
if subdata[i].shape[0] != 1 or subdata[i].shape[1] != 1:
Sw += pi[i] * np.cov(subdata[i], rowvar=0)
    mean_all = np.mean(data, axis=0) # overall mean vector
for i in range(clusters):
mean_diff = np.matrix(
|
np.mean(subdata[i], axis=0)
|
numpy.mean
|
"""
Plot temperature profile difference between HIT and FIT experiments.
These are sea ice thickness perturbation experiments using WACCM4.
Notes
-----
Author : <NAME>
Date : 14 August 2017
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as c
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import nclcmaps as ncm
import datetime
import read_MonthlyOutput as MO
import calc_Utilities as UT
### Define directories
directorydata = '/surtsey/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/'
#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting temperature - %s----' % titletime)
### Alott time series
year1 = 1900
year2 = 2000
years = np.arange(year1,year2+1,1)
### Call function for vertical temperature data
lat,lon,time,lev,th = MO.readExperi(directorydata,'TEMP','HIT','profile')
lat,lon,time,lev,tf = MO.readExperi(directorydata,'TEMP','FIT','profile')
### Separate per periods (ON,DJ,FM)
th_on = np.nanmean(th[:,9:11,:,:,:],axis=1)
tf_on = np.nanmean(tf[:,9:11,:,:,:],axis=1)
th_dj,tf_dj = UT.calcDecJan(th,tf,lat,lon,'profile',lev.shape[0])
th_fm = np.nanmean(th[:,1:3,:,:,:],axis=1)
tf_fm = np.nanmean(tf[:,1:3,:,:,:],axis=1)
#### Calculate period differences
diff_on = np.nanmean((tf_on-th_on),axis=0)
diff_dj = np.nanmean((tf_dj-th_dj),axis=0)
diff_fm =
|
np.nanmean((tf_fm-th_fm),axis=0)
|
numpy.nanmean
|
import cv2
import numpy as np
import os
import sys
this_dir = os.path.dirname(__file__)
sys.path.append(this_dir)
import params as prm
class Remapper():
def __init__(self):
self.right_shoulder_idx = 2
self.left_shoulder_idx = 5
self.neck_idx = 1
def check_overshoot(self, coord, size = prm.rscam_size):
v_in, u_in = coord[:,0], coord[:,1]
v_in[v_in >= size[1]] = size[1] - 1
u_in[u_in >= size[0]] = size[0] - 1
v_in[v_in < 0] = 0
u_in[u_in < 0] = 0
return np.array([v_in, u_in]).astype(np.int).transpose()
def _move_frame(self,coord,map_u,map_v):
v, u = coord[:,0], coord[:,1]
u_moved = np.around(map_u[v,u]).astype(np.int)
v_moved = np.around(map_v[v,u]).astype(np.int)
return np.array([v_moved, u_moved]).transpose()
def _move_depth_frame(self,coord):
n = coord.shape[0]
u = np.array([coord[:,1],coord[:,0],np.repeat(1,n)])
v = np.transpose(np.matmul(prm.homography, u))
u_moved = np.around(v[:,0]/v[:,2]).astype(np.int)
v_moved = np.around(v[:,1]/v[:,2]).astype(np.int)
return np.array([v_moved, u_moved]).transpose()
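# --- Editor's illustrative sketch (standalone): applying a 3x3 homography to pixel coordinates,
# the same arithmetic _move_depth_frame performs. H_example is made up; the real matrix lives in
# params.homography. ---
import numpy as np
H_example = np.array([[1.0, 0.0, 5.0],
                      [0.0, 1.0, -3.0],
                      [0.0, 0.0, 1.0]])       # pure translation, for illustration only
coord = np.array([[10, 20], [30, 40]])        # rows of (v, u), matching the class convention
n = coord.shape[0]
uvw = np.matmul(H_example, np.array([coord[:, 1], coord[:, 0], np.repeat(1, n)])).transpose()
u_moved = np.around(uvw[:, 0] / uvw[:, 2]).astype(int)
v_moved = np.around(uvw[:, 1] / uvw[:, 2]).astype(int)
mapped = np.array([v_moved, u_moved]).transpose()   # back to rows of (v, u)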
def run_remapper(self, keypoints, verts):
equi_coord = self.check_overshoot(keypoints,prm.fisheye_size)
# equirectangular frame -> fisheye camera frame
fisheye_coord = self._move_frame(equi_coord, prm.equi_u, prm.equi_v)
fisheye_coord = self.check_overshoot(fisheye_coord,prm.fisheye_size)
# fisheye frame -> perspective camera frame
per_coord = self._move_frame(fisheye_coord, prm.per_u_rev, prm.per_v_rev)
per_coord = self.check_overshoot(per_coord,prm.fisheye_size)
# perspective frame -> depth camera frame
depth_coord = self._move_depth_frame(per_coord)
depth_coord = self.check_overshoot(depth_coord,prm.rscam_size)
# save data
self.equi_coord = equi_coord
self.per_coord = per_coord
self.depth_coord = depth_coord
# remap the points on zero depth (shadow in depth camera)
self.remap_invalid_keypoints(verts)
return self.depth_coord
def get_right_shoulder_2D(self):
idx = self.right_shoulder_idx
return self.depth_coord[idx,:]
def get_left_shoulder_2D(self):
idx = self.left_shoulder_idx
return self.depth_coord[idx,:]
def get_neck_2D(self):
idx = self.neck_idx
return self.depth_coord[idx,:]
def get_keypoints_2D(self):
return self.depth_coord
def _set_right_shoulder_2D(self, val):
idx = self.right_shoulder_idx
self.depth_coord[idx,:] = val
def _set_left_shoulder_2D(self, val):
idx = self.left_shoulder_idx
self.depth_coord[idx,:] = val
def _set_neck_2D(self, val):
idx = self.neck_idx
self.depth_coord[idx,:] = val
def _nearest_valid(self,valid_idx,body_coord):
distance = valid_idx - body_coord
distance =
|
np.sum(distance**2,axis=1)
|
numpy.sum
|
import cv2
import imutils
import numpy as np
import time
# from face_detection import FaceDetection
# from scipy import signal
import sys
from numpy.linalg import inv
# import dlib
import imutils
import time
import skin_detector
def face_detect_and_thresh(frame):
skinM = skin_detector.process(frame)
skin = cv2.bitwise_and(frame, frame, mask = skinM)
# cv2.imshow("skin2",skin)
# cv2.waitKey(1)
return skin,skinM
def spartialAverage(thresh,frame):
a=list(np.argwhere(thresh>0))
# x=[i[0] for i in a]
# y=[i[1] for i in a]
# p=[x,y]
if a:
ind_img=(np.vstack((a)))
else:
return 0,0,0
sig_fin=np.zeros([np.shape(ind_img)[0],3])
test_fin=[]
for i in range(np.shape(ind_img)[0]):
sig_temp=frame[ind_img[i,0],ind_img[i,1],:]
sig_temp = sig_temp.reshape((1, 3))
if sig_temp.any()!=0:
sig_fin=
|
np.concatenate((sig_fin,sig_temp))
|
numpy.concatenate
|
import numpy as np
import numpy.matlib as nm
import matplotlib.mlab as mlab
from svgd import SVGD
class MVN:
def __init__(self, mu, var):
self.mu = mu
self.var = var
def dlnprob(self, theta):
        # gradient of the Gaussian log-density: d/dtheta log N(theta; mu, var) = -(theta - mu) / var
        return -1 * (theta - self.mu) * (1.0 / self.var)
def plot_results(mu, var, theta, bins=20):
import matplotlib.pyplot as plt
count, bins, _ = plt.hist(theta, bins=bins, density=True)
plt.plot(bins, 1/(np.sqrt(var) * np.sqrt(2 * np.pi)) *
np.exp( - (bins - mu)**2 / (2 * np.sqrt(var)**2) ),
linewidth=2, color='r')
plt.show()
def animate_results(mu, var, theta_hist, n_bins=20):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.animation as animation
fig, ax = plt.subplots()
# histogram our data with numpy
data = theta_hist[:, 0]
n, bins = np.histogram(data, bins=n_bins, density=True)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
nverts = nrects*(1 + 3 + 1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
verts[0::5, 0] = left
verts[0::5, 1] = bottom
verts[1::5, 0] = left
verts[1::5, 1] = top
verts[2::5, 0] = right
verts[2::5, 1] = top
verts[3::5, 0] = right
verts[3::5, 1] = bottom
barpath = path.Path(verts, codes)
patch = patches.PathPatch(
barpath, facecolor='blue', edgecolor='blue', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(mu - 3*np.sqrt(var), mu + 3*np.sqrt(var))
ax.set_ylim(bottom.min(), 0.5)
x = np.linspace(mu - 3*np.sqrt(var), mu + 3*np.sqrt(var), 100)
plt.plot(x, mlab.normpdf(x, mu, np.sqrt(var)))
def animate(i):
# simulate new data coming in
data = theta_hist[:,int(i*25)]
n, bins = np.histogram(data, bins=n_bins, density=True)
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
nverts = nrects*(1 + 3 + 1)
verts = np.zeros((nverts, 2))
verts[0::5, 0] = left
verts[0::5, 1] = bottom
verts[1::5, 0] = left
verts[1::5, 1] = top
verts[2::5, 0] = right
verts[2::5, 1] = top
verts[3::5, 0] = right
verts[3::5, 1] = bottom
[p.remove() for p in reversed(ax.patches)]
barpath = path.Path(verts, codes)
patch = patches.PathPatch(
barpath, facecolor='blue', edgecolor='blue', alpha=0.5)
ax.add_patch(patch)
# ax.set_xlim(left[0], right[-1])
# ax.set_ylim(bottom.min(), top.max())
# plt.plot(bins, 1/(np.sqrt(var) * np.sqrt(2 * np.pi)) *
# np.exp( - (bins - mu)**2 / (2 * np.sqrt(var)**2) ),
# linewidth=2, color='r')
return [patch, ]
ani = animation.FuncAnimation(fig, animate, int(theta_hist.shape[1]/25), repeat=False, blit=False)
plt.show()
if __name__ == '__main__':
var = 1
mu = 0
model = MVN(mu, var)
x0 = np.random.normal(0,1, [150,1])
theta, theta_hist = SVGD().update(x0, model.dlnprob, n_iter=2000, stepsize=0.01, debug=True)
print("ground truth: mu = {} var = {}".format(mu, var))
print("svgd: mu = {} var = {}".format(round(
|
np.mean(theta)
|
numpy.mean
|
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats import multivariate_normal
if os.path.isdir('scripts'):
os.chdir('scripts')
fs = 12
np.random.seed(0)
true_mu1 = -10
true_mu2 = 10
true_pi = 0.5
sigmas = np.array([5])
obs = None
for sigmai in sigmas:
true_sigma = sigmai
n_obs = 100
obs = ([true_mu1 + true_sigma*np.random.randn(1, n_obs), true_mu2 + true_sigma*
|
np.random.randn(1, n_obs)
|
numpy.random.randn
|
from astropy.coordinates import SkyCoord, AltAz
import astropy.units as u
from ctapipe.io import event_source
from ctapipe.image.cleaning import tailcuts_clean
from ctapipe.calib import CameraCalibrator
from ctapipe.utils.datasets import get_dataset_path
import matplotlib.pyplot as plt
import numpy as np
from ctapipe.coordinates import CameraFrame, NominalFrame
cleaning_level = {
'LSTCam': (3.5, 7.5, 2), # ?? (3, 6) for Abelardo...
'FlashCam': (4, 8, 2), # there is some scaling missing?
'ASTRICam': (5, 7, 2),
}
input_url = get_dataset_path('gamma_test_large.simtel.gz')
with event_source(input_url=input_url) as source:
calibrator = CameraCalibrator(subarray=source.subarray)
for event in source:
calibrator(event)
nominal_frame = NominalFrame(
origin=SkyCoord(alt=70 * u.deg, az=0 * u.deg, frame=AltAz)
)
nom_fov_lon = []
nom_fov_lat = []
photons = []
subarray = source.subarray
for tel_id, dl1 in event.dl1.tel.items():
geom = subarray.tels[tel_id].camera.geometry
focal_length = subarray.tels[tel_id].optics.equivalent_focal_length
image = dl1.image
telescope_pointing = SkyCoord(
alt=event.pointing.tel[tel_id].altitude,
az=event.pointing.tel[tel_id].azimuth,
frame=AltAz(),
)
camera_frame = CameraFrame(
telescope_pointing=telescope_pointing, focal_length=focal_length
)
boundary, picture, min_neighbors = cleaning_level[geom.camera_name]
clean = tailcuts_clean(
geom,
image,
boundary_thresh=boundary,
picture_thresh=picture,
min_number_picture_neighbors=min_neighbors
)
cam_coords = SkyCoord(
geom.pix_x[clean],
geom.pix_y[clean],
frame=camera_frame
)
nom = cam_coords.transform_to(nominal_frame)
nom_fov_lon.append(nom.fov_lon.to_value(u.deg))
nom_fov_lat.append(nom.fov_lat.to_value(u.deg))
photons.append(image[clean])
nom_fov_lon = np.concatenate(nom_fov_lon)
nom_fov_lat = np.concatenate(nom_fov_lat)
photons =
|
np.concatenate(photons)
|
numpy.concatenate
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import numpy as np
# ========================================================================= #
# Better Choice #
# ========================================================================= #
def random_choice_prng(a, size=None, replace=True, seed: int = None):
# generate a random seed
if seed is None:
seed = np.random.randint(0, 2**32)
# create seeded pseudo random number generator
# - built in np.random.choice cannot handle large values: https://github.com/numpy/numpy/issues/5299#issuecomment-497915672
# - PCG64 is the default: https://numpy.org/doc/stable/reference/random/bit_generators/index.html
# - PCG64 has good statistical properties and is fast: https://numpy.org/doc/stable/reference/random/performance.html
g = np.random.Generator(np.random.PCG64(seed=seed))
# sample indices
choices = g.choice(a, size=size, replace=replace)
# done!
return choices
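# Editor's illustrative sketch: typical use of random_choice_prng. A fixed seed makes the draws
# reproducible, and the Generator/PCG64 path copes with populations beyond the 32-bit limit that
# trips up the legacy np.random.choice.
example_idx_a = random_choice_prng(2**40, size=5, seed=42)
example_idx_b = random_choice_prng(2**40, size=5, seed=42)
assert (example_idx_a == example_idx_b).all()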
# ========================================================================= #
# Random Ranges #
# ========================================================================= #
def randint2(a_low, a_high, b_low, b_high, size=None):
"""
Like np.random.randint, but supports two ranges of values.
    Every value in the union of the two ranges is sampled with equal probability.
- a: [a_low, a_high) -> including a_low, excluding a_high!
- b: [b_low, b_high) -> including b_low, excluding b_high!
"""
# convert
a_low, a_high = np.array(a_low), np.array(a_high)
b_low, b_high = np.array(b_low), np.array(b_high)
# checks
assert np.all(a_low <= a_high), f'a_low <= a_high | {a_low} <= {a_high}'
assert np.all(b_low <= b_high), f'b_low <= b_high | {b_low} <= {b_high}'
assert np.all(a_high <= b_low), f'a_high <= b_low | {a_high} <= {b_low}'
# compute
da = a_high - a_low
db = b_high - b_low
d = da + db
assert np.all(d > 0), f'(a_high - a_low) + (b_high - b_low) > 0 | {d} = ({a_high} - {a_low}) + ({b_high} - {b_low}) > 0'
# sampled
offset = np.random.randint(0, d, size=size)
offset += (da <= offset) * (b_low - a_high)
return a_low + offset
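# Editor's illustrative sketch: randint2 samples uniformly from the union of two disjoint integer
# ranges, here [0, 3) and [7, 10), so every value in {0, 1, 2, 7, 8, 9} has probability 1/6.
example_samples = randint2(0, 3, 7, 10, size=1000)
assert np.all((example_samples < 3) | (example_samples >= 7))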
def sample_radius(value, low, high, r_low, r_high):
"""
    Sample around the given value (low <= value < high);
    the resampled value will lie in the same range.
- sampling occurs in a radius around the value
r_low <= radius < r_high
"""
value =
|
np.array(value)
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 15:35:13 2018
@author: ben
"""
from osgeo import gdal, gdalconst, osr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as pColors
import h5py
import scipy.interpolate as si
class mapData(object):
def __init__(self):
self.x=None
self.y=None
self.z=None
self.projection=None
self.filename=None
self.extent=None
self.interpolator=None
self.nan_interpolator=None
def __copy__(self):
temp=mapData()
for field in ['x','y','z','projection','filename','extent']:
setattr(temp, field, getattr(self, field))
return temp
def copy(self):
return self.__copy__()
def update_extent(self):
self.extent=[np.min(self.x), np.max(self.x), np.min(self.y), np.max(self.y)]
def from_geotif(self, file, bands=None, bounds=None, skip=1):
"""
Read a raster from a DEM file
"""
ds=gdal.Open(file, gdalconst.GA_ReadOnly)
GT=ds.GetGeoTransform()
proj=ds.GetProjection()
if bands is None:
n_bands=ds.RasterCount
bands=np.arange(n_bands, dtype=int)+1
if not isinstance(bands, (list, tuple, np.ndarray)):
bands=[bands]
# get geolocation info, allocate outputs
band=ds.GetRasterBand(1)
nodataValue=band.GetNoDataValue()
# ii and jj are the pixel center coordinates. 0,0 in GDAL is the upper-left
# corner of the first pixel.
ii=np.arange(0, band.XSize)+0.5
jj=np.arange(0, band.YSize)+0.5
x=GT[0]+GT[1]*ii
y=GT[3]+GT[5]*jj
if bounds is not None:
cols = np.where(( x>=bounds[0][0] ) & ( x<= bounds[0][1] ))[0]
rows = np.where(( y>=bounds[1][0] ) & ( y<= bounds[1][1] ))[0]
else:
rows=np.arange(band.YSize, dtype=int)
cols=np.arange(band.XSize, dtype=int)
z=list()
for band_num in bands:
band=ds.GetRasterBand(int(band_num))
z.append(band.ReadAsArray(int(cols[0]), int(rows[0]), int(cols[-1]-cols[0]+1), int(rows[-1]-rows[0]+1))[::-1,:])
if skip > 1:
z[-1]=z[-1][::skip, ::skip]
if len(bands)==1:
z=z[0]
else:
z=np.stack(z, axis=2)
ds=None
if skip >1:
cols=cols[::skip]
rows=rows[::skip]
if nodataValue is not None and np.isfinite(nodataValue):
bad = z==np.array(nodataValue).astype(z.dtype)
z = np.float64(z)
z[bad] = np.NaN
else:
z = np.float64(z)
x=x[cols]
y=y[rows]
self.x=x
self.y=y[::-1]
self.z=z
self.projection=proj
self.update_extent()
return self
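# --- Editor's illustrative sketch (standalone, made-up geotransform): the pixel-center arithmetic
# used in from_geotif. GT[0], GT[3] give the upper-left corner, GT[1], GT[5] the pixel sizes
# (GT[5] is negative for north-up rasters because rows run downward). ---
GT_example = (100000.0, 30.0, 0.0, 200000.0, 0.0, -30.0)
col, row = 0.5, 0.5                        # center of the first pixel
x0 = GT_example[0] + GT_example[1] * col   # -> 100015.0
y0 = GT_example[3] + GT_example[5] * row   # -> 199985.0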
def from_h5(self, h5_file, field_mapping={}, group='/', bounds=None, skip=1):
self.filename=h5_file
fields={'x':'x','y':'y','z':'z','t':'t'}
fields.update(field_mapping)
t=None
with h5py.File(h5_file,'r') as h5f:
x=np.array(h5f[group+fields['x']])
y=np.array(h5f[group+fields['y']])
if fields['t'] in h5f[group]:
t=np.array(h5f[group+fields['t']])
if bounds is not None:
cols = np.where(( x>=bounds[0][0] ) & ( x<= bounds[0][1] ))[0]
rows = np.where(( y>=bounds[1][0] ) & ( y<= bounds[1][1] ))[0]
else:
rows=np.arange(y.size, dtype=int)
cols=np.arange(x.size, dtype=int)
if len(rows) > 0 and len(cols) > 0:
zfield=h5f[group+fields['z']]
if len(zfield.shape) == 2:
self.z=np.array(h5f[group+fields['z']][rows[0]:rows[-1]+1, cols[0]:cols[-1]+1])
else:
self.z=
|
np.array(zfield[rows[0]:rows[-1]+1, cols[0]:cols[-1]+1,:])
|
numpy.array
|
import datetime
import pandas as pd
import numpy as np
from utils import running_mean, reduce_mem_usage
from data.tweedie import get_tweedie_power
from data.out_of_stock import out_of_stock_zeroes
from sklearn.preprocessing import LabelEncoder
class preprocessData:
def __init__(self, path_data, test_size=28):
now = datetime.datetime.now()
self.path_data = path_data
self.test_size = test_size
self._import_data()
print('calculate scaling factor')
self._scaling_factor_and_rho()
print('calculate corrected sales and trend normalization')
self._create_corrected_sales_and_trend_normalization_df()
print('unpivot sales, {}s so far'.format((datetime.datetime.now()-now).seconds))
self._melt_sales_df()
print('feature engineering calendar')
self._add_features_event_calendar()
print('unpivot calendar, {}s so far'.format((datetime.datetime.now()-now).seconds))
self._melt_calendar_df()
print('feature engineering snap')
self._add_features_snap_calendar()
print("reducing memory")
self._reduce_memory()
print("inferring probable missing data in prices, {}s so far".format((datetime.datetime.now()-now).seconds))
self._infer_missing_prices_df()
print("feature engineering prices")
self._add_features_price()
print('join data')
self._join_datasets()
print('add out of stock flag data')
self._finalize()
        print('done in {}s'.format((datetime.datetime.now()-now).seconds))
def _import_data(self):
self.calendar_df = pd.read_csv(self.path_data+'calendar_enriched.csv')
self.calendar_raw_df = self.calendar_df.copy()
self.sales_df = pd.read_csv(self.path_data+'sales_train_evaluation.csv')
self.sales_raw_df = self.sales_df.copy()
self.prices_df = pd.read_csv(self.path_data+'sell_prices.csv')
self.sample_df = pd.read_csv(self.path_data+'sample_submission.csv')
def _scaling_factor_and_rho(self):
scale_list = []
rho_list = []
oos_list = []
for i, r_raw in self.sales_df.drop(['id','item_id','dept_id','cat_id','store_id','state_id'],axis=1).iterrows():
r_raw = r_raw.values
r = r_raw[np.argmax(r_raw != 0):].copy()
#Scale
r1 = r[:-1]
r2 = r[1:]
r_diff = (r1-r2)**2
r_sum = r_diff.mean()
r_sum = np.sqrt(r_sum)
scale_list.append(r_sum)
#Rho
rho = get_tweedie_power(r)
rho_list.append(rho)
#out of stock
oos = out_of_stock_zeroes(r_raw)
oos_list.append(oos.oos_array)
self.oos_flag_df = pd.DataFrame(oos_list,columns=self.sales_df.columns[6:])
self.oos_flag_df = self._add_nan_for_test(self.oos_flag_df)
self.oos_flag_df['id'] = self.sales_df['id']
self.sales_df['scale'] = scale_list
self.sales_df['rho'] = rho_list
def _add_nan_for_test(self,df,trail=0):
max_d = int(df.columns[-1-trail][2:])
a = np.empty(df.shape[0])
a[:] = np.nan
for i in range(self.test_size):
df['d_'+str(max_d+1+i)] = a
return df
def _fill_zeros_with_last(self,arr):
prev = np.arange(len(arr))
prev[arr == 0] = 0
prev = np.maximum.accumulate(prev)
return arr[prev]
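# --- Editor's illustrative sketch (standalone): the forward-fill trick used in
# _fill_zeros_with_last. np.maximum.accumulate carries the index of the last non-zero entry
# forward, so indexing with it repeats that value across each run of zeros. ---
import numpy as np
arr = np.array([0, 3, 0, 0, 5, 0, 2])
prev = np.arange(len(arr))
prev[arr == 0] = 0
prev = np.maximum.accumulate(prev)
filled = arr[prev]                    # -> array([0, 3, 3, 3, 5, 5, 2])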
def _create_corrected_sales_and_trend_normalization_df(self):
N = 28
list_corrected_sales = []
norm_factor = []
for i in range(self.oos_flag_df.shape[0]):
oos = self.oos_flag_df.iloc[i,:-1-self.test_size].values.astype(int)
sales = self.sales_raw_df.iloc[i,6:].values.astype(int)
sales_moving_avg = running_mean(sales,N)
#Arrays without trailing zeros
oos_without_beg = oos[np.argmax(sales!=0):]
sales_mov_avg_without_beg = sales_moving_avg[np.argmax(sales!=0):]
#Replace mov av sales with zero for timestamp where oos
sales_mov_avg_without_beg = sales_mov_avg_without_beg*(1-oos_without_beg)
#Fill zeroes with last know non zero value
sales_mov_avg_without_beg = self._fill_zeros_with_last(sales_mov_avg_without_beg)
#Create new sales by replacing wherever oos by the moving average
sales_corrected = np.concatenate([sales[:np.argmax(sales!=0)],np.where(oos_without_beg==0,sales[np.argmax(sales!=0):],sales_mov_avg_without_beg)])
list_corrected_sales.append(sales_corrected)
#Normalization (has 28 more values than sales_df to be able to use it for inference)
sales_mov_avg_corrected = np.concatenate([sales[:np.argmax(sales!=0)],sales_mov_avg_without_beg])
sales_mov_avg_corrected = np.concatenate([sales_mov_avg_corrected[0]*np.ones(28),sales_mov_avg_corrected])
normalization = np.where(sales_mov_avg_corrected==0,1,1/sales_mov_avg_corrected)
norm_factor.append(normalization)
self.corrected_sales_df = pd.DataFrame(list_corrected_sales,columns=self.sales_raw_df.columns[6:])
self.corrected_sales_df = self._add_nan_for_test(self.corrected_sales_df)
self.corrected_sales_df['id'] = self.sales_df['id']
self.normalization_factor_df = pd.DataFrame(norm_factor,columns=['d_'+str(i+1) for i in range(norm_factor[0].shape[0])])
self.normalization_factor_df = self._add_nan_for_test(self.normalization_factor_df)
self.normalization_factor_df['id'] = self.sales_df['id']
def _melt_sales_df(self):
self.sales_df = self._add_nan_for_test(self.sales_df,2)
all_cols = self.sales_df.columns
all_cols_mod = [int(x[2:]) if 'd_' in x else x for x in all_cols]
self.sales_df.columns = all_cols_mod
id_vars = ['id','item_id','dept_id','cat_id','store_id','state_id','scale','rho']
value_vars = list(set(all_cols_mod)-set(id_vars))
self.sales_df = pd.melt(self.sales_df, id_vars=id_vars, value_vars=value_vars)
self.sales_df.rename(columns={'variable':'d','value':'sales'},inplace=True)
def _add_features_event_calendar(self):
le = LabelEncoder()
self.calendar_df['event_name_1'] = self.calendar_df['event_name_1'].fillna('No')
self.calendar_df['event_name_1'] = le.fit_transform(self.calendar_df['event_name_1'])
no_value = le.fit_transform(['No'])[0]
self.calendar_df['event_name_1'] = self.calendar_df['event_name_1'].astype('int8')
self.calendar_df['1_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-1).astype('float16').fillna(no_value)
self.calendar_df['2_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-2).astype('float16').fillna(no_value)
self.calendar_df['3_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-3).astype('float16').fillna(no_value)
self.calendar_df['4_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-4).astype('float16').fillna(no_value)
self.calendar_df['5_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-5).astype('float16').fillna(no_value)
self.calendar_df['6_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-6).astype('float16').fillna(no_value)
self.calendar_df['7_day_prior_event_name_1'] = self.calendar_df['event_name_1'].shift(-7).astype('float16').fillna(no_value)
self.calendar_df['1_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(1).astype('float16').fillna(no_value)
self.calendar_df['2_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(2).astype('float16').fillna(no_value)
self.calendar_df['3_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(3).astype('float16').fillna(no_value)
self.calendar_df['4_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(4).astype('float16').fillna(no_value)
self.calendar_df['5_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(5).astype('float16').fillna(no_value)
self.calendar_df['6_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(6).astype('float16').fillna(no_value)
self.calendar_df['7_day_after_event_name_1'] = self.calendar_df['event_name_1'].shift(7).astype('float16').fillna(no_value)
def _melt_calendar_df(self):
self.calendar_df.rename(columns={'snap_CA':'CA','snap_TX':'TX','snap_WI':'WI'},inplace=True)
self.calendar_df['d'] = self.calendar_df['d'].apply(lambda d: int(d[2:]))
all_cols = self.calendar_df.columns
id_vars = ['date','wm_yr_wk','weekday','wday','month','year','d','event_name_1','event_type_1','event_name_2','event_type_2',
'1_day_prior_event_name_1','2_day_prior_event_name_1','3_day_prior_event_name_1','4_day_prior_event_name_1','5_day_prior_event_name_1','6_day_prior_event_name_1','7_day_prior_event_name_1',
'1_day_after_event_name_1','2_day_after_event_name_1','3_day_after_event_name_1','4_day_after_event_name_1','5_day_after_event_name_1','6_day_after_event_name_1','7_day_after_event_name_1']
value_vars = list(set(all_cols)-set(id_vars))
self.calendar_df = pd.melt(self.calendar_df, id_vars=id_vars, value_vars=value_vars)
self.calendar_df.rename(columns={'variable':'state_id','value':'snap'},inplace=True)
def _add_features_snap_calendar(self):
CA_df = self.calendar_df[self.calendar_df['state_id']=='CA'].copy()
TX_df = self.calendar_df[self.calendar_df['state_id']=='TX'].copy()
WI_df = self.calendar_df[self.calendar_df['state_id']=='WI'].copy()
for df in [CA_df,TX_df,WI_df]:
df.sort_values(by=['d'],ascending=True,inplace=True)
df['1_day_prior_snap'] = df['snap'].shift(-1).fillna(0)
df['2_day_prior_snap'] = df['snap'].shift(-2).fillna(0)
df['3_day_prior_snap'] = df['snap'].shift(-3).fillna(0)
df['4_day_prior_snap'] = df['snap'].shift(-4).fillna(0)
df['5_day_prior_snap'] = df['snap'].shift(-5).fillna(0)
df['6_day_prior_snap'] = df['snap'].shift(-6).fillna(0)
df['7_day_prior_snap'] = df['snap'].shift(-7).fillna(0)
df['1_day_after_snap'] = df['snap'].shift(1).fillna(0)
df['2_day_after_snap'] = df['snap'].shift(2).fillna(0)
df['3_day_after_snap'] = df['snap'].shift(3).fillna(0)
df['4_day_after_snap'] = df['snap'].shift(4).fillna(0)
df['5_day_after_snap'] = df['snap'].shift(5).fillna(0)
df['6_day_after_snap'] = df['snap'].shift(6).fillna(0)
df['7_day_after_snap'] = df['snap'].shift(7).fillna(0)
self.calendar_df = pd.concat([CA_df,TX_df,WI_df],axis=0)
def _reduce_memory(self):
self.calendar_df = reduce_mem_usage(self.calendar_df)
self.sales_df = reduce_mem_usage(self.sales_df)
self.prices_df = reduce_mem_usage(self.prices_df)
def _infer_missing_prices_df(self):
max_date = self.prices_df['wm_yr_wk'].max()
min_date = self.prices_df['wm_yr_wk'].min()
store_item_min_df = self.prices_df.sort_values(by=['wm_yr_wk']).drop_duplicates(subset=['item_id','store_id'],keep='first')
store_item_min_df = store_item_min_df[store_item_min_df['wm_yr_wk']!=min_date].copy()
store_item_max_df = self.prices_df.sort_values(by=['wm_yr_wk']).drop_duplicates(subset=['item_id','store_id'],keep='last')
store_item_max_df = store_item_max_df[store_item_max_df['wm_yr_wk']!=max_date].copy()
list_date = list(set(self.prices_df['wm_yr_wk'].values.tolist()))
list_date.sort()
list_missing = []
for index,r in store_item_min_df.iterrows():
store_id = r['store_id']
item_id = r['item_id']
wm_yr_wk = r['wm_yr_wk']
sell_price = r['sell_price']
wm_yr_wk_previous = list_date[list_date.index(wm_yr_wk)-1]
list_missing.append([store_id,item_id,wm_yr_wk_previous,sell_price])
if wm_yr_wk_previous!=min_date:
wm_yr_wk_previous = list_date[list_date.index(wm_yr_wk_previous)-1]
list_missing.append([store_id,item_id,wm_yr_wk_previous,sell_price])
for index,r in store_item_max_df.iterrows():
store_id = r['store_id']
item_id = r['item_id']
wm_yr_wk = r['wm_yr_wk']
sell_price = r['sell_price']
wm_yr_wk_next = list_date[list_date.index(wm_yr_wk)-1]
list_missing.append([store_id,item_id,wm_yr_wk_next,sell_price])
if wm_yr_wk_next!=max_date:
wm_yr_wk_next = list_date[list_date.index(wm_yr_wk_next)-1]
list_missing.append([store_id,item_id,wm_yr_wk_next,sell_price])
missing_df = pd.DataFrame(list_missing,columns=['store_id','item_id','wm_yr_wk','sell_price'])
self.prices_df = pd.concat([self.prices_df,missing_df])
def _add_features_price(self):
price_agg = self.prices_df.groupby(['store_id','item_id'],as_index=False).agg(
{
'sell_price':'mean'
}
)
price_agg.rename(columns={'sell_price':'avg_sell_price'},inplace=True)
self.prices_df = pd.merge(self.prices_df,price_agg,on=['store_id','item_id'])
self.prices_df['sell_price_norm'] = self.prices_df['sell_price']/self.prices_df['avg_sell_price']
self.prices_df['sell_price_norm_lag'] = self.prices_df.groupby(['store_id','item_id'])["sell_price_norm"].transform(
lambda x: x.shift(1)
)
self.prices_df['sell_price_norm_momentum'] = self.prices_df['sell_price_norm_lag']/self.prices_df['sell_price_norm']
self.prices_df.drop(['avg_sell_price','sell_price_norm_lag'],inplace=True,axis=1)
def _join_datasets(self):
self.all_data_preprocessed = pd.merge(
self.sales_df, self.calendar_df, on=['d','state_id'],how='left'
)
self.all_data_preprocessed = pd.merge(
self.all_data_preprocessed, self.prices_df, on=['wm_yr_wk','item_id','store_id'], how='left'
)
def _finalize(self):
self.all_data_preprocessed.sort_values(by=['id','d'],inplace=True)
#Melt out of stock
all_cols = self.oos_flag_df.columns
all_cols_mod = [int(x[2:]) if 'd_' in x else x for x in all_cols]
self.oos_flag_df.columns = all_cols_mod
id_vars = ['id']
value_vars = list(set(all_cols_mod)-set(id_vars))
oos_flag_pivot_df = pd.melt(self.oos_flag_df, id_vars=id_vars, value_vars=value_vars)
oos_flag_pivot_df.rename(columns={'variable':'d','value':'oos'},inplace=True)
oos_flag_pivot_df.sort_values(by=['id','d'],inplace=True)
self.all_data_preprocessed['oos'] = oos_flag_pivot_df['oos'].values
#Melt sales corrected
all_cols = self.corrected_sales_df.columns
all_cols_mod = [int(x[2:]) if 'd_' in x else x for x in all_cols]
self.corrected_sales_df.columns = all_cols_mod
id_vars = ['id']
value_vars = list(set(all_cols_mod)-set(id_vars))
corrected_sales_pivot_df = pd.melt(self.corrected_sales_df, id_vars=id_vars, value_vars=value_vars)
corrected_sales_pivot_df.rename(columns={'variable':'d','value':'sales_corrected'},inplace=True)
corrected_sales_pivot_df.sort_values(by=['id','d'],inplace=True)
self.all_data_preprocessed['sales_corrected'] = corrected_sales_pivot_df['sales_corrected'].values
#Test vs train
max_train_d = int(self.sales_raw_df.columns[-1][2:])
d_list = self.all_data_preprocessed['d'].values
is_test =
|
np.where(d_list>max_train_d,1,0)
|
numpy.where
|
import numpy as np
domain = np.array([[0, 6], [0, 6]])
def check_in_domain(x):
"""Validate input"""
x = np.atleast_2d(x)
v_dim_0 = np.all(x[:, 0] >= domain[0, 0]) and np.all(x[:, 0] <= domain[0, 1])
    v_dim_1 = np.all(x[:, 1] >= domain[1, 0]) and np.all(x[:, 1] <= domain[1, 1])
return v_dim_0 and v_dim_1
def validate_input(x_test, n_points=None):
"""Check whether a point belongs to the domain and has the right shape."""
x_test = np.array(x_test)
x_test =
|
np.atleast_2d(x_test)
|
numpy.atleast_2d
|
import os
import json
import numpy as np
import rdkit.Chem as Chem
import CoolProp.CoolProp as CP
from scipy.integrate import quad
from solvation_predictor.solubility.solubility_predictions import SolubilityPredictions
import pkgutil, io
class SolubilityCalculations:
# Set class variable with McGowan volumes for each atomic number
mcgowan_volumes = {
1: 8.71, 2: 6.75,
3: 22.23, 4: 20.27, 5: 18.31, 6: 16.35, 7: 14.39, 8: 12.43, 9: 10.47, 10: 8.51,
11: 32.71, 12: 30.75, 13: 28.79, 14: 26.83, 15: 24.87, 16: 22.91, 17: 20.95, 18: 18.99,
19: 51.89, 20: 50.28, 21: 48.68, 22: 47.07, 23: 45.47, 24: 43.86, 25: 42.26, 26: 40.65, 27: 39.05,
28: 37.44, 29: 35.84, 30: 34.23, 31: 32.63, 32: 31.02, 33: 29.42, 34: 27.81, 35: 26.21, 36: 24.60,
37: 60.22, 38: 58.61, 39: 57.01, 40: 55.40, 41: 53.80, 42: 52.19, 43: 50.59, 44: 48.98, 45: 47.38,
46: 45.77, 47: 44.17, 48: 42.56, 49: 40.96, 50: 39.35, 51: 37.75, 52: 36.14, 53: 34.54, 54: 32.93,
55: 77.25, 56: 76.00, 57: 74.75, 72: 55.97, 73: 54.71, 74: 53.46, 75: 52.21, 76: 50.96, 77: 49.71,
78: 48.45, 79: 47.20, 80: 45.95, 81: 44.70, 82: 43.45, 83: 42.19, 84: 40.94, 85: 39.69, 86: 38.44,
}
def __init__(self, predictions: SolubilityPredictions = None,
calculate_aqueous: bool = None,
calculate_reference_solvents: bool = None,
calculate_t_dep: bool = None,
calculate_t_dep_with_t_dep_hdiss: bool = None,
solv_crit_prop_dict: dict = None,
hsubl_298: np.array = None,
Cp_solid: np.array = None,
Cp_gas: np.array = None,
logger=None,
verbose=True):
'''
All uncertainty is reported as the standard deviation of machine learning model ensemble predictions.
'''
self.logger = logger.info if logger is not None else print
self.solv_crit_prop_dict = solv_crit_prop_dict
self.gsolv_298, self.unc_gsolv_298 = None, None
self.logk_298, self.unc_logk_298 = None, None
self.gsolv_aq_298, self.unc_gsolv_aq_298 = None, None
self.logk_aq_298, self.unc_logk_aq_298 = None, None
self.logs_aq_298, self.unc_logs_aq_298 = None, None
self.logs_298_from_aq, self.unc_logs_298_from_aq = None, None
self.gsolv_ref_298, self.unc_gsolv_ref_298 = None, None
self.logk_ref_298, self.unc_logk_ref_298 = None, None
self.logs_ref_298 = None
self.logs_298_from_ref, self.unc_logs_298_from_ref = None, None
self.hsolv_298, self.unc_hsolv_298 = None, None
self.E, self.S, self.A, self.B, self.L = None, None, None, None, None
self.unc_E, self.unc_S, self.unc_A, self.unc_B, self.unc_L = None, None, None, None, None
self.V = None
self.I_OHadj, self.I_OHnonadj, self.I_NH = None, None, None
self.hsubl_298 = hsubl_298 if hsubl_298 is not None else None
self.Cp_solid = Cp_solid if Cp_solid is not None else None
self.Cp_gas = Cp_gas if Cp_gas is not None else None
self.logs_T_with_const_hdiss_from_aq, self.logs_T_with_T_dep_hdiss_from_aq = None, None
self.logs_T_with_const_hdiss_warning_message, self.logs_T_with_T_dep_hdiss_error_message = None, None
self.hsolv_T, self.gsolv_T, self.ssolv_T = None, None, None
self.logs_T_with_const_hdiss_from_ref, self.logs_T_with_T_dep_hdiss_from_ref = None, None
if predictions is not None:
if verbose:
self.logger('Start making logS calculations')
self.make_calculations_298(predictions=predictions,
calculate_aqueous=calculate_aqueous,
calculate_reference_solvents=calculate_reference_solvents,
verbose=verbose)
if calculate_t_dep:
self.make_calculations_t(predictions=predictions,
calculate_aqueous=calculate_aqueous,
calculate_reference_solvents=calculate_reference_solvents,
calculate_t_dep_with_t_dep_hdiss=calculate_t_dep_with_t_dep_hdiss,
verbose=verbose)
def make_calculations_298(self, predictions: SolubilityPredictions,
calculate_aqueous: bool = None,
calculate_reference_solvents: bool = None,
verbose=False):
self.gsolv_298, self.unc_gsolv_298 = self.extract_predictions(predictions.gsolv)
self.logk_298 = self.calculate_logk(gsolv=self.gsolv_298)
self.unc_logk_298 = self.calculate_logk(gsolv=self.unc_gsolv_298, uncertainty=True)
if calculate_aqueous:
if verbose:
self.logger('Calculating logS at 298K from predicted aqueous solubility')
self.gsolv_aq_298, self.unc_gsolv_aq_298 = self.extract_predictions(predictions.gsolv_aq)
self.logk_aq_298 = self.calculate_logk(gsolv=self.gsolv_aq_298)
self.unc_logk_aq_298 = self.calculate_logk(gsolv=self.unc_gsolv_aq_298, uncertainty=True)
self.logs_aq_298, self.unc_logs_aq_298 = self.extract_predictions(predictions.saq)
self.logs_298_from_aq = self.calculate_logs_298(logk=self.logk_298,
logk_ref=self.logk_aq_298,
logs_ref=self.logs_aq_298)
self.unc_logs_298_from_aq = self.calculate_logs_298(logk=self.unc_logk_298,
logk_ref=self.unc_logk_aq_298,
logs_ref=self.unc_logs_aq_298,
uncertainty=True)
if calculate_reference_solvents:
if verbose:
self.logger('Calculating logS at 298K from reference solubility')
self.gsolv_ref_298, self.unc_gsolv_ref_298 = self.extract_predictions(predictions.gsolv_ref)
self.logk_ref_298 = self.calculate_logk(gsolv=self.gsolv_ref_298)
self.unc_logk_ref_298 = self.calculate_logk(gsolv=self.unc_gsolv_ref_298, uncertainty=True)
self.logs_ref_298 = np.array(predictions.data.reference_solubility)
self.logs_298_from_ref = self.calculate_logs_298(logk=self.logk_298,
logk_ref=self.logk_ref_298,
logs_ref=self.logs_ref_298)
self.unc_logs_298_from_ref = self.calculate_logs_298(logk=self.unc_logk_298,
logk_ref=self.unc_logk_ref_298,
logs_ref=0.0,
uncertainty=True)
def make_calculations_t(self, predictions: SolubilityPredictions,
calculate_aqueous: bool = None,
calculate_reference_solvents: bool = None,
calculate_t_dep_with_t_dep_hdiss: bool = None,
verbose=False):
self.hsolv_298, self.unc_hsolv_298 = self.extract_predictions(predictions.hsolv)
if predictions.solute_parameters:
self.E, self.S, self.A, self.B, self.L = self.get_solute_parameters(predictions.solute_parameters[0])
self.unc_E, self.unc_S, self.unc_A, self.unc_B, self.unc_L = self.get_solute_parameters(predictions.solute_parameters[1])
self.V = np.array([self.calculate_solute_parameter_v(sm[1]) for sm in predictions.data.smiles_pairs])
self.I_OHadj, self.I_OHnonadj, self.I_NH = self.get_diol_amine_ids(predictions.data.smiles_pairs)
if self.hsubl_298 is None:
self.hsubl_298 = self.get_hsubl_298(self.E, self.S, self.A, self.B, self.V,
I_OHadj=self.I_OHadj,
I_OHnonadj=self.I_OHnonadj,
I_NH=self.I_NH)
self.logs_T_with_const_hdiss_warning_message = self.get_logs_t_with_const_hdiss_warning_message(
temperatures=predictions.data.temperatures)
if calculate_t_dep_with_t_dep_hdiss:
if self.Cp_solid is None:
self.Cp_solid = self.get_Cp_solid(self.E, self.S, self.A, self.B, self.V,
I_OHnonadj=self.I_OHnonadj) # in cal/mol/K
if self.Cp_gas is None:
self.Cp_gas = self.get_Cp_gas(self.E, self.S, self.A, self.B, self.V) # in cal/mol/K
# load solvent's CoolProp name, critical temperature, and critical density data
# if the solvent critical property dictionary (solv_crit_prop_dict) is not provided, use the default one.
if self.solv_crit_prop_dict is None:
#load from package
#current_path = os.path.dirname(os.path.abspath(__file__))
#crit_data_path = os.path.join(current_path, 'solvent_crit_data.json')
#with open(crit_data_path) as f:
# self.solv_crit_prop_dict = json.load(f) # inchi with fixed H is used as a solvent key
path = os.path.join('solubility', 'solvent_crit_data.json')
crit_data_path = io.BytesIO(pkgutil.get_data('solvation_predictor', path))
self.solv_crit_prop_dict = json.load(crit_data_path) # inchi with fixed H is used as a solvent key
coolprop_name_list, crit_t_list, crit_d_list = \
self.get_solvent_info(predictions.data.smiles_pairs, self.solv_crit_prop_dict)
if calculate_aqueous:
if verbose:
self.logger('Calculating T-dep logS from predicted aqueous solubility using H_diss(298K) approximation')
self.logs_T_with_const_hdiss_from_aq = self.calculate_logs_t(hsolv_298=self.hsolv_298,
hsubl_298=self.hsubl_298,
logs_298=self.logs_298_from_aq,
temperatures=predictions.data.temperatures)
if calculate_t_dep_with_t_dep_hdiss:
if verbose:
self.logger('Calculating T-dep logS from predicted aqueous solubility using T-dep H_diss')
self.logs_T_with_T_dep_hdiss_from_aq, self.logs_T_with_T_dep_hdiss_error_message, self.hsolv_T,\
self.gsolv_T, self.ssolv_T = self.calculate_logs_t_with_t_dep_hdiss_all(
gsolv_298_list=self.gsolv_298, hsolv_298_list=self.hsolv_298, hsubl_298_list=self.hsubl_298,
Cp_solid_list=self.Cp_solid, Cp_gas_list=self.Cp_gas, logs_298_list=self.logs_298_from_aq,
T_list=predictions.data.temperatures, coolprop_name_list=coolprop_name_list,
Tc_list=crit_t_list, rho_c_list=crit_d_list)
if calculate_reference_solvents:
if verbose:
self.logger('Calculating T-dep logS from reference solubility using H_diss(298K) approximation')
self.logs_T_with_const_hdiss_from_ref = self.calculate_logs_t(hsolv_298=self.hsolv_298,
hsubl_298=self.hsubl_298,
logs_298=self.logs_298_from_ref,
temperatures=predictions.data.temperatures)
if calculate_t_dep_with_t_dep_hdiss:
if verbose:
self.logger('Calculating T-dep logS from reference solubility using T-dep H_diss')
# since `logs_T_with_T_dep_hdiss_error_message` and `hsolv_T` will be the same whether aqueous
# or reference solubility is used, these can be overwritten.
self.logs_T_with_T_dep_hdiss_from_ref, self.logs_T_with_T_dep_hdiss_error_message, self.hsolv_T,\
self.gsolv_T, self.ssolv_T= self.calculate_logs_t_with_t_dep_hdiss_all(
gsolv_298_list=self.gsolv_298, hsolv_298_list=self.hsolv_298, hsubl_298_list=self.hsubl_298,
Cp_solid_list=self.Cp_solid, Cp_gas_list=self.Cp_gas, logs_298_list=self.logs_298_from_ref,
T_list=predictions.data.temperatures, coolprop_name_list=coolprop_name_list,
Tc_list=crit_t_list, rho_c_list=crit_d_list)
def extract_predictions(self, predictions):
pred = np.array(predictions[0]) if predictions else None
unc = np.sqrt(np.array(predictions[1])) if predictions else None # uncertainty reported as standard deviation
return pred, unc
def calculate_logs_t(self, hsolv_298=None, hsubl_298=None, logs_298=None, temperatures=None):
hdiss_298 = hsolv_298 + hsubl_298
return logs_298 - hdiss_298/2.303/8.314*4.184*1000*(1/temperatures-1/298.)
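# --- Editor's illustrative sketch (standalone, made-up numbers): the constant-dHdiss
# extrapolation above is logS(T) = logS(298) - dHdiss/(ln(10)*R) * (1/T - 1/298), with dHdiss
# given in kcal/mol and converted to J/mol by the factor 4.184*1000. ---
import numpy as np
hsolv_298_ex, hsubl_298_ex = -5.0, 20.0        # kcal/mol
logs_298_ex, T_ex = -2.0, np.array([330.0])    # log10 solubility at 298 K; target T in K
hdiss_298_ex = hsolv_298_ex + hsubl_298_ex
logs_T_ex = logs_298_ex - hdiss_298_ex / 2.303 / 8.314 * 4.184 * 1000 * (1 / T_ex - 1 / 298.)
# logs_T_ex is about -0.93: a positive dissolution enthalpy raises solubility with temperature.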
def get_logs_t_with_const_hdiss_warning_message(self, temperatures=None):
warning_message = ['Warning! Above 350 K, `calculate_t_dep_with_t_dep_hdiss` option is recommended.'
if temp > 350 else '' for temp in temperatures]
return warning_message
def calculate_logs_298(self, logk=None, logk_ref=None, logs_ref=None, uncertainty: bool = False):
if uncertainty:
return np.sqrt(np.square(logk) + np.square(logk_ref) + np.square(logs_ref))
else:
return logs_ref + logk - logk_ref
def calculate_logk(self, gsolv=None, uncertainty: bool = False):
if uncertainty:
return np.abs(gsolv * 4.184 * 1000. / 8.314 / 298. / 2.303)
else:
return -gsolv * 4.184 * 1000. / 8.314 / 298. / 2.303 # in log10
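# --- Editor's illustrative sketch (standalone, made-up value): the unit bookkeeping in
# calculate_logk. Gsolv in kcal/mol times 4.184*1000 gives J/mol; dividing by R*T*ln(10)
# (8.314 * 298 * 2.303) yields log10 K. ---
gsolv_example = -4.0                                              # kcal/mol
logk_example = -gsolv_example * 4.184 * 1000. / 8.314 / 298. / 2.303
# logk_example is about 2.93, i.e. K is roughly 10**2.93 for a -4 kcal/mol solvation free energy.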
def get_solute_parameters(self, predictions, uncertainty: bool = False):
E, S, A, B, L = [], [], [], [], []
for i in predictions:
E.append(i[0])
S.append(i[1])
A.append(i[2])
B.append(i[3])
L.append(i[4])
if uncertainty:
return np.sqrt(np.array(E)), np.sqrt(np.array(S)), np.sqrt(np.array(A)), \
np.sqrt(np.array(B)), np.sqrt(np.array(L))
else:
return np.array(E), np.array(S), np.array(A), np.array(B), np.array(L)
def get_Cp_solid(self, E, S, A, B, V, I_OHnonadj=False, in_cal=True):
'''
        From Acree. N = 406, SD = 19.0, R2 = 0.976, F = 1799.2, PRESS = 153,144,
Q2 = 0.974, PSD = 19.7.
I_OHnonadj: indicator for aliphatic diols with non-adjacent OH groups. Either True or False.
Cp at 298.15 K in J/K/mol
'''
Cp = 11.63 - 34.18 * E - 1.20 * S - 1.09 * A + 12.28 * B + 181.69 * V \
+ 2.32 * S * S + 4.24 * A * B - 1.85 * V * V - 28.50 * I_OHnonadj
if in_cal == True:
Cp = Cp / 4.184 # convert to cal/K/mol
return Cp
def get_Cp_gas(self, E, S, A, B, V, in_cal=True):
'''
        From Acree. N = 1014, SD = 7.86, R2 = 0.994, F = 22,597.7, PRESS = 63,725.7, Q2 = 0.994, PSD = 7.96.
Cp at 298.15 K in J/K/mol
'''
Cp = -8.62 - 24.33 * E - 15.83 * S + 12.35 * A + 13.27 * B + 160.00 * V + 10.66 * S * S - 2.11 * A * B + 0.41 * V * V
if in_cal == True:
Cp = Cp / 4.184 # convert to cal/K/mol
return Cp
def get_hsubl_298(self, E, S, A, B, V, I_OHadj=None, I_OHnonadj=None, I_NH=None, in_kcal=True):
'''
From Acree. N = 898, SD = 9.90, R2 = 0.868, F = 528.6, PRESS = 90315.5,
Q2 = 0.863, PSD = 10.09.
I_OHadj: indicator for aliphatic diols with adjacent OH groups. Either True or False
I_OHnonadj: indicator for aliphatic diols with non-adjacent OH groups. Either True or False.
        I_NH: indicator for alkyl amine compounds
'''
# the correlation unit is in kJ/mol.
dHsub = 9.96 - 2.10 * E + 24.10 * S + 13.70 * A + 0.79 * B + 38.71 * V - 1.36 * S * S \
+ 36.90 * A * B + 1.86 * V * V - 10.89 * I_OHadj + 14.74 * I_OHnonadj + 9.69 * I_NH # kJ/mol
if in_kcal == True:
dHsub = dHsub / 4.184 # convert to kcal/mol
return dHsub
def calculate_solute_parameter_v(self, solute_smiles):
mol = Chem.MolFromSmiles(solute_smiles)
mol = Chem.rdmolops.AddHs(mol)
V_tot = 0.0
for atom in mol.GetAtoms():
try:
V_tot += self.mcgowan_volumes[atom.GetAtomicNum()]
except KeyError:
raise ValueError('McGowan volume not available for element {}'.format(atom.GetAtomicNum()))
# divide contribution in half since all bonds would be counted twice this way
V_tot -= len(atom.GetBonds()) * 6.56 / 2
return V_tot / 100 # division by 100 to get units correct
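# --- Editor's illustrative sketch (standalone): McGowan volume of methane computed by hand the
# same way calculate_solute_parameter_v does it. Each atom contributes its tabulated volume and
# every bond removes 6.56 (counted once from each end, hence the division by two in the loop). ---
v_atoms = 16.35 + 4 * 8.71             # one carbon plus four hydrogens
v_bonds = 4 * 6.56                     # four C-H bonds
v_methane = (v_atoms - v_bonds) / 100  # ~0.2495, the accepted McGowan volume for CH4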
def get_diol_amine_ids(self, smiles_pairs):
solutes = [sm[1] for sm in smiles_pairs]
unique_solutes = set(solutes)
dict_diol_amine = dict()
for i in unique_solutes:
dict_diol_amine[i] = self.get_individual_diol_amine_ids(i)
I_OHadj = [1 if dict_diol_amine[i][0] else 0 for i in solutes]
I_OHnonadj = [1 if dict_diol_amine[i][1] else 0 for i in solutes]
I_NH = [1 if dict_diol_amine[i][2] else 0 for i in solutes]
return np.array(I_OHadj), np.array(I_OHnonadj), np.array(I_NH)
def get_individual_diol_amine_ids(self, solute):
smarts_aliphatic_OH = ['[C;X4v4]-[O;H1]', '[!O]=[C;X3v4]-[O;H1]']
mol_OHnon_list = [Chem.MolFromSmarts(i) for i in smarts_aliphatic_OH]
smarts_aliphatic_adj_OH = ['[O;H1]-[C;X4v4]-[C;X4v4][O;H1]',
'[O;H1]-[C;X3v4](=[!O])-[C;X4v4]-[O;H1]',
'[O;H1]-[C;X3v4](=[!O])-[C;X3v4](=[!O])-[O;H1]']
mol_OHajd_list = [Chem.MolFromSmarts(i) for i in smarts_aliphatic_adj_OH]
smarts_aliphatic_amine = ['[C;v4X4]-[N;H1]-[C;v4X4]', '[C;v4X4]-[N;H2]',
'[!O]=[C;v4X3]-[N;H1]-[C;v4X4]',
'[!O]=[C;v4X3]-[N;H1]-[C;v4X3]=[!O]',
'[!O]=[C;v4X3]-[N;H2]']
mol_amine_list = [Chem.MolFromSmarts(i) for i in smarts_aliphatic_amine]
mol = Chem.MolFromSmiles(solute)
mol = Chem.rdmolops.AddHs(mol)
OH_adj_found = False
OH_non_found = False
amine_found = False
# only consider aliphatic molecules
# future improvements should include aliphatic side-chain of aromatic molecules
if len(mol.GetAromaticAtoms()) > 0:
pass
else:
# get OH non match
OH_non_match_tup = ()
for mol_template in mol_OHnon_list:
OH_non_match_tup += mol.GetSubstructMatches(mol_template)
# get OH adj match
OH_adj_match_tup = ()
for mol_template in mol_OHajd_list:
OH_adj_match_tup += mol.GetSubstructMatches(mol_template)
# get NH and NH2 match
amine_match_tup = ()
for mol_template in mol_amine_list:
amine_match_tup += mol.GetSubstructMatches(mol_template)
if len(OH_adj_match_tup) > 0:
OH_adj_found = True
else:
if len(OH_non_match_tup) >= 2: # make sure they are diols
OH_non_found = True
if len(amine_match_tup) > 0:
amine_found = True
return OH_adj_found, OH_non_found, amine_found
def get_solvent_info(self, smiles_pairs, solv_crit_prop_dict):
solvents_smiles_list = [sm[0] for sm in smiles_pairs]
coolprop_name_list, crit_t_list, crit_d_list = [], [], []
for smi in solvents_smiles_list:
mol = Chem.MolFromSmiles(smi)
inchi = Chem.MolToInchi(mol, options='/FixedH')
if inchi in solv_crit_prop_dict:
coolprop_name_list.append(solv_crit_prop_dict[inchi]['coolprop_name'])
crit_t_list.append(solv_crit_prop_dict[inchi]['Tc']) # in K
crit_d_list.append(solv_crit_prop_dict[inchi]['rho_c']) # in mol/m^3
else:
coolprop_name_list.append(None)
crit_t_list.append(None)
crit_d_list.append(None)
return coolprop_name_list, crit_t_list, crit_d_list
def check_valid_t(self, T, Tc, coolprop_name=None, ref_solvent='n-Heptane'):
if coolprop_name is None:
Tc_ref = CP.PropsSI('T_critical', ref_solvent) # in K
T_min_ref = CP.PropsSI('T_min', ref_solvent)
T_max = Tc
T_min_red = T_min_ref / Tc_ref
T_min = T_min_red * Tc
else:
T_max = CP.PropsSI('T_critical', coolprop_name)
T_min = CP.PropsSI('T_min', coolprop_name)
valid = True
const_hdiss_T = None
error_message = None
if T > T_max:
error_message = f"Temperature {T} K is above the critical temperature {T_max} K."
valid = False
elif T > T_max - 15:
error_message = f"Warning! Temperature {T} K is too close to the critical temperature {T_max} K."
error_message += ' The prediction may not be reliable.'
elif T < T_min:
const_hdiss_T = T_min
if coolprop_name is None:
error_message = f"Unable to predict dHdissT for T < {'%.3f' % T_min} K. dHdissT at {'%.3f' % T_min} K is used instead for lower temperatures."
else:
error_message = f"Warning! Temperature {T} K is below the minimum limit. It should be in range [{T_min} K, {T_max} K]."
error_message += f" Constant dHdissT at {'%.3f' % T_min} K is used instead for lower temperatures."
return valid, const_hdiss_T, error_message, T_min, T_max
def get_gas_liq_sat_density(self, T, Tc, rho_c, coolprop_name=None, ref_solvent='n-Heptane'):
if coolprop_name is None:
return self.get_gas_liq_sat_density_from_ref(T, Tc, rho_c, ref_solvent=ref_solvent)
else:
return self.get_gas_liq_sat_density_from_cp(T, coolprop_name)
def get_gas_liq_sat_density_from_cp(self, T, coolprop_name):
gas_density = CP.PropsSI('Dmolar', 'T', T, 'Q', 1, coolprop_name) # in mol/m^3
liq_density = CP.PropsSI('Dmolar', 'T', T, 'Q', 0, coolprop_name) # in mol/m^3
return gas_density, liq_density
def get_gas_liq_sat_density_from_ref(self, T, Tc, rho_c, ref_solvent='n-Heptane'):
# convert temperature to reduced temperature and then calculate corresponding temperature for the reference solvent
T_red = T / Tc
Tc_ref = CP.PropsSI('T_critical', ref_solvent) # K
T_ref = T_red * Tc_ref
# get densities for the reference solvent
gas_density_ref, liq_density_ref = self.get_gas_liq_sat_density_from_cp(T_ref, ref_solvent)
# convert densities to reduced densities and then calculate corresponding densities for the solvent of interest.
rhoc_ref = CP.PropsSI('rhomolar_critical', ref_solvent) # mol/m^3
gas_density_red = gas_density_ref / rhoc_ref
gas_density = gas_density_red * rho_c # mol/m^3
liq_density_red = liq_density_ref / rhoc_ref
liq_density = liq_density_red * rho_c # mol/m^3
return gas_density, liq_density
def get_Kfactor_parameters(self, gsolv_298, hsolv_298, Tc, rho_c, coolprop_name, T_trans_factor=0.75):
T1 = 298
gsolv_298 = gsolv_298 * 4184 # convert from kcal/mol to J/mol
hsolv_298 = hsolv_298 * 4184 # convert from kcal/mol to J/mol
dSsolv298 = (hsolv_298 - gsolv_298) / T1
T_transition = Tc * T_trans_factor # T_trans_factor is empirically set to 0.75 by default
# Generate Amatrix and bvector for Ax = b
Amatrix = np.zeros((4, 4))
bvec =
|
np.zeros((4, 1))
|
numpy.zeros
|
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for calfits object
"""
from __future__ import absolute_import, division, print_function
import nose.tools as nt
import os
import numpy as np
from astropy.io import fits
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
import pyuvdata.utils as uvutils
def test_readwriteread():
"""
Omnical fits loopback test.
    Read in a calfits file, write out a new calfits file, read it back in and check for
object equality.
"""
cal_in = UVCal()
cal_out = UVCal()
testfile = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.gain.calfits')
write_file = os.path.join(DATA_PATH, 'test/outtest_omnical.fits')
cal_in.read_calfits(testfile)
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
nt.assert_equal(cal_in, cal_out)
# test without freq_range parameter
cal_in.freq_range = None
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
nt.assert_equal(cal_in, cal_out)
def test_readwriteread_delays():
"""
    Read-Write-Read test with a fits calibration file containing delays.
    Read in a calfits file, write out a new calfits file, read it back in and check for
    object equality.
"""
cal_in = UVCal()
cal_out = UVCal()
testfile = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.delay.calfits')
write_file = os.path.join(DATA_PATH, 'test/outtest_firstcal.fits')
cal_in.read_calfits(testfile)
cal_in.write_calfits(write_file, clobber=True)
cal_out.read_calfits(write_file)
nt.assert_equal(cal_in, cal_out)
del(cal_in)
del(cal_out)
def test_errors():
"""
Test for various errors.
"""
cal_in = UVCal()
cal_out = UVCal()
testfile = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.delay.calfits')
write_file = os.path.join(DATA_PATH, 'test/outtest_firstcal.fits')
cal_in.read_calfits(testfile)
cal_in.set_unknown_cal_type()
nt.assert_raises(ValueError, cal_in.write_calfits, write_file, run_check=False, clobber=True)
# change values for various axes in flag and total quality hdus to not match primary hdu
cal_in.read_calfits(testfile)
# Create filler jones info
cal_in.jones_array = np.array([-5, -6, -7, -8])
cal_in.Njones = 4
cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)
cal_in.delay_array = np.ones(cal_in._delay_array.expected_shape(cal_in), dtype=np.float64)
cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))
    # add total_quality_array so that it can be tested as well
cal_in.total_quality_array = np.zeros(cal_in._total_quality_array.expected_shape(cal_in))
header_vals_to_double = [{'flag': 'CDELT2'}, {'flag': 'CDELT3'},
{'flag': 'CRVAL5'}, {'totqual': 'CDELT1'},
{'totqual': 'CDELT2'}, {'totqual': 'CRVAL4'}]
for i, hdr_dict in enumerate(header_vals_to_double):
cal_in.write_calfits(write_file, clobber=True)
unit = list(hdr_dict.keys())[0]
keyword = hdr_dict[unit]
F = fits.open(write_file)
data = F[0].data
primary_hdr = F[0].header
hdunames = uvutils._fits_indexhdus(F)
ant_hdu = F[hdunames['ANTENNAS']]
flag_hdu = F[hdunames['FLAGS']]
flag_hdr = flag_hdu.header
totqualhdu = F[hdunames['TOTQLTY']]
totqualhdr = totqualhdu.header
if unit == 'flag':
flag_hdr[keyword] *= 2
elif unit == 'totqual':
totqualhdr[keyword] *= 2
prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)
hdulist = fits.HDUList([prihdu, ant_hdu])
flag_hdu = fits.ImageHDU(data=flag_hdu.data, header=flag_hdr)
hdulist.append(flag_hdu)
totqualhdu = fits.ImageHDU(data=totqualhdu.data, header=totqualhdr)
hdulist.append(totqualhdu)
hdulist.writeto(write_file, overwrite=True)
nt.assert_raises(ValueError, cal_out.read_calfits, write_file, strict_fits=True)
# repeat for gain type file
testfile = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.gain.calfits')
write_file = os.path.join(DATA_PATH, 'test/outtest_omnical.fits')
cal_in.read_calfits(testfile)
# Create filler jones info
cal_in.jones_array = np.array([-5, -6, -7, -8])
cal_in.Njones = 4
cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)
cal_in.gain_array = np.ones(cal_in._gain_array.expected_shape(cal_in), dtype=np.complex64)
cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))
    # add total_quality_array so that it can be tested as well
cal_in.total_quality_array = np.zeros(cal_in._total_quality_array.expected_shape(cal_in))
header_vals_to_double = [{'totqual': 'CDELT1'}, {'totqual': 'CDELT2'},
{'totqual': 'CDELT3'}, {'totqual': 'CRVAL4'}]
for i, hdr_dict in enumerate(header_vals_to_double):
cal_in.write_calfits(write_file, clobber=True)
unit = list(hdr_dict.keys())[0]
keyword = hdr_dict[unit]
F = fits.open(write_file)
data = F[0].data
primary_hdr = F[0].header
hdunames = uvutils._fits_indexhdus(F)
ant_hdu = F[hdunames['ANTENNAS']]
totqualhdu = F[hdunames['TOTQLTY']]
totqualhdr = totqualhdu.header
if unit == 'totqual':
totqualhdr[keyword] *= 2
prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)
hdulist = fits.HDUList([prihdu, ant_hdu])
totqualhdu = fits.ImageHDU(data=totqualhdu.data, header=totqualhdr)
hdulist.append(totqualhdu)
hdulist.writeto(write_file, overwrite=True)
nt.assert_raises(ValueError, cal_out.read_calfits, write_file, strict_fits=True)
def test_extra_keywords():
cal_in = UVCal()
cal_out = UVCal()
calfits_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.gain.calfits')
testfile = os.path.join(DATA_PATH, 'test/outtest_omnical.fits')
cal_in.read_calfits(calfits_file)
# check for warnings & errors with extra_keywords that are dicts, lists or arrays
cal_in.extra_keywords['testdict'] = {'testkey': 23}
uvtest.checkWarnings(cal_in.check, message=['testdict in extra_keywords is a '
'list, array or dict'])
nt.assert_raises(TypeError, cal_in.write_calfits, testfile, run_check=False)
cal_in.extra_keywords.pop('testdict')
cal_in.extra_keywords['testlist'] = [12, 14, 90]
uvtest.checkWarnings(cal_in.check, message=['testlist in extra_keywords is a '
'list, array or dict'])
nt.assert_raises(TypeError, cal_in.write_calfits, testfile, run_check=False)
cal_in.extra_keywords.pop('testlist')
cal_in.extra_keywords['testarr'] = np.array([12, 14, 90])
uvtest.checkWarnings(cal_in.check, message=['testarr in extra_keywords is a '
'list, array or dict'])
nt.assert_raises(TypeError, cal_in.write_calfits, testfile, run_check=False)
cal_in.extra_keywords.pop('testarr')
# check for warnings with extra_keywords keys that are too long
cal_in.extra_keywords['test_long_key'] = True
uvtest.checkWarnings(cal_in.check, message=['key test_long_key in extra_keywords '
'is longer than 8 characters'])
uvtest.checkWarnings(cal_in.write_calfits, [testfile], {'run_check': False,
'clobber': True},
message=['key test_long_key in extra_keywords is longer than 8 characters'])
cal_in.extra_keywords.pop('test_long_key')
# check handling of boolean keywords
cal_in.extra_keywords['bool'] = True
cal_in.extra_keywords['bool2'] = False
cal_in.write_calfits(testfile, clobber=True)
cal_out.read_calfits(testfile)
nt.assert_equal(cal_in, cal_out)
cal_in.extra_keywords.pop('bool')
cal_in.extra_keywords.pop('bool2')
# check handling of int-like keywords
cal_in.extra_keywords['int1'] =
|
np.int(5)
|
numpy.int
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 14:34:01 2018
@author: <NAME>
"""
import os
import numpy as np
from model import GaussianProcess
import scipy
from platypus import NSGAII, Problem, Real
from acquisitions import UCB, LCB, TS, ei, pi,compute_beta
######################Algorithm input##############################
paths='.'
from benchmarks import branin,Currin,whiteBox_const1,combination_const1,BlackBox_const1,BlackBox_const2
functions=[branin,Currin]
#this is just a toy example of how to define constraints based on their type,
# this means we want branin and currin<=0 and all other constraints <=0
BlackBox_constraints=[BlackBox_const1,BlackBox_const2] # if none leave brackets empty []
whiteBox_constraints=[whiteBox_const1]# if none leave brackets empty []
Combination_constraints=[combination_const1]# if none leave brackets empty []
d=2
seed=1
np.random.seed(seed)
total_iterations=100
intial_number=1
############################Set acquisition function
acquisation=ei
batch_size=1 #In case you need batch version, you can set the batch size here
######################################################################
def evaluation(xx,d):
global functions,BlackBox_constraints,whiteBox_constraints,Combination_constraints
x=[item for item in xx]
y=[functions[i](x,d) for i in range(len(functions))]
B_c=[BlackBox_constraints[i](x,d) for i in range(len(BlackBox_constraints))]
W_c=[whiteBox_constraints[i](x,d) for i in range(len(whiteBox_constraints))]
C_c=[Combination_constraints[i](y,B_c,x) for i in range(len(Combination_constraints)) ]
all_=y+B_c+W_c+C_c
print(all_)
return all_
functions_and_contraints=evaluation
M=len(functions)
BB_C=len(BlackBox_constraints)
WB_C=len(whiteBox_constraints)
C_C=len(Combination_constraints)
total_C=BB_C+WB_C+C_C
bound=[0,1]
Fun_bounds=[bound]*d
###################GP Initialisation##########################
GPs=[]
GPs_C=[]
for i in range(M):
GPs.append(GaussianProcess(d))
for i in range(BB_C):
GPs_C.append(GaussianProcess(d))
for k in range(intial_number):
exist=True
while exist:
x_rand=list(np.random.uniform(low=bound[0], high=bound[1], size=(d,)))
if (any((x_rand == x).all() for x in GPs[0].xValues))==False:
exist=False
functions_contraints_values=functions_and_contraints(x_rand,d)
for i in range(M):
GPs[i].addSample(np.asarray(x_rand),functions_contraints_values[i])
for i in range(BB_C):
GPs_C[i].addSample(np.asarray(x_rand),functions_contraints_values[i+M])
with open(os.path.join(paths,'Inputs.txt'), "a") as filehandle:
for item in x_rand:
filehandle.write('%f ' % item)
filehandle.write('\n')
filehandle.close()
with open(os.path.join(paths,'Outputs.txt'), "a") as filehandle:
for listitem in functions_contraints_values:
filehandle.write('%f ' % listitem)
filehandle.write('\n')
filehandle.close()
for i in range(M):
GPs[i].fitModel()
for i in range(BB_C):
GPs_C[i].fitModel()
for l in range(total_iterations):
beta=compute_beta(l+1,d)
cheap_pareto_set=[]
def CMO(x):
global beta
x=
|
np.asarray(x)
|
numpy.asarray
|
"""This module contains the functions that are used to simulate and estimate the model and perform inference."""
import numpy as np
import pandas as pd
from scipy import stats, special
from scipy.optimize import minimize
from scipy.signal import windows
import statsmodels.api as sm
import statsmodels.tsa.api as tsa
import sympy as sym
import logging
from collections import OrderedDict
from functools import partial
from itertools import product
from libvolpriceinference import _simulate_autoregressive_gamma
from libvolpriceinference import link_total, link_jacobian, _covariance_kernel
from libvolpriceinference import compute_beta, compute_gamma, compute_psi
from tqdm.auto import tqdm
from multiprocessing import Pool
# We define some functions
_x, _y, beta, gamma, psi = sym.symbols('_x _y beta gamma psi', real=True, positive=True)
logit_rho, log_scale, log_both, zeta = sym.symbols("""logit_rho log_scale
log_both zeta""", real=True,
positive=True)
theta, pi, phi, pi1, pi2, theta1, theta2, phi1, phi2 = sym.symbols("""theta pi
phi pi1 pi2
theta1
theta2 phi1
phi2""")
# We define the functions that specify the model.
_logit = sym.log(_x) - sym.log(1 - _x)
_logistic = 1 / (1 + sym.exp(-1 * _x))
_psi_sym = (phi / sym.sqrt(2 * sym.exp(log_scale))) - (1 - phi**2) / 2 + (1 - phi**2) * theta
_theta_sym = sym.solveset(psi - _psi_sym, theta).args[0]
_B_func_in = 1 + sym.exp(log_scale) * _x
_A_func = _logistic.xreplace({_x: logit_rho}) * _x / _B_func_in
_C_func = psi * _x - ((1 - phi**2) / 2) * _x**2
_beta_sym = (_A_func.xreplace({_x: pi + _C_func.xreplace({_x: theta - 1})}) -
_A_func.xreplace({_x: pi + _C_func.xreplace({_x: theta})}))
# We create the functions that define the nonlinear constraint implied by the argument of the logarithm needing to
# be positive.
_constraint_sym = _B_func_in.xreplace({_x: pi + _C_func})
_gamma_sym = sym.exp(log_both - log_scale) * (sym.log(_constraint_sym.xreplace({_x: theta - 1})) -
sym.log(_constraint_sym.xreplace({_x: theta})))
_constraint1 = sym.lambdify((phi, pi, theta, log_scale, logit_rho, psi),
_constraint_sym.xreplace({_x: theta - 1}), modules='numpy')
_constraint2 = sym.lambdify((phi, pi, theta, log_scale, logit_rho, psi),
_constraint_sym.xreplace({_x: theta}), modules='numpy')
# We create a function to initialize the parameters with reasonable guesses in the optimization algorithms.
compute_theta = sym.lambdify((psi, logit_rho, log_scale, zeta),
_theta_sym.xreplace({phi: sym.Min(0, -sym.sqrt(1 - zeta))}),
modules='numpy')
_pi_from_gamma_in = _B_func_in.xreplace({_x: pi + _C_func})
_pi_from_gamma = sym.powsimp(sym.expand(sym.solveset(sym.exp(gamma / sym.exp(log_both - log_scale)) -
(_pi_from_gamma_in.xreplace({_x: theta - 1}) /
_pi_from_gamma_in.xreplace({_x: theta})),
pi).args[0].args[0]))
compute_pi = sym.lambdify((gamma, log_both, log_scale, phi, psi, logit_rho, theta), _pi_from_gamma, modules='numpy')
# We create the functions to jointly specify the links.
_link_sym = sym.powsimp(sym.expand(sym.Matrix([beta - _beta_sym, gamma - _gamma_sym, psi - _psi_sym,
1 - (zeta + phi**2)])))
# We define the moments used to estimate the volatility parameters.
_mean = _logistic.xreplace({_x: logit_rho}) * _x + sym.exp(log_both)
_var = 2 * sym.exp(log_scale) * _logistic.xreplace({_x: logit_rho}) * _x + sym.exp(log_scale + log_both)
# I now compute the heteroskedasticity-adjusted moments.
_row1 = (_y - _mean)
_row3 = ((_y - _mean)**2 - _var)
_vol_moments = sym.Matrix([_row1, _row1 * _x, _row3, _row3 * _x, _row1 * _x**2])
compute_vol_moments = sym.lambdify([_x, _y, log_both, log_scale, logit_rho], _vol_moments, modules='numpy')
compute_vol_moments_grad = sym.lambdify([_x, _y, log_both, log_scale, logit_rho],
_vol_moments.jacobian([log_both, log_scale, logit_rho]), modules='numpy')
# Define the gradient of the link function with respect to the structural parameters.
_link_price_grad_sym = sym.powsimp(sym.expand(sym.Matrix([_link_sym.jacobian([phi, pi, theta])])))
_link_price_grad_in = sym.lambdify((phi, pi, theta, beta, gamma, log_both,
log_scale, psi, logit_rho, zeta),
_link_price_grad_sym, modules='numpy')
def constraint(prices, omega):
"""Compute the constraint implied by logarithm's argument in the second link function being postiive."""
constraint1 = _constraint1(*prices, logit_rho=omega['logit_rho'],
log_scale=omega['log_scale'], psi=omega['psi'])
constraint2 = _constraint2(*prices, logit_rho=omega['logit_rho'],
log_scale=omega['log_scale'], psi=omega['psi'])
return np.minimum(constraint1, constraint2)
def compute_moments(log_both, logit_rho, log_scale, phi, pi, theta, psi):
"""Compute the means and variances implied by the paramters."""
rho = special.expit(logit_rho)
vol_mean = np.exp(log_both) / (1 - rho)
vol_var = ((2 * np.exp(log_scale) * rho * vol_mean + np.exp(log_scale)**2
* np.exp(log_both - log_scale)) / (1 - rho**2))
psi = compute_psi(log_scale=log_scale, phi=phi, theta=theta)
beta = compute_beta(logit_rho=logit_rho, log_scale=log_scale, phi=phi, pi=pi, theta=theta, psi=psi)
gamma = compute_gamma(log_both=log_both, psi=psi, log_scale=log_scale,
phi=phi, pi=pi, theta=theta)
return_mean = psi * vol_mean + beta * vol_mean + gamma
return_var = psi**2 * vol_var + beta**2 * vol_var + (1 - phi**2) * vol_mean
return {'return_mean': return_mean, 'return_var': return_var, 'vol_mean': vol_mean, 'vol_var': vol_var}
def compute_constraint_prices(omega, omega_cov, bounds):
"""Compute the slackness in the nonlinear constraint."""
phi_init = -np.sqrt(1 - omega['zeta']) if omega['zeta'] < 1 else 0
theta_init = compute_theta(psi=omega['psi'], log_scale=omega['log_scale'],
logit_rho=omega['logit_rho'], zeta=omega['zeta'])
vals = -1 * stats.truncexpon.rvs(loc=-bounds['pi']['max'], b=-bounds['pi']['min'], size=50)
arg_list = [(val, _qlr_in([phi_init, val, theta_init], omega, omega_cov)) for val in vals]
pi_est = compute_pi(log_both=omega['log_both'], gamma=omega['gamma'],
psi=omega['psi'], logit_rho=omega['logit_rho'],
log_scale=omega['log_scale'], theta=theta_init,
phi=phi_init)
if np.isfinite(pi_est):
arg_list.append((pi_est, _qlr_in([phi_init, pi_est, theta_init], omega, omega_cov)))
pi_init = pd.DataFrame(arg_list).sort_values(1).iloc[0, 0]
prices_init = [phi_init, pi_init, theta_init]
constraint_in = partial(constraint, omega=omega)
constraint_dict = {'type': 'ineq', 'fun': constraint_in}
# I ensure that the constraint is satisfied at the initial point.
if constraint_in(prices_init) < 0:
prices_init[1] = -.1
if constraint_in(prices_init) < 0:
prices_init[2] = .1
return constraint_dict, prices_init
def compute_hac_num_lags(data, kernel="parzen"):
"""
Compute the number of lags for an AR(1) process.
    It uses the plug-in formula developed by Andrews (1991). (It uses a weight equal to 1, which is
    irrelevant because we are assuming univariate data.)
    Parameters
    --------
    data : dataframe
        The data used to fit the AR(1) coefficients.
kernel: str
the kernel to use
Returns
------
num_lags : positive integer
"""
data = pd.DataFrame(data)
data.columns = np.arange(data.shape[1])
# This is Andrews (1991) Eq. 6.4.
slopes_and_vars = []
for _, col in data.items():
data_in = col.to_frame()
data_in.columns = ['name']
model = tsa.AR(data_in).fit(maxlag=1)
intercept, slope = model.params
innov_var = model.sigma2
slopes_and_vars.append([slope, innov_var])
slope_and_var_df = pd.DataFrame(slopes_and_vars, columns=['slope', 'var'])
summands = np.array([[(4 * row.slope**2 * row.var**4) / (1 - row.slope)**8,
row.var**4 / (1 - row.slope**4)]
for row in slope_and_var_df.itertuples()])
alpha_2 = np.mean(summands[:, 0]) / np.mean(summands[:, 1])
time_dim = data.shape[0]
if kernel == "parzen":
bandwidth = 2.6614 * (alpha_2 * time_dim)**.2
elif kernel == "tukey":
bandwidth = 1.7452 * (alpha_2 * time_dim)**.2
elif kernel == "quadratic-spectral":
bandwidth = 1.3221 * (alpha_2 * time_dim)**.2
else:
raise NotImplementedError("We only support the parzen, tukey, and quadratic-spectral kernels.")
    # We do not want to average over subsequences that are more than the square-root of the sample size.
    # (This is essentially changing the constant because it creates a maximum value for alpha_2.)
    return int(max(bandwidth, time_dim**.25))
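# Editor's illustrative sketch (not part of the original module): exercising the Andrews (1991)
# plug-in rule above on a simulated AR(1) series. The helper name is hypothetical and it is
# never called; it only documents the intended usage of compute_hac_num_lags.
def _demo_compute_hac_num_lags(time_dim=500, slope=0.8, seed=0):
    """Simulate an AR(1) series and report the number of HAC lags chosen for it."""
    rng = np.random.default_rng(seed)
    shocks = rng.standard_normal(time_dim)
    series = np.empty(time_dim)
    series[0] = shocks[0]
    for idx in range(1, time_dim):
        series[idx] = slope * series[idx - 1] + shocks[idx]
    # A more persistent series (slope closer to 1) should produce a larger bandwidth.
    return compute_hac_num_lags(pd.DataFrame(series), kernel="parzen")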
def compute_names():
"""Compute the names."""
return ['phi', 'pi', 'theta']
def sliding_window_view(arr, window):
"""
    Compute an efficient rolling window function.
    Parameters
-------
arr : arraylike
The array to roll over
window : positive scalar
The length of the window
Returns
------
iterator
"""
shape = (window, arr.shape[1])
arr = np.asarray(arr)
output_len = arr.shape[0] - window + 1
new_shape = (output_len,) + shape
new_strides = (arr.strides[0],) + arr.strides
return_data = np.lib.stride_tricks.as_strided(arr, shape=new_shape, strides=new_strides,
subok=False, writeable=False)
return return_data
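# Editor's illustrative sketch (not part of the original module): the strided view returned by
# sliding_window_view shares memory with its input and stacks overlapping windows along a new
# leading axis. Hypothetical helper, never called.
def _demo_sliding_window_view():
    """Show that a (6, 2) array with window=3 yields a (4, 3, 2) view of overlapping rows."""
    arr = np.arange(12.0).reshape(6, 2)
    view = sliding_window_view(arr, window=3)
    assert view.shape == (4, 3, 2)
    assert np.array_equal(view[0], arr[:3])    # first window: rows 0..2
    assert np.array_equal(view[-1], arr[3:])   # last window: rows 3..5
    return view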
def compute_link(prices, omega):
"""Compute the link function."""
return link_total(*prices, **omega)
def compute_link_grad(prices, omega):
"""Compute the gradient of the link function with respect to the reduced-form paramters."""
result = link_jacobian(phi=prices[0], pi=prices[1], theta=prices[2],
log_both=omega['log_both'],
log_scale=omega['log_scale'],
logit_rho=omega['logit_rho'], psi=omega['psi'])
return result
def compute_link_price_grad(prices, omega):
"""Compute the gradient of the link function with respect to the structural parameters."""
return np.atleast_2d(_link_price_grad_in(*prices, **omega))
def covariance_kernel(prices1, prices2, omega, omega_cov):
"""Compute the covarinace of the implied gaussian process as a function of the structural paramters."""
cov = omega_cov.sort_index(axis=0).sort_index(axis=1)
result = _covariance_kernel(*prices1, *prices2, psi=omega['psi'],
log_both=omega['log_both'],
log_scale=omega['log_scale'],
logit_rho=omega['logit_rho'],
omega_cov=cov)
return result
def simulate_autoregressive_gamma(log_both=0, logit_rho=0, log_scale=0, initial_point=None, time_dim=100,
start_date='2000-01-01'):
"""
    Provide draws from the ARG(1) process of Gourieroux & Jasiak.
Parameters
--------
logit_rho : scalar
AR(1) coefficient
log_both : scalar
intercept
log_scale : scalar
Returns
-----
draws : dataframe
"""
# If initial_point is not specified, we start at the unconditional mean.
    initial_point = (np.exp(log_both) / (1 - special.expit(logit_rho)) if initial_point is None
                     else initial_point)
draws = _simulate_autoregressive_gamma(delta=np.exp(log_both - log_scale), rho=special.expit(logit_rho),
scale=np.exp(log_scale), initial_point=initial_point, time_dim=time_dim)
draws_df = pd.Series(draws, pd.date_range(start=start_date, freq='D', periods=len(draws))).to_frame()
return draws_df
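# Editor's illustrative sketch (not part of the original module): with the parameters used
# below, the ARG(1) draws should fluctuate around the unconditional mean
# exp(log_both) / (1 - rho) = 1 / (1 - 0.5) = 2. Hypothetical helper, never called.
def _demo_simulate_autoregressive_gamma(time_dim=2000):
    draws = simulate_autoregressive_gamma(log_both=0.0, logit_rho=0.0, log_scale=0.0,
                                          time_dim=time_dim)
    sample_mean = draws.mean().iloc[0]
    implied_mean = np.exp(0.0) / (1 - special.expit(0.0))
    return sample_mean, implied_mean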
def simulate_data(theta=1, pi=0, logit_rho=0, log_scale=0, log_both=0, phi=0, initial_point=None, time_dim=100,
start_date='2000-01-01'):
"""
    Take the reduced-form parameters and risk prices and return the simulated data.
Parameters
--------
theta: scalar
pi : scalar
logit_rho : scalar
persistence
log_scale : positive scalar
initial_point: scalar, optional
Starting value for the volatility
time_dim : int, optional
number of periods
start_date : datelike, optional
The time to start the data from.
phi : scalar
The leverage effect. It must lie in (0,1)
Returns
-----
draws : dataframe
"""
vol_data = simulate_autoregressive_gamma(logit_rho=logit_rho, log_scale=log_scale, log_both=log_both,
initial_point=initial_point, time_dim=time_dim + 1,
start_date=pd.to_datetime(start_date) - pd.Timedelta('1 day'))
psi_val = compute_psi(log_scale=log_scale, theta=theta, phi=phi)
gamma_val = compute_gamma(log_both=log_both, psi=psi_val, log_scale=log_scale, phi=phi, pi=pi, theta=theta)
beta_val = compute_beta(logit_rho=logit_rho, log_scale=log_scale, pi=pi, theta=theta, phi=phi, psi=psi_val)
price_in = (phi, pi, theta)
if constraint(prices=price_in, omega={'psi': psi_val, 'logit_rho': logit_rho, 'log_scale': log_scale}) < 0:
raise ValueError(f"""The set of paramters given conflict with each other. No process exists with those
paramters. You might want to make the volatility price smaller in magnitude.""")
mean = gamma_val + beta_val * vol_data.shift(1) + psi_val * vol_data
var = (1 - phi**2) * vol_data
draws = mean + np.sqrt(var) * pd.DataFrame(np.random.standard_normal(var.size), index=vol_data.index)
data = pd.concat([vol_data, draws], axis=1).dropna()
data.columns = ['vol', 'rtn']
return data
def vol_moments(vol_data, log_both, log_scale, logit_rho):
"""Compute the moments of the volatility process."""
x = vol_data.values[:-1]
y = vol_data.values[1:]
moments = np.squeeze(compute_vol_moments(x, y, log_both=log_both, log_scale=log_scale, logit_rho=logit_rho)).T
return pd.DataFrame(moments)
def vol_moments_grad(vol_data, log_both, log_scale, logit_rho):
"""Compute the jacobian of the volatility moments."""
grad = np.mean([compute_vol_moments_grad(x, y, log_both=log_both, log_scale=log_scale, logit_rho=logit_rho)
for x, y in zip(vol_data.values[:-1], vol_data.values[1:])], axis=0)
return pd.DataFrame(grad, columns=['log_both', 'log_scale', 'logit_rho'])
def compute_init_constants(vol_data):
"""
    Compute guesses for the volatility parameters that we use to initialize the optimization.
    Parameters
-------
vol_data : dataframe
The volatility data
Returns
-----
dict
"""
model = tsa.AR(vol_data).fit(maxlag=1)
intercept, persistence = model.params
init_constants = {'log_both': max(np.log(intercept), -10)}
init_constants['log_scale'] = max(np.log(np.var(vol_data)
* (1 - persistence**2))
- np.log(2 * persistence *
np.mean(vol_data) + intercept),
-10)
init_constants['logit_rho'] = max(special.logit(persistence), -10)
return init_constants
def compute_mean_square(x, data, func, weight=None):
"""
    Apply func to the data at *x, and then compute its weighted mean square error.
    Parameters
    -------
    x : iterable
        parameters
data : dataframe
func : function
weight : 2d ndarray
Returns
------
scalar
"""
func_data = np.mean(func(data, *x), axis=0)
if weight is None:
weight = np.eye(len(func_data.T))
    return float(func_data @ weight @ func_data.T)
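# Editor's illustrative sketch (not part of the original module): with the default identity
# weight, compute_mean_square is just the squared Euclidean norm of the column-wise averaged
# moments. The helper and its toy moment function are hypothetical and never called.
def _demo_compute_mean_square():
    data = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [0.0, 1.0, 2.0]})

    def toy_moments(df, shift):
        return df - shift                              # toy "moment" function

    value = compute_mean_square([1.0], data, toy_moments)
    mean_moments = np.mean(data.values - 1.0, axis=0)  # array([1., 0.])
    return value, float(mean_moments @ mean_moments)   # both equal 1.0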
def compute_vol_gmm(vol_data, init_constants=None, options=None):
"""
    Use GMM to compute the volatility parameters and their asymptotic covariance matrix.
    Parameters
    ---------
    vol_data : pandas series
    init_constants : dict, None
        This must contain initial guesses for the parameters as values and their names as keys.
options: dict, optional
Returns
--------
final_result : dict
cov : ndarray
"""
if options is None:
options = {'maxiter': 200}
if init_constants is None:
init_constants = compute_init_constants(vol_data)
x0 = list(init_constants.values())
init_constants = OrderedDict(sorted(init_constants.items(), key=lambda t: t[0]))
initial_result = minimize(
lambda x: vol_data.shape[0]**2 * compute_mean_square(x, vol_data, vol_moments),
x0=x0, method='BFGS', options=options)
if not initial_result['success']:
logging.warning(initial_result)
vol_moments_data1 = vol_moments(vol_data, *initial_result.x)
num_lags = compute_hac_num_lags(vol_moments_data1, kernel='parzen')
win = windows.parzen(M=2 * num_lags, sym=True) / np.sum(windows.parzen(M=2 * num_lags, sym=True))
sliding_it1 = sliding_window_view(vol_moments_data1, window=len(win))
moment_cov1 = np.mean([(x.T * win).dot(x) for x in sliding_it1], axis=0)
weight_matrix = np.linalg.pinv(moment_cov1)
final_result = minimize(
lambda x: compute_mean_square(x, vol_data, vol_moments, weight_matrix),
x0=initial_result.x, method="BFGS", options=options
)
if not final_result['success']:
logging.warning(final_result)
estimates = {key: val for key, val in zip(init_constants.keys(), final_result.x)}
vol_moments_data2 = vol_moments(vol_data, *final_result.x)
sliding_it2 = sliding_window_view(vol_moments_data2, window=len(win))
moment_cov2 = np.mean([(x.T * win).dot(x) for x in sliding_it2], axis=0)
moment_derivative = vol_moments_grad(vol_data, **estimates)
cov = pd.DataFrame(np.linalg.pinv(moment_derivative.T @ np.linalg.solve(moment_cov2, moment_derivative)),
columns=list(init_constants.keys()), index=list(init_constants.keys()))
if not final_result.success:
logging.warning("Convergence results are %s.\n", final_result)
return estimates, cov / (vol_data.size - 1)
def create_est_table(estimates, truth, cov, num_se=1.96):
"""
Create a table that prints the estimates, truth, and confidence interval.
    Parameters:
    --------
    estimates : dict
        The estimated values
truth : dict
The true values
cov : dataframe
Covariance matrix
num_se : positive float, optional
The number of standard errors to use.
Returns
-------
dataframe
"""
names = set(estimates.keys()).intersection(set(truth.keys()))
true_values = [truth[name] for name in names]
est_values = [estimates[name] for name in names]
lower_ci = [estimates[name] - num_se * np.sqrt(cov.loc[name, name]) for name in names]
upper_ci = [estimates[name] + num_se * np.sqrt(cov.loc[name, name]) for name in names]
return_df = pd.DataFrame(np.column_stack([true_values, est_values, lower_ci, upper_ci]),
columns=['truth', 'estimate', 'lower ci', 'upper ci'], index=names)
return_df.sort_index(inplace=True)
return_df['in_ci'] = ((return_df['truth'] >= return_df['lower ci'])
& (return_df['truth'] <= return_df['upper ci']))
return return_df
def cov_to_corr(cov):
"""Convert a covariance matrix to a correlation matrix."""
corr = pd.DataFrame(np.atleast_2d(np.diag(cov))**(-.5) * cov.values / np.atleast_2d(np.diag(cov)).T**.5,
index=cov.index, columns=cov.columns)
return corr
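# Editor's illustrative sketch (not part of the original module): cov_to_corr rescales a
# covariance matrix by the outer product of its standard deviations. Hypothetical helper,
# never called.
def _demo_cov_to_corr():
    cov = pd.DataFrame([[4.0, 2.0], [2.0, 9.0]], index=['a', 'b'], columns=['a', 'b'])
    corr = cov_to_corr(cov)
    assert np.allclose(np.diag(corr), 1.0)             # unit diagonal
    assert np.isclose(corr.loc['a', 'b'], 2.0 / 6.0)   # 2 / (sqrt(4) * sqrt(9))
    return corr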
def estimate_zeta(data, parameter_mapping=None):
"""Estimate the log_scaled covariance paramter."""
if parameter_mapping is None:
parameter_mapping = {'vol': 'psi', 'vol.shift(1)': 'beta', 'Intercept': 'gamma'}
wls_results = sm.WLS.from_formula('rtn ~ 1+ vol.shift(1) + vol', weights=data.vol**(-1), data=data).fit()
estimates = wls_results.params.rename(parameter_mapping)
estimates['zeta'] = np.mean(wls_results.wresid**2)
zeta_cov = pd.DataFrame(np.atleast_2d(np.cov(wls_results.wresid**2)) / data.shape[0],
index=['zeta'], columns=['zeta'])
return_cov = wls_results.cov_params().rename(columns=parameter_mapping).rename(parameter_mapping)
return_cov = return_cov.merge(zeta_cov, left_index=True, right_index=True, how='outer').fillna(0)
return_cov = return_cov.sort_index(axis=1).sort_index(axis=0)
return dict(estimates), return_cov
def estimate_params(data, vol_estimates=None, vol_cov=None):
"""
Estimate the reduced-form model in one step.
    Parameters
---------
data : ndarray
Must contain rtn and vol columns.
vol_estimates : dict
The volatility estimates.
vol_cov : dataframe
The volatility asymptotic covariance matrix.
Returns
------
estimates : dict
covariance : dataframe
"""
if vol_estimates is None or vol_cov is None:
        # First we compute the volatility parameters.
init_constants = compute_init_constants(data.vol)
vol_estimates, vol_cov = compute_vol_gmm(data.vol, init_constants=init_constants)
    # Then we compute the reduced-form parameters.
estimates, cov_1st_stage2 = estimate_zeta(data)
estimates.update(vol_estimates)
covariance = vol_cov.merge(cov_1st_stage2, left_index=True, right_index=True,
how='outer').fillna(0).sort_index(axis=1).sort_index(axis=0)
return estimates, covariance
def compute_omega(data, vol_estimates=None, vol_cov=None):
"""
    Compute the reduced-form parameters and their covariance matrix.
    Parameters
---------
data : ndarray
Must contain rtn and vol columns.
vol_estimates : dict
The volatility estimates.
vol_cov : dataframe
The volatility asymptotic covariance matrix.
Returns
------
omega_est : dict
omega_cov: dataframe
"""
if vol_estimates is None or vol_cov is None:
        # First we compute the volatility parameters.
init_constants = compute_init_constants(data.vol)
vol_estimates, vol_cov = compute_vol_gmm(data.vol, init_constants=init_constants)
    # Then we compute the reduced-form parameters.
reduced_form_estimates, cov_1st_stage2 = estimate_zeta(data)
reduced_form_estimates.update(vol_estimates)
reduced_form_cov = vol_cov.merge(cov_1st_stage2, left_index=True, right_index=True,
how='outer').fillna(0).sort_index(axis=1).sort_index(axis=0)
omega_names = ['beta', 'gamma', 'log_both', 'log_scale', 'psi', 'logit_rho', 'zeta']
covariance = reduced_form_cov.loc[omega_names, omega_names].sort_index(axis=0).sort_index(axis=1)
return {name: reduced_form_estimates[name] for name in omega_names}, covariance
def _qlr_in(prices, omega, omega_cov):
link_in = compute_link(prices=prices, omega=omega)
if not np.all(np.isfinite(prices)):
logging.warning(f"prices are {prices}")
return np.inf
try:
cov_prices = covariance_kernel(prices, prices, omega_cov=omega_cov,
omega=omega)
if not np.all(
|
np.isfinite(cov_prices)
|
numpy.isfinite
|
import numpy as np
import scipy.sparse
import scipy.stats
def Eudist2(x, y):
def square(m):
if isinstance(m, np.ndarray):
return m * m
else:
return m.multiply(m)
distance = -2 * (x @ y.T)
if not isinstance(distance, np.ndarray):
distance = distance.toarray()
distance += np.sum(square(x), axis=1).reshape((x.shape[0], 1))
distance += np.sum(square(y), axis=1).reshape((1, y.shape[0]))
return distance
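# Editor's illustrative sketch (not part of the original file): Eudist2 uses the expansion
# ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y row-wise, so it should agree with a direct pairwise
# computation on dense inputs. Hypothetical helper, never called.
def _demo_eudist2():
    rng = np.random.default_rng(0)
    x = rng.standard_normal((4, 3))
    y = rng.standard_normal((5, 3))
    fast = Eudist2(x, y)
    direct = ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=2)
    assert np.allclose(fast, direct)
    return fast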
def NormalizeFea(fea):
fea_norm = np.sum(
|
np.multiply(fea, fea)
|
numpy.multiply
|
import scipy.signal
import numpy as np
#===========================================================
# Routine by Luis-<NAME> (IPGP & IFSTTAR), Jan 2020.
#===========================================================
# Tapering with a Hanning window
def taper(x,p):
if p <= 0.0:
return x
else:
f0 = 0.5
f1 = 0.5
n = len(x)
nw = int(p*n)
if nw > 0:
ow = np.pi/nw
w = np.ones( n )
for i in range( nw ):
w[i] = f0 - f1 * np.cos(ow*i)
for i in range( n-nw,n ):
w[i] = 1.0 - w[i-n+nw]
return x * w
elif nw == 0:
return x
# Bitwise version
def next_power_of_2(n):
"""
Return next power of 2 greater than or equal to n
"""
return 2**(n-1).bit_length()
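# Editor's illustrative sketch (not part of the original routine): a 5% taper leaves the centre
# of the trace untouched while bringing the ends toward zero, and next_power_of_2 gives the
# zero-padded length used by pcc2 below. Hypothetical helper, never called.
def _demo_taper_and_padding():
    x = np.ones(100)
    xt = taper(x, 0.05)                          # 5 samples tapered at each end
    assert xt[0] < 1.0 and np.isclose(xt[50], 1.0)
    assert next_power_of_2(2 * len(x)) == 256
    return xt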
# PCC2 from Ventosa et al. (2019)
def pcc2(x1, x2, dt, lag0, lagu):
# Preprocessing
x1 = x1 - np.mean(x1)
x2 = x2 - np.mean(x2)
x1 = taper(x1, 0.05)
x2 = taper(x2, 0.05)
N = len(x1)
Nz = next_power_of_2( 2*N )
# Analytic signal and normalization
xa1 = scipy.signal.hilbert(x1)
xa2 = scipy.signal.hilbert(x2)
xa1 = xa1 / np.abs(xa1)
xa2 = xa2 / np.abs(xa2)
# Padding zeros
xa1 = np.append(xa1, np.zeros((Nz-N), dtype=np.complex_))
xa2 = np.append(xa2,
|
np.zeros((Nz-N), dtype=np.complex_)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Config
dataset = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_neg = False
mm = True
std = False
numpy_save = True
# Script name struct for report
script_name = 'npysave'
dl_ = '_'
name_struct_meta = "_N_scale_mem"
## Imports
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
## Random seed
np.random.seed(seed=0)
## Folder and files
fnames = os.listdir(path)
fnames.sort()
if 'README.md' in fnames:
fnames.remove('README.md')
print('last file: ', fnames[-1])
n = len(fnames)
print(n, ' files present')
## Read data
def read_challenge_data(input_file, return_header = False):
with open(input_file, 'r') as f:
header = f.readline().strip()
column_names = header.split('|')
data = np.loadtxt(f, delimiter='|')
# ignore SepsisLabel column if present
if column_names[-1] == 'SepsisLabel':
column_names = column_names[:-1]
data = data[:, :-1]
return (data)
def read_challenge_data_label(input_file, return_header = False):
with open(input_file, 'r') as f:
header = f.readline().strip()
column_names = header.split('|')
data = np.loadtxt(f, delimiter='|')
# ignore SepsisLabel column if present
if column_names[-1] == 'SepsisLabel':
sep_lab = data[:,-1]
column_names = column_names[:-1]
data = data[:, :-1]
if return_header:
return (data, sep_lab, column_names)
else:
return (data, sep_lab)
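# Editor's illustrative sketch (not part of the original script): the challenge files are
# plain-text, '|'-delimited tables with a header row and an optional trailing SepsisLabel
# column. The helper and its column names are hypothetical, and it is never called.
def _demo_read_challenge_data_label(tmp_file='_demo_patient.psv'):
    with open(tmp_file, 'w') as f:
        f.write('HR|O2Sat|Temp|SepsisLabel\n')
        f.write('80|97.0|36.5|0\n')
        f.write('82|96.5|36.6|1\n')
    data, sep_lab = read_challenge_data_label(tmp_file)
    os.remove(tmp_file)
    return data.shape, sep_lab                   # -> (2, 3), array([0., 1.])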
## Create the feature matrix
features = []
patient = []
sepsis_label = []
## read data
for i in range(n):
input_file = os.path.join(path, fnames[i])
if i ==0:
data, sep_lab, columns = read_challenge_data_label(input_file, return_header=True)
else:
data, sep_lab = read_challenge_data_label(input_file)
features.append(data)
sepsis_label.append(sep_lab)
    pat = i * np.ones(sep_lab.shape, dtype=int)
patient.append(pat)
feature_matrix = np.concatenate(features)
del(features)
sepsis_label = np.concatenate(sepsis_label)
patient =
|
np.concatenate(patient)
|
numpy.concatenate
|
# coding=utf-8
"""
Processing capabilities that are more general than the remaining module categories.
Available Functions
-------------------
[Public]
poincare
With this function the user can easily generate a Poincaré Plot (Heart rate variability
analysis).
smooth
Function intended to smooth a signal through the application of the convolution operation
between a moving average window (the signal is segmented into multiple parts) and a fixed window
    (from one of the predefined formats 'hanning', 'blackman'...) so that a weight is attributed
to each sample inside the moving window.
plotfft
This functions computes the Fast Fourier Transform of a signal, returning the frequency and magnitude values.
lowpass
For a given signal s rejects (attenuates) the frequencies higher than the cutoff frequency f and passes the
frequencies lower than that value by applying a Butterworth digital filter.
highpass
    For a given signal s rejects (attenuates) the frequencies lower than the cutoff frequency f and passes the
frequencies higher than that value by applying a Butterworth digital filter.
bandstop
For a given signal s rejects (attenuates) the frequencies within a certain range (between f1 and f2) and passes the
frequencies outside that range by applying a Butterworth digital filter.
bandpass
For a given signal s passes the frequencies within a certain range (between f1 and f2) and rejects (attenuates) the
frequencies outside that range by applying a Butterworth digital filter.
Observations/Comments
---------------------
None
/\
"""
import numpy
from scipy.signal import filtfilt, butter, lfilter
from .detect import tachogram
from .visualise import plot
def poincare(data, sample_rate, signal=False, in_seconds=False):
"""
-----
Brief
-----
Function for generation of Poincaré Plot (Heart rate variability analysis).
-----------
Description
-----------
    ECG signals measure the electric potential in the heart of the subject. In normal conditions, it is expected that
    the electric potential is similar across heartbeats and that the rhythm of those heartbeats is maintained if all
    the conditions are maintained. Thus, by plotting the current RR interval against the previous one, it is expected
    that the values are maintained. The Poincaré plot is this representation, which allows one to analyse the
    heart rate variability.
This function returns the x and y axis of a Poincaré plot and also the standard deviations of the more
representative directions of the data points.
----------
Parameters
----------
data : list
ECG signal or R peak list. When the input is a raw signal the input flag signal should be
True.
sample_rate : int
Sampling frequency.
signal : boolean
If True, then the data argument contains the set of the ECG acquired samples.
in_seconds : boolean
If the R peaks list defined as the input argument "data" contains the sample numbers where
the R peaks occur,
then in_seconds needs to be True.
Returns
-------
out : list, list, float, float
Poincaré plot x axis and y axis, respectively. Additionally it will be returned SD1 and SD2
parameters.
"""
# Generation of tachogram.
tachogram_data = tachogram(data, sample_rate, signal=signal, in_seconds=in_seconds,
out_seconds=True)[0]
# Poincaré Plot (x and y axis).
x_axis = tachogram_data[:-1]
y_axis = tachogram_data[1:]
# Poincaré Parameters.
tachogram_diff = numpy.diff(tachogram_data)
sdsd = numpy.std(tachogram_diff)
sdnn = numpy.std(tachogram_data)
sd1 = numpy.sqrt(0.5 * numpy.power(sdsd, 2))
sd2 = numpy.sqrt(2 * numpy.power(sdnn, 2) - numpy.power(sd1, 2))
return x_axis, y_axis, sd1, sd2
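# Editor's illustrative sketch (not part of the original module): the SD1/SD2 descriptors used
# above, computed directly on a synthetic RR-interval series instead of the tachogram of a raw
# ECG acquisition. Hypothetical helper, never called.
def _demo_poincare_descriptors():
    rr = 0.8 + 0.05 * numpy.sin(numpy.linspace(0, 6 * numpy.pi, 200))  # RR intervals in seconds
    sdsd = numpy.std(numpy.diff(rr))
    sdnn = numpy.std(rr)
    sd1 = numpy.sqrt(0.5 * numpy.power(sdsd, 2))
    sd2 = numpy.sqrt(2 * numpy.power(sdnn, 2) - numpy.power(sd1, 2))
    return rr[:-1], rr[1:], sd1, sd2             # same structure as the poincare() output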
def smooth(input_signal, window_len=10, window='hanning'):
"""
-----
Brief
-----
Smooth the data using a window with requested size.
-----------
Description
-----------
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that
transient parts are minimized in the beginning and end part of the output signal.
    The result of applying this function is analogous to the application of a mean filter in image
    processing. The result is the smoothed input_signal.
----------
Parameters
----------
input_signal: array-like
the input signal
window_len: int
        The dimension of the smoothing window; the default is 10.
    window: string
        The type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
        A flat window will produce a moving average smoothing; the default is 'hanning'.
Returns
-------
out : signal_filt: array-like the smoothed signal.
    @example:
    t = numpy.linspace(-2, 2, 50)
    input_signal = numpy.sin(t) + numpy.random.randn(len(t)) * 0.1
    signal_filt = smooth(input_signal)
@see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
numpy.convolve, scipy.signal.lfilter
@todo: the window parameter could be the window itself if an array instead
of a string
@bug: if window_len is equal to the size of the signal the returning
signal is smaller.
"""
if input_signal.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if input_signal.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return input_signal
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("""Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'""")
sig = numpy.r_[2 * input_signal[0] - input_signal[window_len:0:-1],
input_signal, 2 * input_signal[-1] - input_signal[-2:-window_len-2:-1]]
if window == 'flat': # moving average
win =
|
numpy.ones(window_len, 'd')
|
numpy.ones
|
import pandas as pd
from peer_fixed_effect.teacher_effect.structure.simple import SimpleTeacherEffectMixin
# df = pd.read_excel('tests/testdata/kinsler.xlsx', sheets='data')
# TeacherEffectMixin.get_teacher_effect_discounted_cumsum_except_now(df, df, id_n, time_n, eft_jt_n, sigma)
def test_data():
id_col = 'ids'
tid_col = 'tid'
grade_col = 'grade'
time_col = 'time'
y_col = 'y'
eft_i_col = 'effect_i'
eft_it_col = 'effect_it'
eft_jt_col = 'effect_tid_t'
# setup test data
import numpy as np
import pandas as pd
n_id = 5000
n_tid = 30
n_time = 3
grade_range = list(range(1, 7))
start_time_range = np.array(grade_range)[np.argsort(grade_range)[0:(len(grade_range) - n_time + 1)]]
persistence = 0.2
sigma = persistence
var_tid = 1 # iid
var_id = 1 # iid
ids = np.kron(np.arange(n_id).reshape((n_id, 1)), np.ones(shape=(n_time, 1)))
grade_id = (
np.kron(
np.random.choice(start_time_range, size=(n_id, 1), replace=True),
np.ones(shape=(n_time, 1))
) +
np.kron(
np.ones(shape=(n_id, 1)),
np.array(range(0,n_time)).reshape((n_time, 1))
)
)
effect_i = np.kron(
np.random.normal(0, scale=var_id, size=(n_id, 1)),
np.ones(shape=(n_time, 1))
)
effect_it = effect_i
noise_it = np.kron(
np.random.normal(0, scale=var_id, size=(n_id, 1))*1,
np.ones(shape=(n_time, 1))
)
id_tid = np.random.choice(np.arange(n_tid), size=(n_id*n_time), replace=True)
df_id = pd.DataFrame(
np.c_[ids, grade_id, id_tid, effect_i, effect_it, noise_it],
columns=['ids', 'grade', 'tid', 'effect_i', 'effect_it', 'noise_it']
)
tids = np.kron(np.arange(n_tid).reshape((n_tid, 1)), np.ones(shape=(len(grade_range), 1)))
grades_tid = np.kron(
np.ones(shape=(n_tid, 1)),
np.array(grade_range).reshape((len(grade_range), 1))
)
effect_tid = np.random.normal(0, scale=var_tid, size=(n_tid*len(grade_range), 1))
df_tid = pd.DataFrame(
np.c_[tids, grades_tid, effect_tid],
columns=['tid', 'grade', 'effect_tid_t'])
df = pd.merge(df_id, df_tid, on=['tid', 'grade']).sort_values(['ids', 'grade']).reset_index(drop=True)
df['max_grade'] = df.groupby(['ids'])['grade'].transform('max').sort_index()
df['cumsum_effect_tid_t'] = SimpleTeacherEffectMixin().get_teacher_effect_ijgt_discounted_cumsum(
df,
id_col='ids',
grade_col='grade',
eft_jt_col='effect_tid_t',
max_grade_col= 'max_grade',
sigma=persistence)
df['y'] = df['effect_it'] + df['cumsum_effect_tid_t'] + df['noise_it']
df.to_csv('test.csv', index=False)
print(df[['cumsum_effect_tid_t', 'effect_tid_t']])
    print('Note: this does not run when sigma=0')
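# Editor's illustrative sketch (not part of the original tests): the np.kron calls above build a
# balanced panel by repeating each unit id (and each draw of its start grade) n_time times.
# Hypothetical helper, never called.
def _demo_kron_panel(n_id=3, n_time=2):
    import numpy as np
    ids = np.kron(np.arange(n_id).reshape((n_id, 1)), np.ones(shape=(n_time, 1)))
    # ids.ravel() -> array([0., 0., 1., 1., 2., 2.])
    return ids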
def fixed_taecher():
id_col = 'ids'
tid_col = 'tid'
grade_col = 'grade'
time_col = 'time'
y_col = 'y'
eft_i_col = 'effect_i'
eft_it_col = 'effect_it'
eft_j_col = 'effect_tid'
eft_jt_col = 'effect_tid_t'
# setup test data
import numpy as np
import pandas as pd
n_id = 500
n_tid = 5000
n_time = 2
grade_range = list(range(1, 7))
start_time_range = np.array(grade_range)[np.argsort(grade_range)[0:(len(grade_range) - n_time + 1)]]
persistence = 0.8
sigma = persistence
var_tid = 1 # iid
var_id = 1 # iid
ids = np.kron(np.arange(n_id).reshape((n_id, 1)), np.ones(shape=(n_time, 1)))
grade_id = (
np.kron(
np.random.choice(start_time_range, size=(n_id, 1), replace=True),
np.ones(shape=(n_time, 1))
) +
np.kron(
np.ones(shape=(n_id, 1)),
np.array(range(0,n_time)).reshape((n_time, 1))
)
)
effect_i = np.kron(
np.random.normal(0, scale=var_id, size=(n_id, 1)),
|
np.ones(shape=(n_time, 1))
|
numpy.ones
|
"""This module contains tests of the substrates module."""
import os
import pickle
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from .. import substrates
SEED = 123
def test_free():
substrate = substrates.free()
npt.assert_equal(isinstance(substrate, substrates._Substrate), True)
npt.assert_equal(substrate.type, "free")
return
def test_sphere():
npt.assert_raises(ValueError, substrates.sphere, radius="r")
npt.assert_raises(ValueError, substrates.sphere, radius=-5e-6)
radius = 5e-6
substrate = substrates.sphere(radius)
npt.assert_equal(isinstance(substrate, substrates._Substrate), True)
npt.assert_equal(substrate.radius, radius)
npt.assert_equal(substrate.type, "sphere")
return
def test_cylinder():
orientation = np.array([1.0, 2, 0])
npt.assert_raises(
ValueError, substrates.cylinder, radius="r", orientation=orientation
)
npt.assert_raises(
ValueError, substrates.cylinder, radius=-5e-6, orientation=orientation
)
radius = 5e-6
npt.assert_raises(ValueError, substrates.cylinder, radius=radius, orientation="o")
npt.assert_raises(
ValueError, substrates.cylinder, radius=radius, orientation=np.arange(4)
)
npt.assert_raises(
ValueError,
substrates.cylinder,
radius=radius,
orientation=orientation.astype(int),
)
substrate = substrates.cylinder(radius, orientation)
npt.assert_equal(isinstance(substrate, substrates._Substrate), True)
npt.assert_equal(substrate.radius, radius)
npt.assert_equal(substrate.orientation, orientation / np.linalg.norm(orientation))
npt.assert_equal(substrate.type, "cylinder")
return
def test_ellipsoid():
npt.assert_raises(ValueError, substrates.ellipsoid, semiaxes="s")
npt.assert_raises(ValueError, substrates.ellipsoid, semiaxes=np.arange(4))
npt.assert_raises(
ValueError, substrates.ellipsoid, semiaxes=np.arange(3).astype(int)
)
semiaxes = np.array([5e-6, 1e-6, 10e-6])
npt.assert_raises(ValueError, substrates.ellipsoid, semiaxes=semiaxes, R="R")
npt.assert_raises(ValueError, substrates.ellipsoid, semiaxes=semiaxes, R=np.eye(4))
npt.assert_raises(
ValueError, substrates.ellipsoid, semiaxes=semiaxes, R=np.eye(3).astype(int)
)
npt.assert_raises(
ValueError, substrates.ellipsoid, semiaxes=semiaxes, R=np.zeros((3, 3))
)
substrate = substrates.ellipsoid(semiaxes)
npt.assert_equal(isinstance(substrate, substrates._Substrate), True)
npt.assert_equal(substrate.semiaxes, semiaxes)
npt.assert_equal(substrate.R, np.eye(3))
npt.assert_equal(substrate.type, "ellipsoid")
R = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]).astype(float)
substrate = substrates.ellipsoid(semiaxes, R)
npt.assert_equal(isinstance(substrate, substrates._Substrate), True)
npt.assert_equal(substrate.semiaxes, semiaxes)
npt.assert_equal(substrate.R, R)
npt.assert_equal(substrate.type, "ellipsoid")
return
def test_mesh():
mesh_path = os.path.join(
os.path.dirname(substrates.__file__), "tests", "sphere_mesh.pkl",
)
with open(mesh_path, "rb") as f:
example_mesh = pickle.load(f)
faces = example_mesh["faces"]
vertices = example_mesh["vertices"]
npt.assert_raises(
ValueError, substrates.mesh, vertices="v", faces=faces, periodic=True
)
npt.assert_raises(
ValueError, substrates.mesh, vertices=np.zeros(2), faces=faces, periodic=True
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=np.zeros((1, 4)),
faces=faces,
periodic=True,
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices.astype(int),
faces=faces,
periodic=True,
)
npt.assert_raises(
ValueError, substrates.mesh, vertices=vertices, faces="f", periodic=True,
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=np.zeros(2).astype(int),
periodic=True,
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=np.zeros((1, 4)).astype(int),
periodic=True,
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces.astype(float),
periodic=True,
)
npt.assert_raises(
ValueError, substrates.mesh, vertices=vertices, faces=faces, periodic=1,
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
padding="p",
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
padding=np.zeros(2),
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
padding=np.ones(3).astype(int),
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
init_pos=np.zeros(1),
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
init_pos=np.zeros((1, 4)),
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
init_pos=np.zeros((1, 3)).astype(int),
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
init_pos="s",
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
n_sv="n",
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
n_sv=np.zeros((3, 3)),
)
npt.assert_raises(
ValueError,
substrates.mesh,
vertices=vertices,
faces=faces,
periodic=True,
n_sv=np.zeros((3)).astype(float),
)
substrate = substrates.mesh(vertices, faces, True)
npt.assert_equal(substrate.type, "mesh")
return
def test__cross_product():
np.random.seed(SEED)
for _ in range(100):
a = np.random.random(3) - 0.5
b = np.random.random(3) - 0.5
npt.assert_almost_equal(substrates._cross_product(a, b), np.cross(a, b))
return
def test__dot_product():
np.random.seed(SEED)
for _ in range(100):
a = np.random.random(3) - 0.5
b = np.random.random(3) - 0.5
npt.assert_almost_equal(substrates._dot_product(a, b), np.dot(a, b))
return
def test__triangle_box_overlap():
triangle = np.array([[0.5, 0.7, 0.3], [0.9, 0.5, 0.2], [0.6, 0.9, 0.8]])
box = np.array([[0.1, 0.3, 0.1], [0.4, 0.7, 0.5]])
npt.assert_equal(substrates._triangle_box_overlap(triangle, box), False)
triangle = np.array([[0.4, 0.7, 0.2], [0.9, 0.5, 0.2], [0.6, 0.9, 0.2]])
box = np.array([[0.4, 0.4, 0.3], [0.5, 0.8, 0.6]])
npt.assert_equal(substrates._triangle_box_overlap(triangle, box), False)
triangle = np.array(
[
[0.63149023, 0.44235872, 0.77212144],
[0.25125724, 0.00087658, 0.66026559],
[0.8319006, 0.52731735, 0.22859846],
]
)
box = np.array(
[[0.33109806, 0.16637023, 0.91545459], [0.79806038, 0.83915475, 0.38118002],]
)
npt.assert_equal(substrates._triangle_box_overlap(triangle, box), True)
return
def manual_test__triangle_box_overlap():
"""Useful function for visually checking the performance of the triangle-
box overlap function."""
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
triangle = np.random.random((3, 3)) - 0.25
tri = Poly3DCollection(triangle)
tri.set_facecolor("tab:green")
ax.add_collection3d(tri)
box = np.random.random((2, 3)) - 0.25
vertices, faces = substrates._aabb_to_mesh(box[0], box[1])
for idx in faces:
tri = Poly3DCollection(vertices[idx], alpha=0.5)
tri.set_facecolor("tab:blue")
ax.add_collection3d(tri)
ax.set_title(substrates._triangle_box_overlap(triangle, box))
plt.show()
return triangle, box
def test__interval_sv_overlap_1d():
xs = np.arange(11)
npt.assert_equal(substrates._interval_sv_overlap(xs, 0, 0), (0, 1))
npt.assert_equal(substrates._interval_sv_overlap(xs, 0, 1.5), (0, 2))
npt.assert_equal(substrates._interval_sv_overlap(xs, 9.5, 1.5), (1, 10))
npt.assert_equal(substrates._interval_sv_overlap(xs, -1.1, 0.5), (0, 1))
npt.assert_equal(substrates._interval_sv_overlap(xs, 9.5, 11.5), (9, 10))
return
def test__triangle_aabb():
triangle = np.array([[0.5, 0.7, 0.3], [0.9, 0.5, 0.2], [0.6, 0.9, 0.8]])
npt.assert_equal(
substrates._triangle_aabb(triangle),
np.vstack((np.min(triangle, axis=0), np.max(triangle, axis=0))),
)
return
def test__box_subvoxel_overlap():
xs = np.arange(6)
ys = np.arange(11)
zs = np.arange(21)
box = np.array([[2.5, 5.0, 2.2], [9.2, 9.5, 20]])
subvoxels = np.array([[2, 5], [5, 10], [2, 20]])
npt.assert_equal(substrates._box_subvoxel_overlap(box, xs, ys, zs), subvoxels)
return
def test__mesh_space_subdivision():
mesh_path = os.path.join(
os.path.dirname(substrates.__file__), "tests", "sphere_mesh.pkl",
)
with open(mesh_path, "rb") as f:
example_mesh = pickle.load(f)
faces = example_mesh["faces"]
vertices = example_mesh["vertices"]
voxel_size = np.max(vertices, axis=0)
n_sv = np.array([2, 5, 10])
xs, ys, zs, triangle_indices, subvoxel_indices = substrates._mesh_space_subdivision(
vertices, faces, voxel_size, n_sv
)
npt.assert_almost_equal(xs, np.linspace(0, voxel_size[0], n_sv[0] + 1))
npt.assert_almost_equal(ys,
|
np.linspace(0, voxel_size[1], n_sv[1] + 1)
|
numpy.linspace
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from jittermodel import u, q2unitless
from jittermodel.simulation import (Simulation, SphereCapacitance, _alpha,
sum_sinh, _eta, _lambda, _thetaI,
_thetaII)
from jittermodel._sim import _thetaI_c
from jittermodel.base import Cantilever, Experiment, Transistor
from numpy.testing import assert_allclose
from nose.tools import eq_, assert_almost_equal, assert_raises
from bunch import Bunch
from jittermodel.tests import expected_failure
import unittest
u.d = u.dimensionless # For brevity
import mpmath as mp
def mp_sum_sinh(alpha):
"""Implements the infinite sum using mpmath, at very high precision.
Method 'r+s+e' was found to work accurately for all values of alpha,
    unlike most other algorithms in Mathematica, python, etc."""
summand = lambda n: mp.sinh(alpha) / mp.sinh(alpha * n)
return mp.nsum(summand, [1, mp.inf], method='r+s+e')
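# Editor's illustrative sketch (not part of the original tests): the terms sinh(alpha)/sinh(alpha*n)
# decay roughly like exp(-alpha*(n-1)), so for large alpha the sum is only slightly above its
# n = 1 term (which equals 1), while small alpha needs many terms. Hypothetical helper, never called.
def _demo_mp_sum_sinh():
    large_alpha = float(mp_sum_sinh(10))    # slightly above 1
    small_alpha = float(mp_sum_sinh(0.01))  # considerably larger; the series decays slowly
    return large_alpha, small_alpha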
class Test_sum_sinh(unittest.TestCase):
@staticmethod
def test_sum_sinh():
"""Test that the sum is working properly for a range of alpha values.
The mpmath module is used to verify that the sum meets error
specifications.
"""
        alphas = [2 ** i for i in range(-12, 7)]
results = [sum_sinh(alpha) for alpha in alphas]
mp_results = [mp_sum_sinh(alpha) for alpha in alphas]
for mp_result, test_result in zip(mp_results, results):
assert_almost_equal(mp_result, test_result, 7)
class MockSimulationCapacitance(object):
"""A mock simulation object only containing the parameters necessary to
test SphereCapacitance"""
units = {"[mass]": u.pg, "[length]": u.um, "[time]": u.ms,
"[current]": u.aC / u.ms, "[temperature]": u.K, "[angle]": u.rad}
E_0 = q2unitless(u.epsilon_0, units)
q = q2unitless(u.elementary_charge, units)
k_B = q2unitless(u.boltzmann_constant, units)
Samp = Bunch(h=0.1, E_s1=3)
Cant = Bunch(R_tip=0.05)
Expt = Bunch(d=0.15)
def __init__(self):
self.sphere = SphereCapacitance(self)
# TODO: Where do these test cases come from?
class TestSphereCapacitance(unittest.TestCase):
def setUp(self):
self.sim = MockSimulationCapacitance()
def test_C(self):
assert_almost_equal(0.00623177, self.sim.sphere.C())
def test_Cd(self):
assert_almost_equal(-0.00322151, self.sim.sphere.Cd())
def test_Cd2(self):
assert_almost_equal(0.0311542, self.sim.sphere.Cd2())
class TestSimulation(unittest.TestCase):
@staticmethod
def test_init_Simulation():
cant = Cantilever(f_c=50*u.kHz, k_c=3.5*u.N/u.m, Q=20000*u.d,
R_tip=40*u.nm, L_tip=15*u.um, theta_tip=16*u.degrees,
geometry_c='perpendicular')
trans = Transistor(semiconductor='TPD', h=70 * u.nm, h_trans=1 * u.nm,
h_i=300 * u.nm, E_s1=3.5, E_s2=-0.0005, E_i1=4.65,
E_i2=0, mobility=3e-6 * u.cm ** 2 / u.V / u.s,
T=298 * u.K, V_g=10 * u.V, rho=None)
expt = Experiment(d=100 * u.nm, V_ts=5 * u.V, jitter_f_i=0.2 * u.Hz,
jitter_f_f=3 * u.Hz)
sim = Simulation(cant, trans, expt)
# Test some properties are correct
eq_(sim.Cant.f_c, 50)
eq_(sim.Expt.d, 0.1)
eq_(sim.Samp.h_i, 0.3)
assert_almost_equal(sim.Samp.diff, 0.0077038955272097955)
# These tests are all generated by implementing sympy code for the functions in
# validate-im-dielectric.ipynb. That should be a good comparison; sympy
# uses mpmath as a backend for its infinite-precision arithmetic, so this
# should be robust against ordinary floating point errors.
class TestImDielectricHelperFunctions(unittest.TestCase):
@staticmethod
def test__eta():
k = np.array([1, 10, 100, 1000, 10000, 100000])
kappa = 3500
E_s = 3 - 0.001j
D = 0.005
omega = 300
# Expected values were calculated using sympy,
# to 15 digits of precision.
# See test_verification/validate-im-dielectric
expected_eta = np.array([2020.78311260126 + 15.182507854811j,
2020.80760652432 + 15.182323829782j,
2023.25550170076 + 15.163955048756j,
2254.66583909462 + 13.607584302718j,
10202.1243828582 + 3.007271263178j,
100020.414581093 + 0.30674293451j])
eta = _eta(k, kappa, E_s, D, omega)
assert_allclose(eta, expected_eta)
@staticmethod
def test__lambda():
k = np.array([1, 10, 100, 1000, 10000, 100000])
eta = np.array([2020.78311260126 + 15.182507854811j,
2020.80760652432 + 15.182323829782j,
2023.25550170076 + 15.163955048756j,
2254.66583909462 + 13.607584302718j,
10202.1243828582 + 3.007271263178j,
100020.414581093 + 0.30674293451j])
E_eff = 3 - 100j
E_s = 3 - 0.001j
expected_lambda = np.array([0.0001184255261724 + 0.0164941987549306j,
0.00118421087011718 + 0.164939988752172j,
0.0117978533636026 + 1.64740475175451j,
0.0842948437214929 + 14.7834985873234j,
-0.00125999301746353 + 32.672603689536j,
-0.0110065260871034 + 33.3261929274151j])
Lambda = _lambda(k, eta, E_eff, E_s)
assert_allclose(Lambda, expected_lambda)
@staticmethod
def test_thetaI():
k = np.array([1, 10, 100, 1000, 10000, 100000])
eta = np.array([2020.78311260126 + 15.182507854811j,
2020.80760652432 + 15.182323829782j,
2023.25550170076 + 15.163955048756j,
2254.66583909462 + 13.607584302718j,
10202.1243828582 + 3.007271263178j,
100020.414581093 + 0.30674293451j])
Lambda = np.array([0.0001184255261724 + 0.0164941987549306j,
0.00118421087011718 + 0.164939988752172j,
0.0117978533636026 + 1.64740475175451j,
0.0842948437214929 + 14.7834985873234j,
-0.00125999301746353 + 32.672603689536j,
-0.0110065260871034 + 33.3261929274151j])
expected_thetaI = np.array([0.00157126996626562 + 0.0210682675809495j,
0.00672782406000677 + 0.0281575198774334j,
0.050275664263775 + 0.0281213204722464j,
0.443934273416263 + 0.0140052914999941j,
0.980197277948465 + 0.000305155415174606j,
0.999795989512753 + 3.05416795636227e-6j])
h_s = 0.1
alpha = 0.65 - 0.0002j
E_s = 3 - 0.001j
E_eff = 3 - 100j
thetaI = [_thetaI(_k, h_s, alpha, _Lambda, _eta, E_s, E_eff) for
_k, _Lambda, _eta in zip(k, Lambda, eta)]
thetaI = np.array(thetaI)
assert_allclose(expected_thetaI, thetaI)
@staticmethod
def test_thetaI_c():
k = np.array([1, 10, 100, 1000, 10000, 100000])
eta = np.array([2020.78311260126 + 15.182507854811j,
2020.80760652432 + 15.182323829782j,
2023.25550170076 + 15.163955048756j,
2254.66583909462 + 13.607584302718j,
10202.1243828582 + 3.007271263178j,
100020.414581093 + 0.30674293451j])
Lambda = np.array([0.0001184255261724 + 0.0164941987549306j,
0.00118421087011718 + 0.164939988752172j,
0.0117978533636026 + 1.64740475175451j,
0.0842948437214929 + 14.7834985873234j,
-0.00125999301746353 + 32.672603689536j,
-0.0110065260871034 + 33.3261929274151j])
expected_thetaI = np.array([0.00157126996626562 + 0.0210682675809495j,
0.00672782406000677 + 0.0281575198774334j,
0.050275664263775 + 0.0281213204722464j,
0.443934273416263 + 0.0140052914999941j,
0.980197277948465 + 0.000305155415174606j,
0.999795989512753 + 3.05416795636227e-6j])
h_s = 0.1
alpha = 0.65 - 0.0002j
E_s = 3 - 0.001j
E_eff = 3 - 100j
thetaI = [_thetaI_c(_k, h_s, alpha, _Lambda, _eta, E_s, E_eff) for
_k, _Lambda, _eta in zip(k, Lambda, eta)]
thetaI = np.array(thetaI)
assert_allclose(expected_thetaI, thetaI)
@staticmethod
def test_thetaII():
k = np.array([1, 10, 100, 1000, 10000, 100000])  # api: numpy.array
import numpy as np
import sys
import os
import asdf
import matplotlib.pyplot as plt
from numpy import log10
from scipy.integrate import simps
from astropy.io import fits
from matplotlib.ticker import FormatStrFormatter
from .function import *
from .function_class import Func
from .basic_func import Basic
import corner
col = ['violet', 'indigo', 'b', 'lightblue', 'lightgreen', 'g', 'orange', 'coral', 'r', 'darkred']#, 'k']
#col = ['darkred', 'r', 'coral','orange','g','lightgreen', 'lightblue', 'b','indigo','violet','k']
def plot_sed(MB, flim=0.01, fil_path='./', scale=1e-19, f_chind=True, figpdf=False, save_sed=True, inputs=False, \
mmax=300, dust_model=0, DIR_TMP='./templates/', f_label=False, f_bbbox=False, verbose=False, f_silence=True, \
f_fill=False, f_fancyplot=False, f_Alog=True, dpi=300, f_plot_filter=True):
'''
Parameters
----------
MB.SNlim : float
S/N limit; fluxes above this are plotted as detections, lower-S/N points as upper limits.
f_chind : bool
If True, include non-detections in the chi2 calculation, following Sawicki (2012).
mmax : int
Number of MCMC realizations used for the plot (not for the calculation).
f_fancyplot : bool
If True, plot each SED component separately.
f_fill : bool
If True (together with f_fancyplot), fill each SED component.
Returns
-------
plots
'''
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.optimize import curve_fit
from numpy import asarray as ar, exp  # scipy no longer re-exports these numpy functions
import matplotlib
import scipy.integrate as integrate
import scipy.special as special
import os.path
from astropy.io import ascii
import time
if f_silence:
import matplotlib
matplotlib.use("Agg")
def gaus(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))
lcb = '#4682b4' # line color, blue
fnc = MB.fnc
bfnc = MB.bfnc
ID = MB.ID
Z = MB.Zall
age = MB.age
nage = MB.nage
tau0 = MB.tau0
#col = ['violet', 'indigo', 'b', 'lightblue', 'lightgreen', 'g', 'orange', 'coral', 'r', 'darkred']#, 'k']
NUM_COLORS = len(age)
cm = plt.get_cmap('gist_rainbow')
col = [cm(1 - 1.*i/NUM_COLORS) for i in range(NUM_COLORS)]
nstep_plot = 1
if MB.f_bpass:
nstep_plot = 30
SNlim = MB.SNlim
################
# RF colors.
home = os.path.expanduser('~')
c = MB.c
chimax = 1.
m0set = MB.m0set
Mpc_cm = MB.Mpc_cm
d = MB.d * scale
##################
# Fitting Results
##################
DIR_FILT = MB.DIR_FILT
SFILT = MB.filts
try:
f_err = MB.ferr
except:
f_err = 0
###########################
# Open result file
###########################
file = MB.DIR_OUT + 'summary_' + ID + '.fits'
hdul = fits.open(file)
ndim_eff = hdul[0].header['NDIM']
# Redshift MC
zp16 = hdul[1].data['zmc'][0]
zp50 = hdul[1].data['zmc'][1]
zp84 = hdul[1].data['zmc'][2]
# Stellar mass MC
M16 = hdul[1].data['ms'][0]
M50 = hdul[1].data['ms'][1]
M84 = hdul[1].data['ms'][2]
if verbose:
print('Total stellar mass is %.2e'%(M50))
# Amplitude MC
A50 = np.zeros(len(age), dtype='float')
A16 = np.zeros(len(age), dtype='float')
A84 = np.zeros(len(age), dtype='float')
for aa in range(len(age)):
A16[aa] = 10**hdul[1].data['A'+str(aa)][0]
A50[aa] = 10**hdul[1].data['A'+str(aa)][1]
A84[aa] = 10**hdul[1].data['A'+str(aa)][2]
Asum = np.sum(A50)
aa = 0
Av16 = hdul[1].data['Av'+str(aa)][0]
Av50 = hdul[1].data['Av'+str(aa)][1]
Av84 = hdul[1].data['Av'+str(aa)][2]
AAv = [Av50]
Z50 = np.zeros(len(age), dtype='float')
Z16 = np.zeros(len(age), dtype='float')
Z84 = np.zeros(len(age), dtype='float')
NZbest = np.zeros(len(age), dtype='int')
for aa in range(len(age)):
Z16[aa] = hdul[1].data['Z'+str(aa)][0]
Z50[aa] = hdul[1].data['Z'+str(aa)][1]
Z84[aa] = hdul[1].data['Z'+str(aa)][2]
NZbest[aa]= bfnc.Z2NZ(Z50[aa])
# Light weighted Z.
ZZ50 = np.sum(Z50*A50)/np.sum(A50)
# FIR Dust;
try:
MD16 = hdul[1].data['MDUST'][0]
MD50 = hdul[1].data['MDUST'][1]
MD84 = hdul[1].data['MDUST'][2]
TD16 = hdul[1].data['TDUST'][0]
TD50 = hdul[1].data['TDUST'][1]
TD84 = hdul[1].data['TDUST'][2]
nTD16 = hdul[1].data['nTDUST'][0]
nTD50 = hdul[1].data['nTDUST'][1]
nTD84 = hdul[1].data['nTDUST'][2]
DFILT = inputs['FIR_FILTER'] # filter band string.
DFILT = [x.strip() for x in DFILT.split(',')]
DFWFILT = fil_fwhm(DFILT, DIR_FILT)
if verbose:
print('Total dust mass is %.2e'%(MD50))
f_dust = True
except:
f_dust = False
chi = hdul[1].data['chi'][0]
chin = hdul[1].data['chi'][1]
fitc = chin
Cz0 = hdul[0].header['Cz0']
Cz1 = hdul[0].header['Cz1']
zbes = zp50
zscl = (1.+zbes)
###############################
# Data taken from
###############################
if MB.f_dust:
MB.dict = MB.read_data(Cz0, Cz1, zbes, add_fir=True)
else:
MB.dict = MB.read_data(Cz0, Cz1, zbes)
NR = MB.dict['NR']
x = MB.dict['x']
fy = MB.dict['fy']
ey = MB.dict['ey']
con0 = (NR<1000)
xg0 = x[con0]
fg0 = fy[con0]
eg0 = ey[con0]
con1 = (NR>=1000) & (NR<10000)
xg1 = x[con1]
fg1 = fy[con1]
eg1 = ey[con1]
if len(xg0)>0 or len(xg1)>0:
f_grsm = True
else:
f_grsm = False
wht = fy * 0
con_wht = (ey>0)
wht[con_wht] = 1./np.square(ey[con_wht])
# BB data points;
NRbb = MB.dict['NRbb']
xbb = MB.dict['xbb']
fybb = MB.dict['fybb']
eybb = MB.dict['eybb']
exbb = MB.dict['exbb']
snbb = fybb/eybb
######################
# Weight by line
######################
wh0 = 1./np.square(eg0)
LW0 = []
model = fg0
wht3 = check_line_man(fy, x, wht, fy, zbes, LW0)
######################
# Mass-to-Light ratio.
######################
ms = np.zeros(len(age), dtype='float')
af = MB.af
sedpar = af['ML']
for aa in range(len(age)):
ms[aa] = sedpar['ML_' + str(int(NZbest[aa]))][aa]
try:
isochrone = af['isochrone']
LIBRARY = af['library']
except:
isochrone = ''
LIBRARY = ''
#############
# Plot.
#############
# Set the inset.
if f_grsm or f_dust:
fig = plt.figure(figsize=(7.,3.2))
fig.subplots_adjust(top=0.98, bottom=0.16, left=0.1, right=0.99, hspace=0.15, wspace=0.25)
ax1 = fig.add_subplot(111)
xsize = 0.29
ysize = 0.25
if f_grsm:
ax2t = ax1.inset_axes((1-xsize-0.01,1-ysize-0.01,xsize,ysize))
if f_dust:
ax3t = ax1.inset_axes((0.7,.35,.28,.25))
else:
fig = plt.figure(figsize=(5.5,2.2))
fig.subplots_adjust(top=0.98, bottom=0.16, left=0.1, right=0.99, hspace=0.15, wspace=0.25)
ax1 = fig.add_subplot(111)
#######################################
# D.Kelson like Box for BB photometry
#######################################
col_dat = 'r'
if f_bbbox:
for ii in range(len(xbb)):
if eybb[ii]<100 and fybb[ii]/eybb[ii]>1:
xx = [xbb[ii]-exbb[ii],xbb[ii]-exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]+exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]-exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]-exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
else: # Normal BB plot;
# Detection;
conbb_hs = (fybb/eybb>SNlim)
ax1.errorbar(xbb[conbb_hs], fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d, \
yerr=eybb[conbb_hs]*c/np.square(xbb[conbb_hs])/d, color='k', linestyle='', linewidth=0.5, zorder=4)
ax1.plot(xbb[conbb_hs], fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d, \
marker='.', color=col_dat, linestyle='', linewidth=0, zorder=4, ms=8)#, label='Obs.(BB)')
try:
# For any data removed from the fit (i.e. IRAC excess):
data_ex = ascii.read(DIR_TMP + 'bb_obs_' + ID + '_removed.cat')
NR_ex = data_ex['col1']
except:
NR_ex = []
# Upperlim;
sigma = 1.0
leng = np.max(fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d) * 0.05 #0.2
conebb_ls = (fybb/eybb<=SNlim) & (eybb>0)
for ii in range(len(xbb)):
if NRbb[ii] in NR_ex[:]:
conebb_ls[ii] = False
ax1.errorbar(xbb[conebb_ls], eybb[conebb_ls] * c / np.square(xbb[conebb_ls]) / d * sigma, yerr=leng,\
uplims=eybb[conebb_ls] * c / np.square(xbb[conebb_ls]) / d * sigma, linestyle='', color=col_dat, marker='', ms=4, label='', zorder=4, capsize=3)
# For any data removed from the fit (i.e. IRAC excess):
f_exclude = False
try:
col_ex = 'lawngreen'
#col_ex = 'limegreen'
#col_ex = 'r'
# Currently, this file is made after FILTER_SKIP;
data_ex = ascii.read(DIR_TMP + 'bb_obs_' + ID + '_removed.cat')
x_ex = data_ex['col2']
fy_ex = data_ex['col3']
ey_ex = data_ex['col4']
ex_ex = data_ex['col5']
ax1.errorbar(x_ex, fy_ex * c / np.square(x_ex) / d, \
xerr=ex_ex, yerr=ey_ex*c/np.square(x_ex)/d, color='k', linestyle='', linewidth=0.5, zorder=5)
ax1.scatter(x_ex, fy_ex * c / np.square(x_ex) / d, marker='s', color=col_ex, edgecolor='k', zorder=5, s=30)
f_exclude = True
except:
pass
#####################################
# Open ascii file and stock to array.
lib = fnc.open_spec_fits(fall=0)
lib_all = fnc.open_spec_fits(fall=1, orig=True)
#lib_all_conv = fnc.open_spec_fits(fall=1)
if f_dust:
DT0 = float(inputs['TDUST_LOW'])
DT1 = float(inputs['TDUST_HIG'])
dDT = float(inputs['TDUST_DEL'])
Temp = np.arange(DT0,DT1,dDT)
iimax = len(nage)-1
# FIR dust plot;
if f_dust:
from lmfit import Parameters
par = Parameters()
par.add('MDUST',value=MD50)
par.add('TDUST',value=nTD50)
par.add('zmc',value=zp50)
y0d, x0d = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust_all)
y0d_cut, x0d_cut = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust)
# data;
dat_d = ascii.read(MB.DIR_TMP + 'bb_dust_obs_' + MB.ID + '.cat')
NRbbd = dat_d['col1']
xbbd = dat_d['col2']
fybbd = dat_d['col3']
eybbd = dat_d['col4']
exbbd = dat_d['col5']
snbbd = fybbd/eybbd
try:
conbbd_hs = (fybbd/eybbd>SNlim)
ax1.errorbar(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
yerr=eybbd[conbbd_hs]*c/np.square(xbbd[conbbd_hs])/d, color='k', linestyle='', linewidth=0.5, zorder=4)
ax1.plot(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
'.r', linestyle='', linewidth=0, zorder=4)#, label='Obs.(BB)')
ax3t.plot(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
'.r', linestyle='', linewidth=0, zorder=4)#, label='Obs.(BB)')
except:
pass
try:
conebbd_ls = (fybbd/eybbd<=SNlim)
ax1.errorbar(xbbd[conebbd_ls], eybbd[conebbd_ls] * c / np.square(xbbd[conebbd_ls]) / d, \
yerr=fybbd[conebbd_ls]*0+np.max(fybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d)*0.05, \
uplims=eybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d, color='r', linestyle='', linewidth=0.5, zorder=4)
ax3t.errorbar(xbbd[conebbd_ls], eybbd[conebbd_ls] * c / np.square(xbbd[conebbd_ls]) / d, \
yerr=fybbd[conebbd_ls]*0+np.max(fybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d)*0.05, \
uplims=eybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d, color='r', linestyle='', linewidth=0.5, zorder=4)
except:
pass
#
# This is for UVJ color time evolution.
#
Asum = np.sum(A50[:])
alp = .5
for jj in range(len(age)):
ii = int(len(nage) - jj - 1) # from old to young templates.
if jj == 0:
y0, x0 = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib_all)
y0p, x0p = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib)
ysum = y0
ysump = y0p
nopt = len(ysump)
f_50_comp = np.zeros((len(age),len(y0)),'float')
# Keep each component;
f_50_comp[ii,:] = y0[:] * c / np.square(x0) / d
if f_dust:
ysump[:] += y0d_cut[:nopt]
ysump = np.append(ysump,y0d_cut[nopt:])
# Keep each component;
f_50_comp_dust = y0d * c / np.square(x0d) / d
else:
y0_r, x0_tmp = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib_all)
y0p, x0p = fnc.tmp03(A50[ii], AAv[0], ii, Z50[ii], zbes, lib)
ysum += y0_r
ysump[:nopt] += y0p
f_50_comp[ii,:] = y0_r[:] * c / np.square(x0_tmp) / d
# The following needs to be revised.
f_uvj = False
if f_uvj:
if jj == 0:
fwuvj = open(MB.DIR_OUT + ID + '_uvj.txt', 'w')
fwuvj.write('# age uv vj\n')
ysum_wid = ysum * 0
for kk in range(0,ii+1,1):
tt = int(len(nage) - kk - 1)
nn = int(len(nage) - ii - 1)
nZ = bfnc.Z2NZ(Z50[tt])
y0_wid, x0_wid = fnc.open_spec_fits_dir(tt, nZ, nn, AAv[0], zbes, A50[tt])
ysum_wid += y0_wid
lmrest_wid = x0_wid/(1.+zbes)
band0 = ['u','v','j']
lmconv,fconv = filconv(band0, lmrest_wid, ysum_wid, fil_path) # f0 in fnu
fu_t = fconv[0]
fv_t = fconv[1]
fj_t = fconv[2]
uvt = -2.5*log10(fu_t/fv_t)
vjt = -2.5*log10(fv_t/fj_t)
fwuvj.write('%.2f %.3f %.3f\n'%(age[ii], uvt, vjt))
fwuvj.close()
#############
# Main result
#############
conbb_ymax = (xbb>0) & (fybb>0) & (eybb>0) & (fybb/eybb>1)
ymax = np.max(fybb[conbb_ymax]*c/np.square(xbb[conbb_ymax])/d) * 1.6
xboxl = 17000
xboxu = 28000
ax1.set_xlabel('Observed wavelength ($\mathrm{\mu m}$)', fontsize=12)
ax1.set_ylabel('Flux ($10^{%d}\mathrm{erg}/\mathrm{s}/\mathrm{cm}^{2}/\mathrm{\AA}$)'%(np.log10(scale)),fontsize=12,labelpad=-2)
x1min = 2000
x1max = 100000
xticks = [2500, 5000, 10000, 20000, 40000, 80000, x1max]
xlabels= ['0.25', '0.5', '1', '2', '4', '8', '']
if f_dust:
x1max = 400000
xticks = [2500, 5000, 10000, 20000, 40000, 80000, 400000]
xlabels= ['0.25', '0.5', '1', '2', '4', '8', '']
#if x1max < np.max(xbb[conbb_ymax]):
# x1max = np.max(xbb[conbb_ymax]) * 1.5
if x1max < np.max(xbb):
x1max = np.max(xbb) * 1.5
if x1min > np.min(xbb[conbb_ymax]):
x1min = np.min(xbb[conbb_ymax]) / 1.5
ax1.set_xlim(x1min, x1max)
ax1.set_xscale('log')
if f_plot_filter:
scl_yaxis = 0.2
else:
scl_yaxis = 0.1
ax1.set_ylim(-ymax*scl_yaxis,ymax)
ax1.text(x1min+100,-ymax*0.08,'SNlimit:%.1f'%(SNlim),fontsize=8)
ax1.set_xticks(xticks)
ax1.set_xticklabels(xlabels)
dely1 = 0.5
while (ymax-0)/dely1<1:
dely1 /= 2.
while (ymax-0)/dely1>4:
dely1 *= 2.
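# The two loops above refine or coarsen the major-tick spacing dely1 by factors
# of two until between one and four major ticks fit in [0, ymax].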
y1ticks = np.arange(0, ymax, dely1)
ax1.set_yticks(y1ticks)
ax1.set_yticklabels(np.arange(0, ymax, dely1), minor=False)
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax1.yaxis.labelpad = 1.5
xx = np.arange(100,400000)
yy = xx * 0
ax1.plot(xx, yy, ls='--', lw=0.5, color='k')
#############
# Plot
#############
eAAl = np.zeros(len(age),dtype='float')
eAAu = np.zeros(len(age),dtype='float')
eAMl = np.zeros(len(age),dtype='float')
eAMu = np.zeros(len(age),dtype='float')
MSsum = np.sum(ms)
Asum = np.sum(A50)
A50 /= Asum
A16 /= Asum
A84 /= Asum
AM50 = A50 * M50 * ms / MSsum
CM = M50/np.sum(AM50)
AM50 = A50 * M50 * ms / MSsum * CM
AM16 = A16 * M50 * ms / MSsum * CM
AM84 = A84 * M50 * ms / MSsum * CM
AC50 = A50 * 0 # Cumulative
for ii in range(len(A50)):
eAAl[ii] = A50[ii] - A16[ii]
eAAu[ii] = A84[ii] - A50[ii]
eAMl[ii] = AM50[ii] - AM16[ii]
eAMu[ii] = AM84[ii] - AM50[ii]
AC50[ii] = np.sum(AM50[ii:])
################
# Lines
################
LN = ['Mg2', 'Ne5', 'O2', 'Htheta', 'Heta', 'Ne3', 'Hdelta', 'Hgamma', 'Hbeta', 'O3', 'O3', 'Mgb', 'Halpha', 'S2L', 'S2H']
FLW = np.zeros(len(LN),dtype='int')
####################
# For cosmology
####################
DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminosity distance in cm
Cons = (4.*np.pi*DL**2/(1.+zbes))
if f_grsm:
print('This function (write_lines) needs to be revised.')
write_lines(ID, zbes, DIR_OUT=MB.DIR_OUT)
##########################
# Zoom in Line regions
##########################
if f_grsm:
conspec = (NR<10000) #& (fy/ey>1)
#ax2t.fill_between(xg1, (fg1-eg1) * c/np.square(xg1)/d, (fg1+eg1) * c/np.square(xg1)/d, lw=0, color='#DF4E00', zorder=10, alpha=0.7, label='')
#ax2t.fill_between(xg0, (fg0-eg0) * c/np.square(xg0)/d, (fg0+eg0) * c/np.square(xg0)/d, lw=0, color='royalblue', zorder=10, alpha=0.2, label='')
ax2t.errorbar(xg1, fg1 * c/np.square(xg1)/d, yerr=eg1 * c/np.square(xg1)/d, lw=0.5, color='#DF4E00', zorder=10, alpha=1., label='', capsize=0)
ax2t.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, linestyle='', color='royalblue', zorder=10, alpha=1., label='', capsize=0)
xgrism = np.concatenate([xg0,xg1])
fgrism = np.concatenate([fg0,fg1])
egrism = np.concatenate([eg0,eg1])
con4000b = (xgrism/zscl>3400) & (xgrism/zscl<3800) & (fgrism>0) & (egrism>0)
con4000r = (xgrism/zscl>4200) & (xgrism/zscl<5000) & (fgrism>0) & (egrism>0)
print('Median SN at 3400-3800 is:', np.median((fgrism/egrism)[con4000b]))
print('Median SN at 4200-5000 is:', np.median((fgrism/egrism)[con4000r]))
#ax1.errorbar(xg1, fg1 * c/np.square(xg1)/d, yerr=eg1 * c/np.square(xg1)/d, lw=0.5, color='#DF4E00', zorder=10, alpha=1., label='', capsize=0)
#ax1.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, linestyle='', color='royalblue', zorder=10, alpha=1., label='', capsize=0)
#
# From MCMC chain
#
file = MB.DIR_OUT + 'chain_' + ID + '_corner.cpkl'
niter = 0
data = loadcpkl(file)
try:
ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present.
burnin = data['burnin']
nmc = data['niter']
nwalk = data['nwalkers']
Nburn = burnin #*20
res = data['chain'][:]
except:
if verbose: print(' = > NO keys of ndim and burnin found in cpkl, use input keyword values')
samples = res
# Saved template;
ytmp = np.zeros((mmax,len(ysum)), dtype='float')
ytmp_each = np.zeros((mmax,len(ysum),len(age)), dtype='float')
ytmpmax = np.zeros(len(ysum), dtype='float')
ytmpmin = np.zeros(len(ysum), dtype='float')
# MUV;
DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminosity distance in cm
DL10 = Mpc_cm/1e6 * 10 # 10pc in cm
Fuv = np.zeros(mmax, dtype='float') # For Muv
Fuv28 = np.zeros(mmax, dtype='float') # For Fuv(1500-2800)
Lir = np.zeros(mmax, dtype='float') # For L(8-1000um)
UVJ = np.zeros((mmax,4), dtype='float') # For UVJ color;
Cmznu = 10**((48.6+m0set)/(-2.5)) # Conversion from m0_25 to fnu
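# Assumed reading: with m0set as the catalog magnitude zero point, Cmznu =
# 10**(-(48.6 + m0set) / 2.5) rescales catalog flux units to f_nu (erg/s/cm2/Hz)
# through the AB-magnitude zero point of 48.6.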
# From random chain;
alp=0.02
for kk in range(0,mmax,1):
nr = np.random.randint(Nburn, len(samples['A%d'%MB.aamin[0]]))
try:
Av_tmp = samples['Av'][nr]
except:
Av_tmp = MB.AVFIX
try:
zmc = samples['zmc'][nr]
except:
zmc = zbes
for ss in MB.aamin:
try:
AA_tmp = 10**samples['A'+str(ss)][nr]
except:
AA_tmp = 0
pass
try:
Ztest = samples['Z'+str(len(age)-1)][nr]
ZZ_tmp = samples['Z'+str(ss)][nr]
except:
try:
ZZ_tmp = samples['Z0'][nr]
except:
ZZ_tmp = MB.ZFIX
if ss == MB.aamin[0]:
mod0_tmp, xm_tmp = fnc.tmp03(AA_tmp, Av_tmp, ss, ZZ_tmp, zmc, lib_all)
fm_tmp = mod0_tmp
else:
mod0_tmp, xx_tmp = fnc.tmp03(AA_tmp, Av_tmp, ss, ZZ_tmp, zmc, lib_all)
fm_tmp += mod0_tmp
# Each;
ytmp_each[kk,:,ss] = mod0_tmp[:] * c / np.square(xm_tmp[:]) / d
#
# Dust component;
#
if f_dust:
if kk == 0:
par = Parameters()
par.add('MDUST',value=samples['MDUST'][nr])
try:
par.add('TDUST',value=samples['TDUST'][nr])
except:
par.add('TDUST',value=0)
par['MDUST'].value = samples['MDUST'][nr]
try:
par['TDUST'].value = samples['TDUST'][nr]
except:
par['TDUST'].value = 0
model_dust, x1_dust = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust_all)
if kk == 0:
deldt = (x1_dust[1] - x1_dust[0])
x1_tot = np.append(xm_tmp,np.arange(np.max(xm_tmp),np.max(x1_dust),deldt))
# Redefine??
ytmp = np.zeros((mmax,len(x1_tot)), dtype='float')
ytmp_dust = np.zeros((mmax,len(x1_dust)), dtype='float')
ytmp_comp = np.zeros((mmax,len(x1_tot)), dtype='float')
ytmp_dust[kk,:] = model_dust * c/np.square(x1_dust)/d
model_tot = np.interp(x1_tot,xx_tmp,fm_tmp) + np.interp(x1_tot,x1_dust,model_dust)
ytmp[kk,:] = model_tot[:] * c/np.square(x1_tot[:])/d
else:
x1_tot = xm_tmp
ytmp[kk,:] = fm_tmp[:] * c / np.square(xm_tmp[:]) / d
#
# Grism plot + Fuv flux + LIR.
#
#if f_grsm:
#ax2t.plot(x1_tot, ytmp[kk,:], '-', lw=0.5, color='gray', zorder=3., alpha=0.02)
# Get FUV flux;
Fuv[kk] = get_Fuv(x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)) * (DL**2/(1.+zbes)) / (DL10**2), lmin=1250, lmax=1650)
Fuv28[kk] = get_Fuv(x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)) * (4*np.pi*DL**2/(1.+zbes))*Cmznu, lmin=1500, lmax=2800)
Lir[kk] = 0
# Get UVJ Color;
lmconv,fconv = filconv_fast(MB.filts_rf, MB.band_rf, x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)))
UVJ[kk,0] = -2.5*np.log10(fconv[0]/fconv[2])
UVJ[kk,1] = -2.5*np.log10(fconv[1]/fconv[2])
UVJ[kk,2] = -2.5*np.log10(fconv[2]/fconv[3])
UVJ[kk,3] = -2.5*np.log10(fconv[4]/fconv[3])
# Do stuff...
time.sleep(0.01)
# Update Progress Bar
printProgressBar(kk, mmax, prefix = 'Progress:', suffix = 'Complete', length = 40)
#
# Plot Median SED;
#
ytmp16 = np.percentile(ytmp[:,:],16,axis=0)
ytmp50 = np.percentile(ytmp[:,:],50,axis=0)
ytmp84 = np.percentile(ytmp[:,:],84,axis=0)
if f_dust:
ytmp_dust50 = np.percentile(ytmp_dust[:,:],50, axis=0)
#if not f_fill:
ax1.fill_between(x1_tot[::nstep_plot], ytmp16[::nstep_plot], ytmp84[::nstep_plot], ls='-', lw=.5, color='gray', zorder=-2, alpha=0.5)
ax1.plot(x1_tot[::nstep_plot], ytmp50[::nstep_plot], '-', lw=.5, color='gray', zorder=-1, alpha=1.)
# For grism;
if f_grsm:
from astropy.convolution import convolve
from .maketmp_filt import get_LSF
LSF, lmtmp = get_LSF(MB.inputs, MB.DIR_EXTR, ID, x1_tot[::nstep_plot], c=3e18)
spec_grsm16 = convolve(ytmp16[::nstep_plot], LSF, boundary='extend')
spec_grsm50 = convolve(ytmp50[::nstep_plot], LSF, boundary='extend')
spec_grsm84 = convolve(ytmp84[::nstep_plot], LSF, boundary='extend')
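# The model percentile spectra are convolved with the grism line-spread function
# (LSF) so that the zoom panel ax2t compares the model at the instrumental
# resolution of the observed grism data.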
ax2t.plot(x1_tot[::nstep_plot], spec_grsm50, '-', lw=0.5, color='gray', zorder=3., alpha=1.0)
# Attach the data point in MB;
MB.sed_wave_obs = xbb
MB.sed_flux_obs = fybb * c / np.square(xbb) / d
MB.sed_eflux_obs = eybb * c / np.square(xbb) / d
# Attach the best SED to MB;
MB.sed_wave = x1_tot
MB.sed_flux16 = ytmp16
MB.sed_flux50 = ytmp50
MB.sed_flux84 = ytmp84
if f_fancyplot:
alp_fancy = 0.5
#ax1.plot(x1_tot[::nstep_plot], np.percentile(ytmp[:, ::nstep_plot], 50, axis=0), '-', lw=.5, color='gray', zorder=-1, alpha=1.)
ysumtmp = ytmp[0, ::nstep_plot] * 0
ysumtmp2 = ytmp[:, ::nstep_plot] * 0
ysumtmp2_prior = ytmp[0, ::nstep_plot] * 0
for ss in range(len(age)):
ii = int(len(nage) - ss - 1) # from old to young templates.
#ysumtmp += np.percentile(ytmp_each[:, ::nstep_plot, ii], 50, axis=0)
#ax1.plot(x1_tot[::nstep_plot], ysumtmp, linestyle='--', lw=.5, color=col[ii], alpha=0.5)
# !! Take median after summation;
ysumtmp2[:,:len(xm_tmp)] += ytmp_each[:, ::nstep_plot, ii]
if f_fill:
ax1.fill_between(x1_tot[::nstep_plot], ysumtmp2_prior, np.percentile(ysumtmp2[:,:], 50, axis=0), linestyle='None', lw=0., color=col[ii], alpha=alp_fancy, zorder=-3)
else:
ax1.plot(x1_tot[::nstep_plot], np.percentile(ysumtmp2[:, ::nstep_plot], 50, axis=0), linestyle='--', lw=.5, color=col[ii], alpha=alp_fancy, zorder=-3)
ysumtmp2_prior[:] = np.percentile(ysumtmp2[:, :], 50, axis=0)
elif f_fill:
print('f_fancyplot is False. f_fill is set to False.')
#########################
# Calculate non-det chi2
# based on Sawick12
#########################
def func_tmp(xint,eobs,fmodel):
int_tmp = np.exp(-0.5 * ((xint-fmodel)/eobs)**2)
return int_tmp
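# Sketch of the non-detection term (Sawicki 2012), as used below: for each band
# with f/e <= SNlim the likelihood contribution is the integral of func_tmp from
# -inf up to the flux limit, which has the closed form
#   sqrt(pi/2) * e_obs * (1 + erf((f_lim - f_model) / (sqrt(2) * e_obs))),
# and chi_nd accumulates the log of this term; it enters the total as -2 * chi_nd.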
if f_chind:
conw = (wht3>0) & (ey>0) & (fy/ey>SNlim)
else:
conw = (wht3>0) & (ey>0) #& (fy/ey>SNlim)
#chi2 = sum((np.square(fy-ysump) * np.sqrt(wht3))[conw])
try:
logf = hdul[1].data['logf'][1]
ey_revised = np.sqrt(ey**2+ ysump**2 * np.exp(logf)**2)
except:
ey_revised = ey
chi2 = sum((np.square(fy-ysump) / ey_revised)[conw])
chi_nd = 0.0
if f_chind:
f_ex = np.zeros(len(fy), 'int')
if f_exclude:
for ii in range(len(fy)):
if x[ii] in x_ex:
f_ex[ii] = 1
con_up = (ey>0) & (fy/ey<=SNlim) & (f_ex == 0)
from scipy import special
#x_erf = (ey[con_up] - ysump[con_up]) / (np.sqrt(2) * ey[con_up])
#f_erf = special.erf(x_erf)
#chi_nd = np.sum( np.log(np.sqrt(np.pi / 2) * ey[con_up] * (1 + f_erf)) )
x_erf = (ey_revised[con_up] - ysump[con_up]) / (np.sqrt(2) * ey_revised[con_up])
f_erf = special.erf(x_erf)
chi_nd = np.sum( np.log(np.sqrt(np.pi / 2) * ey_revised[con_up] * (1 + f_erf)) )
# Number of degree;
con_nod = (wht3>0) & (ey>0) #& (fy/ey>SNlim)
nod = int(len(wht3[con_nod])-ndim_eff)
print('\n')
print('No-of-detection : %d'%(len(wht3[conw])))
print('chi2 : %.2f'%(chi2))
if f_chind:
print('No-of-non-detection: %d'%(len(ey[con_up])))
print('chi2 for non-det : %.2f'%(- 2 * chi_nd))
print('No-of-params : %d'%(ndim_eff))
print('Degrees-of-freedom : %d'%(nod))
if nod>0:
fin_chi2 = (chi2 - 2 * chi_nd) / nod
else:
fin_chi2 = -99
print('Final chi2/nu : %.2f'%(fin_chi2))
#
# plot BB model from best template (blue squares)
#
col_dia = 'blue'
if f_dust:
ALLFILT = np.append(SFILT,DFILT)
#for ii in range(len(x1_tot)):
# print(x1_tot[ii], model_tot[ii]*c/np.square(x1_tot[ii])/d)
lbb, fbb, lfwhm = filconv(ALLFILT, x1_tot, ytmp50, DIR_FILT, fw=True)
lbb, fbb16, lfwhm = filconv(ALLFILT, x1_tot, ytmp16, DIR_FILT, fw=True)
lbb, fbb84, lfwhm = filconv(ALLFILT, x1_tot, ytmp84, DIR_FILT, fw=True)
ax1.plot(x1_tot, ytmp50, '--', lw=0.5, color='purple', zorder=-1, label='')
ax3t.plot(x1_tot, ytmp50, '--', lw=0.5, color='purple', zorder=-1, label='')
iix = []
for ii in range(len(fbb)):
iix.append(ii)
con_sed = ()
ax1.scatter(lbb[iix][con_sed], fbb[iix][con_sed], lw=0.5, color='none', edgecolor=col_dia, zorder=3, alpha=1.0, marker='d', s=50)
# plot FIR range;
ax3t.scatter(lbb, fbb, lw=0.5, color='none', edgecolor=col_dia, \
zorder=2, alpha=1.0, marker='d', s=50)
else:
lbb, fbb, lfwhm = filconv(SFILT, x1_tot, ytmp50, DIR_FILT, fw=True, MB=MB, f_regist=False)
lbb, fbb16, lfwhm = filconv(SFILT, x1_tot, ytmp16, DIR_FILT, fw=True, MB=MB, f_regist=False)
lbb, fbb84, lfwhm = filconv(SFILT, x1_tot, ytmp84, DIR_FILT, fw=True, MB=MB, f_regist=False)
iix = []
for ii in range(len(fbb)):
iix.append(np.argmin(np.abs(lbb[ii]-xbb[:])))
con_sed = (eybb>0)
ax1.scatter(lbb[iix][con_sed], fbb[iix][con_sed], lw=0.5, color='none', edgecolor=col_dia, zorder=3, alpha=1.0, marker='d', s=50)
# Calculate EW, if there is excess band;
try:
iix2 = []
for ii in range(len(fy_ex)):
iix2.append(np.argmin(np.abs(lbb[:]-x_ex[ii])))
# Rest-frame EW;
# Note about 16/84 in fbb
EW16 = (fy_ex * c / np.square(x_ex) / d - fbb84[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW50 = (fy_ex * c / np.square(x_ex) / d - fbb[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW84 = (fy_ex * c / np.square(x_ex) / d - fbb16[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW50_er1 = ((fy_ex-ey_ex) * c / np.square(x_ex) / d - fbb[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
EW50_er2 = ((fy_ex+ey_ex) * c / np.square(x_ex) / d - fbb[iix2]) / (fbb[iix2]) * lfwhm[iix2] / (1.+zbes)
cnt50 = fbb[iix2]
cnt16 = fbb16[iix2]
cnt84 = fbb84[iix2]
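# Reading of the EW block above: the equivalent width is the excess flux over the
# best-fit continuum fbb, multiplied by the filter FWHM and divided by (1 + z) to
# go to the rest frame; the 16/84 values swap fbb84/fbb16 in the numerator to
# bracket the SED uncertainty.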
# Luminosity;
#Lsun = 3.839 * 1e33 #erg s-1
L16 = EW16 * cnt16 * (4.*np.pi*DL**2) * scale * (1+zbes) # A * erg/s/A/cm2 * cm2
L50 = EW50 * cnt50 * (4.*np.pi*DL**2) * scale * (1+zbes) # A * erg/s/A/cm2 * cm2
L84 = EW84 * cnt84 * (4.*np.pi*DL**2) * scale * (1+zbes) # A * erg/s/A/cm2 * cm2
ew_label = []
for ii in range(len(fy_ex)):
lres = MB.band['%s_lam'%MB.filts[iix2[ii]]][:]
fres = MB.band['%s_res'%MB.filts[iix2[ii]]][:]
ew_label.append(MB.filts[iix2[ii]])
print('\n')
print('EW016 for', x_ex[ii], 'is %d'%EW16[ii])
print('EW050 for', x_ex[ii], 'is %d'%EW50[ii])
print('EW084 for', x_ex[ii], 'is %d'%EW84[ii])
print('%d_{-%d}^{+%d} , for sed error'%(EW50[ii],EW50[ii]-EW84[ii],EW16[ii]-EW50[ii]))
print('Or, %d\pm{%d} , for flux error'%(EW50[ii],EW50[ii]-EW50_er1[ii]))
except:
pass
if save_sed:
fbb16_nu = flamtonu(lbb, fbb16*scale, m0set=25.0)
fbb_nu = flamtonu(lbb, fbb*scale, m0set=25.0)
fbb84_nu = flamtonu(lbb, fbb84*scale, m0set=25.0)
# Then save full spectrum;
col00 = []
col1 = fits.Column(name='wave_model', format='E', unit='AA', array=x1_tot)
col00.append(col1)
col2 = fits.Column(name='f_model_16', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp16[:])
col00.append(col2)
col3 = fits.Column(name='f_model_50', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp50[:])
col00.append(col3)
col4 = fits.Column(name='f_model_84', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp84[:])
col00.append(col4)
# Each component
# Stellar
col1 = fits.Column(name='wave_model_stel', format='E', unit='AA', array=x0)
col00.append(col1)
for aa in range(len(age)):
col1 = fits.Column(name='f_model_stel_%d'%aa, format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=f_50_comp[aa,:])
col00.append(col1)
if f_dust:
col1 = fits.Column(name='wave_model_dust', format='E', unit='AA', array=x1_dust)
col00.append(col1)
col1 = fits.Column(name='f_model_dust', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=ytmp_dust50)
col00.append(col1)
# Grism;
if f_grsm:
col2 = fits.Column(name='f_model_conv_16', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=spec_grsm16)
col00.append(col2)
col3 = fits.Column(name='f_model_conv_50', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=spec_grsm50)
col00.append(col3)
col4 = fits.Column(name='f_model_conv_84', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=spec_grsm84)
col00.append(col4)
# BB for dust
if f_dust:
xbb = np.append(xbb,xbbd)
fybb = np.append(fybb,fybbd)
eybb = np.append(eybb,eybbd)
col5 = fits.Column(name='wave_obs', format='E', unit='AA', array=xbb)
col00.append(col5)
col6 = fits.Column(name='f_obs', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=fybb[:] * c / np.square(xbb[:]) / d)
col00.append(col6)
col7 = fits.Column(name='e_obs', format='E', unit='1e%derg/s/cm2/AA'%(np.log10(scale)), array=eybb[:] * c / np.square(xbb[:]) / d)
col00.append(col7)
hdr = fits.Header()
hdr['redshift'] = zbes
hdr['id'] = ID
hdr['hierarch isochrone'] = isochrone
hdr['library'] = LIBRARY
hdr['scale'] = scale
try:
# Chi square:
hdr['chi2'] = chi2
hdr['hierarch No-of-effective-data-points'] = len(wht3[conw])
hdr['hierarch No-of-nondetection'] = len(ey[con_up])
hdr['hierarch Chi2-of-nondetection'] = chi_nd
hdr['hierarch No-of-params'] = ndim_eff
hdr['hierarch Degree-of-freedom'] = nod
hdr['hierarch reduced-chi2'] = fin_chi2
except:
print('Chi seems to be wrong...')
pass
try:
# Muv
MUV = -2.5 * np.log10(Fuv[:]) + 25.0
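# Assumption: the +25.0 matches the m0set zero point used above, so MUV is the
# absolute UV magnitude derived from the 10-pc-rescaled Fuv values.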
hdr['MUV16'] = np.percentile(MUV[:],16)
hdr['MUV50'] = np.percentile(MUV[:],50)
hdr['MUV84'] = np.percentile(MUV[:],84)
# Fuv (!= flux of Muv)
hdr['FUV16'] = np.percentile(Fuv28[:],16)
hdr['FUV50'] = np.percentile(Fuv28[:],50)
hdr['FUV84'] = np.percentile(Fuv28[:],84)
# LIR
hdr['LIR16'] = np.percentile(Lir[:],16)
hdr['LIR50'] = np.percentile(Lir[:],50)
hdr['LIR84'] = np.percentile(Lir[:],84)
except:
pass
# UVJ
try:
hdr['uv16'] = np.percentile(UVJ[:,0],16)
hdr['uv50'] = np.percentile(UVJ[:,0],50)
hdr['uv84'] = np.percentile(UVJ[:,0],84)
hdr['bv16'] = np.percentile(UVJ[:,1],16)
hdr['bv50'] = np.percentile(UVJ[:,1],50)
hdr['bv84'] = np.percentile(UVJ[:,1],84)
hdr['vj16'] = np.percentile(UVJ[:,2],16)
hdr['vj50'] = np.percentile(UVJ[:,2],50)
hdr['vj84'] = np.percentile(UVJ[:,2],84)
hdr['zj16'] = np.percentile(UVJ[:,3],16)
hdr['zj50'] = np.percentile(UVJ[:,3],50)
hdr['zj84'] = np.percentile(UVJ[:,3],84)
except:
print('\nError when writing UVJ colors;\n')
pass
# EW;
try:
for ii in range(len(EW50)):
hdr['EW_%s_16'%(ew_label[ii])] = EW16[ii]
hdr['EW_%s_50'%(ew_label[ii])] = EW50[ii]
hdr['EW_%s_84'%(ew_label[ii])] = EW84[ii]
hdr['EW_%s_e1'%(ew_label[ii])] = EW50_er1[ii]
hdr['EW_%s_e2'%(ew_label[ii])] = EW50_er2[ii]
hdr['HIERARCH cnt_%s_16'%(ew_label[ii])]= cnt16[ii]
hdr['HIERARCH cnt_%s_50'%(ew_label[ii])]= cnt50[ii]
hdr['HIERARCH cnt_%s_84'%(ew_label[ii])]= cnt84[ii]
hdr['L_%s_16'%(ew_label[ii])] = L16[ii]
hdr['L_%s_50'%(ew_label[ii])] = L50[ii]
hdr['L_%s_84'%(ew_label[ii])] = L84[ii]
except:
pass
# Version;
import gsf
hdr['version'] = gsf.__version__
# Write;
colspec = fits.ColDefs(col00)
hdu0 = fits.BinTableHDU.from_columns(colspec, header=hdr)
hdu0.writeto(MB.DIR_OUT + 'gsf_spec_%s.fits'%(ID), overwrite=True)
# ASDF;
tree_spec = {
'id': ID,
'redshift': '%.3f'%zbes,
'isochrone': '%s'%(isochrone),
'library': '%s'%(LIBRARY),
'scale': scale,
'version_gsf': gsf.__version__
}
# BB;
tree_spec.update({'wave': lbb})
tree_spec.update({'fnu_16': fbb16_nu})
tree_spec.update({'fnu_50': fbb_nu})
tree_spec.update({'fnu_84': fbb84_nu})
# full spectrum;
tree_spec.update({'wave_model': x1_tot})
tree_spec.update({'f_model_16': ytmp16})
tree_spec.update({'f_model_50': ytmp50})
tree_spec.update({'f_model_84': ytmp84})
# EW;
try:
for ii in range(len(EW50)):
tree_spec.update({'EW_%s_16'%(ew_label[ii]): EW16[ii]})
tree_spec.update({'EW_%s_50'%(ew_label[ii]): EW50[ii]})
tree_spec.update({'EW_%s_84'%(ew_label[ii]): EW84[ii]})
tree_spec.update({'EW_%s_e1'%(ew_label[ii]): EW50_er1[ii]})
tree_spec.update({'EW_%s_e2'%(ew_label[ii]): EW50_er2[ii]})
tree_spec.update({'cnt_%s_16'%(ew_label[ii]): cnt16[ii]})
tree_spec.update({'cnt_%s_50'%(ew_label[ii]): cnt50[ii]})
tree_spec.update({'cnt_%s_84'%(ew_label[ii]): cnt84[ii]})
tree_spec.update({'L_%s_16'%(ew_label[ii]): L16[ii]})
tree_spec.update({'L_%s_50'%(ew_label[ii]): L50[ii]})
tree_spec.update({'L_%s_84'%(ew_label[ii]): L84[ii]})
except:
pass
# Each component
# Stellar
tree_spec.update({'wave_model_stel': x0})
for aa in range(len(age)):
tree_spec.update({'f_model_stel_%d'%aa: f_50_comp[aa,:]})
if f_dust:
# dust
tree_spec.update({'wave_model_dust': x1_dust})
tree_spec.update({'f_model_dust': ytmp_dust50})
# BB for dust
tree_spec.update({'wave_obs': xbb})
tree_spec.update({'f_obs': fybb[:] * c / np.square(xbb[:]) / d})
tree_spec.update({'e_obs': eybb[:] * c / np.square(xbb[:]) / d})
# grism:
if f_grsm:
tree_spec.update({'fg0_obs': fg0 * c/np.square(xg0)/d})
tree_spec.update({'eg0_obs': eg0 * c/np.square(xg0)/d})
tree_spec.update({'wg0_obs': xg0})
tree_spec.update({'fg1_obs': fg1 * c/np.square(xg1)/d})
tree_spec.update({'eg1_obs': eg1 * c/np.square(xg1)/d})
tree_spec.update({'wg1_obs': xg1})
af = asdf.AsdfFile(tree_spec)
af.write_to(MB.DIR_OUT + 'gsf_spec_%s.asdf'%(ID), all_array_compression='zlib')
#
# SED params in plot
#
if f_label:
fd = fits.open(MB.DIR_OUT + 'SFH_' + ID + '.fits')[0].header
if f_dust:
label = 'ID: %s\n$z_\mathrm{obs.}:%.2f$\n$\log M_\mathrm{*}/M_\odot:%.2f$\n$\log M_\mathrm{dust}/M_\odot:%.2f$\n$\log Z_\mathrm{*}/Z_\odot:%.2f$\n$\log T_\mathrm{*}$/Gyr$:%.2f$\n$A_V$/mag$:%.2f$\n$\\chi^2/\\nu:%.2f$'\
%(ID, zbes, float(fd['Mstel_50']), MD50, float(fd['Z_MW_50']), float(fd['T_MW_50']), float(fd['AV_50']), fin_chi2)
ylabel = ymax*0.45
else:
label = 'ID: %s\n$z_\mathrm{obs.}:%.2f$\n$\log M_\mathrm{*}/M_\odot:%.2f$\n$\log Z_\mathrm{*}/Z_\odot:%.2f$\n$\log T_\mathrm{*}$/Gyr$:%.2f$\n$A_V$/mag$:%.2f$\n$\\chi^2/\\nu:%.2f$'\
%(ID, zbes, float(fd['Mstel_50']), float(fd['Z_MW_50']), float(fd['T_MW_50']), float(fd['AV_50']), fin_chi2)
ylabel = ymax*0.25
ax1.text(0.77, 0.65, label,\
fontsize=9, bbox=dict(facecolor='w', alpha=0.7), zorder=10,
ha='left', va='center', transform=ax1.transAxes)
#######################################
ax1.xaxis.labelpad = -3
if f_grsm:
if np.max(xg0)<23000: # E.g. WFC3, NIRISS grisms
conlim = (x0>10000) & (x0<25000)
xgmin, xgmax = np.min(x0[conlim]),np.max(x0[conlim]), #7500, 17000
ax2t.set_xlabel('')
ax2t.set_xlim(xgmin, xgmax)
conaa = (x0>xgmin-50) & (x0<xgmax+50)
ymaxzoom = np.max(ysum[conaa]*c/np.square(x0[conaa])/d) * 1.15
yminzoom = np.min(ysum[conaa]*c/np.square(x0[conaa])/d) / 1.15
ax2t.set_ylim(yminzoom, ymaxzoom)
ax2t.xaxis.labelpad = -2
if xgmax>20000:
ax2t.set_xticks([8000, 12000, 16000, 20000, 24000])
ax2t.set_xticklabels(['0.8', '1.2', '1.6', '2.0', '2.4'])
else:
ax2t.set_xticks([8000, 10000, 12000, 14000, 16000])
ax2t.set_xticklabels(['0.8', '1.0', '1.2', '1.4', '1.6'])
else:
conlim = (x0>10000) & (x0<54000) # NIRSPEC spectrum;
xgmin, xgmax = np.min(x0[conlim]),np.max(x0[conlim]), #7500, 17000
ax2t.set_xlabel('')
ax2t.set_xlim(xgmin, xgmax)
conaa = (x0>xgmin-50) & (x0<xgmax+50)
ymaxzoom = np.max(ysum[conaa]*c/np.square(x0[conaa])/d) * 1.15
yminzoom = np.min(ysum[conaa]*c/np.square(x0[conaa])/d) / 1.15
ax2t.set_ylim(yminzoom, ymaxzoom)
ax2t.xaxis.labelpad = -2
if xgmax>40000:
ax2t.set_xticks([8000, 20000, 32000, 44000, 56000])
ax2t.set_xticklabels(['0.8', '2.0', '3.2', '4.4', '5.6'])
else:
ax2t.set_xticks([8000, 20000, 32000, 44000])
ax2t.set_xticklabels(['0.8', '2.0', '3.2', '4.4'])
if f_dust:
try:
contmp = (x1_tot>10*1e4) #& (fybbd/eybbd>SNlim)
y3min, y3max = -.2*np.max((model_tot * c/ np.square(x1_tot) / d)[contmp]), np.max((model_tot * c/ np.square(x1_tot) / d)[contmp])*2.0
ax3t.set_ylim(y3min, y3max)
except:
if verbose:
print('y3 limit is not specified.')
pass
ax3t.set_xlim(1e5, 3e7)
ax3t.set_xscale('log')
ax3t.set_xticks([100000, 1000000, 10000000])
ax3t.set_xticklabels(['10', '100', '1000'])
###############
# Line name
###############
LN0 = ['Mg2', '$NeIV$', '[OII]', 'H$\theta$', 'H$\eta$', 'Ne3?', 'H$\delta$', 'H$\gamma$', 'H$\\beta$', 'O3', 'O3', 'Mgb', 'Halpha', 'S2L', 'S2H']
LW0 = [2800, 3347, 3727, 3799, 3836, 3869, 4102, 4341, 4861, 4959, 5007, 5175, 6563, 6717, 6731]
fsl = 9 # Fontsize for line
if f_grsm:
try:
for ii in range(len(LW)):
ll = np.argmin(np.abs(LW[ii]-LW0[:]))
if ll == 2 and FLW[ii] == 1: # FLW is the flag for line fitting.
yyl = np.arange(yminzoom+(ymaxzoom-yminzoom)*0.5,yminzoom+(ymaxzoom-yminzoom)*0.65, 0.01)
xxl = yyl * 0 + LW0[ll]
ax2t.errorbar(xxl, yyl, lw=0.5, color=lcb, zorder=20, alpha=1., label='', capsize=0)
ax2t.text(xxl[0]-130, yyl[0]*1.28, '%s'%(LN0[ll]), color=lcb, fontsize=9, rotation=90)
elif (ll == 9 and FLW[ii] == 1):
yyl = np.arange(yminzoom+(ymaxzoom-yminzoom)*0.5,yminzoom+(ymaxzoom-yminzoom)*0.65, 0.01)
xxl = yyl * 0 + LW0[ll]
ax2t.errorbar(xxl, yyl, lw=0.5, color=lcb, zorder=20, alpha=1., label='', capsize=0)
elif (ll == 10 and FLW[ii] == 1):
yyl = np.arange(yminzoom+(ymaxzoom-yminzoom)*0.5,yminzoom+(ymaxzoom-yminzoom)*0.65, 0.01)
xxl = yyl * 0 + LW0[ll]
ax2t.errorbar(xxl, yyl, lw=0.5, color=lcb, zorder=20, alpha=1., label='', capsize=0)
ax2t.text(xxl[0]+40, yyl[0]*0.75, '%s'%(LN0[ll]), color=lcb, fontsize=9, rotation=90)
elif FLW[ii] == 1 and (ll == 6 or ll == 7 or ll == 8):
yyl = np.arange(yminzoom+(ymaxzoom-yminzoom)*0.2,yminzoom+(ymaxzoom-yminzoom)*0.35, 0.01)
xxl = yyl * 0 + LW0[ll]
ax2t.errorbar(xxl, yyl, lw=0.5, color=lcb, zorder=20, alpha=1., label='', capsize=0)
ax2t.text(xxl[0]+40, yyl[0]*0.95, '%s'%(LN0[ll]), color=lcb, fontsize=9, rotation=90)
elif ll == 6 or ll == 7 or ll == 8:
yyl = np.arange(yminzoom+(ymaxzoom-yminzoom)*0.2,yminzoom+(ymaxzoom-yminzoom)*0.35, 0.01)
xxl = yyl * 0 + LW0[ll]
ax2t.errorbar(xxl, yyl, lw=0.5, color='gray', zorder=1, alpha=1., label='', capsize=0)
ax2t.text(xxl[0]+40, yyl[0]*0.95, '%s'%(LN0[ll]), color='gray', fontsize=9, rotation=90)
elif FLW[ii] == 1:
yyl = np.arange(yminzoom+(ymaxzoom-yminzoom)*0.7,yminzoom+(ymaxzoom-yminzoom)*.95, 0.01)
xxl = yyl * 0 + LW0[ll]
ax2t.errorbar(xxl, yyl, lw=0.5, color=lcb, zorder=20, alpha=1., label='', capsize=0)
ax2t.text(xxl[0]+40, yyl[0]*1.25, '%s'%(LN0[ll]), color=lcb, fontsize=9, rotation=90)
except:
pass
# Filters
if f_plot_filter:
ax1 = plot_filter(MB, ax1, ymax, scl=scl_yaxis)
####################
## Save
####################
ax1.legend(loc=1, fontsize=11)
if figpdf:
fig.savefig(MB.DIR_OUT + 'SPEC_' + ID + '_spec.pdf', dpi=dpi)
else:
fig.savefig(MB.DIR_OUT + 'SPEC_' + ID + '_spec.png', dpi=dpi)
def plot_sed_tau(MB, flim=0.01, fil_path='./', scale=1e-19, f_chind=True, figpdf=False, save_sed=True, inputs=False, \
mmax=300, dust_model=0, DIR_TMP='./templates/', f_label=False, f_bbbox=False, verbose=False, f_silence=True, \
f_fill=False, f_fancyplot=False, f_Alog=True, dpi=300, f_plot_filter=True):
'''
Parameters
----------
MB.SNlim : float
S/N limit; fluxes above this are plotted as detections, lower-S/N points as upper limits.
f_chind : bool
If True, include non-detections in the chi2 calculation, following Sawicki (2012).
mmax : int
Number of MCMC realizations used for the plot (not for the calculation).
f_fancyplot : bool
If True, plot each SED component separately.
f_fill : bool
If True (together with f_fancyplot), fill each SED component.
Returns
-------
plots
'''
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.optimize import curve_fit
from numpy import asarray as ar, exp  # scipy no longer re-exports these numpy functions
import matplotlib
import scipy.integrate as integrate
import scipy.special as special
import os.path
from astropy.io import ascii
import time
if f_silence:
import matplotlib
matplotlib.use("Agg")
def gaus(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))
lcb = '#4682b4' # line color, blue
fnc = MB.fnc
bfnc = MB.bfnc
ID = MB.ID
Z = MB.Zall
age = MB.age
nage = MB.nage
tau0 = MB.tau0
NUM_COLORS = len(age)
cm = plt.get_cmap('gist_rainbow')
col = [cm(1 - 1.*i/NUM_COLORS) for i in range(NUM_COLORS)]
nstep_plot = 1
if MB.f_bpass:
nstep_plot = 30
SNlim = MB.SNlim
################
# RF colors.
home = os.path.expanduser('~')
c = MB.c
chimax = 1.
m0set = MB.m0set
Mpc_cm = MB.Mpc_cm
d = MB.d * scale
##################
# Fitting Results
##################
DIR_FILT = MB.DIR_FILT
SFILT = MB.filts
try:
f_err = MB.ferr
except:
f_err = 0
###########################
# Open result file
###########################
file = MB.DIR_OUT + 'summary_' + ID + '.fits'
hdul = fits.open(file)
ndim_eff = hdul[0].header['NDIM']
vals = {}
# Redshift MC
zp16 = hdul[1].data['zmc'][0]
zp50 = hdul[1].data['zmc'][1]
zp84 = hdul[1].data['zmc'][2]
vals['zmc'] = zp50
# Stellar mass MC
M16 = hdul[1].data['ms'][0]
M50 = hdul[1].data['ms'][1]
M84 = hdul[1].data['ms'][2]
if verbose:
print('Total stellar mass is %.2e'%(M50))
# Amplitude MC
A50 = np.zeros(len(age), dtype='float')
A16 = np.zeros(len(age), dtype='float')
A84 = np.zeros(len(age), dtype='float')
for aa in range(len(age)):
A16[aa] = 10**hdul[1].data['A'+str(aa)][0]
A50[aa] = 10**hdul[1].data['A'+str(aa)][1]
A84[aa] = 10**hdul[1].data['A'+str(aa)][2]
vals['A'+str(aa)] = np.log10(A50[aa])
Asum = np.sum(A50)
# TAU MC
# AGE MC
TAU50 = np.zeros(len(age), dtype='float')
TAU16 = np.zeros(len(age), dtype='float')
TAU84 = np.zeros(len(age), dtype='float')
AGE50 = np.zeros(len(age), dtype='float')
AGE16 = np.zeros(len(age), dtype='float')
AGE84 = np.zeros(len(age), dtype='float')
for aa in range(len(age)):
TAU16[aa] = 10**hdul[1].data['TAU'+str(aa)][0]
TAU50[aa] = 10**hdul[1].data['TAU'+str(aa)][1]
TAU84[aa] = 10**hdul[1].data['TAU'+str(aa)][2]
AGE16[aa] = 10**hdul[1].data['AGE'+str(aa)][0]
AGE50[aa] = 10**hdul[1].data['AGE'+str(aa)][1]
AGE84[aa] = 10**hdul[1].data['AGE'+str(aa)][2]
vals['TAU'+str(aa)] = np.log10(TAU50[aa])
vals['AGE'+str(aa)] = np.log10(AGE50[aa])
aa = 0
Av16 = hdul[1].data['Av'+str(aa)][0]
Av50 = hdul[1].data['Av'+str(aa)][1]
Av84 = hdul[1].data['Av'+str(aa)][2]
AAv = [Av50]
vals['Av'] = Av50
Z50 = np.zeros(len(age), dtype='float')
Z16 = np.zeros(len(age), dtype='float')
Z84 = np.zeros(len(age), dtype='float')
#NZbest = np.zeros(len(age), dtype='int')
for aa in range(len(age)):
Z16[aa] = hdul[1].data['Z'+str(aa)][0]
Z50[aa] = hdul[1].data['Z'+str(aa)][1]
Z84[aa] = hdul[1].data['Z'+str(aa)][2]
#NZbest[aa]= bfnc.Z2NZ(Z50[aa])
vals['Z'+str(aa)] = Z50[aa]
# Light weighted Z.
ZZ50 = np.sum(Z50*A50)/np.sum(A50)
# FIR Dust;
try:
MD16 = hdul[1].data['MDUST'][0]
MD50 = hdul[1].data['MDUST'][1]
MD84 = hdul[1].data['MDUST'][2]
TD16 = hdul[1].data['TDUST'][0]
TD50 = hdul[1].data['TDUST'][1]
TD84 = hdul[1].data['TDUST'][2]
nTD16 = hdul[1].data['nTDUST'][0]
nTD50 = hdul[1].data['nTDUST'][1]
nTD84 = hdul[1].data['nTDUST'][2]
DFILT = inputs['FIR_FILTER'] # filter band string.
DFILT = [x.strip() for x in DFILT.split(',')]
DFWFILT = fil_fwhm(DFILT, DIR_FILT)
if verbose:
print('Total dust mass is %.2e'%(MD50))
f_dust = True
except:
f_dust = False
chi = hdul[1].data['chi'][0]
chin = hdul[1].data['chi'][1]
fitc = chin
Cz0 = hdul[0].header['Cz0']
Cz1 = hdul[0].header['Cz1']
zbes = zp50
zscl = (1.+zbes)
###############################
# Data taken from
###############################
if MB.f_dust:
MB.dict = MB.read_data(Cz0, Cz1, zbes, add_fir=True)
else:
MB.dict = MB.read_data(Cz0, Cz1, zbes)
NR = MB.dict['NR']
x = MB.dict['x']
fy = MB.dict['fy']
ey = MB.dict['ey']
con0 = (NR<1000)
xg0 = x[con0]
fg0 = fy[con0] #* Cz0
eg0 = ey[con0] #* Cz0
con1 = (NR>=1000) & (NR<10000) #& (fy/ey>SNlim)
xg1 = x[con1]
fg1 = fy[con1] #* Cz1
eg1 = ey[con1] #* Cz1
if len(xg0)>0 or len(xg1)>0:
f_grsm = True
else:
f_grsm = False
# Weight is set to zero for those no data (ey<0).
wht = fy * 0
con_wht = (ey>0)
wht[con_wht] = 1./np.square(ey[con_wht])
# BB data points;
NRbb = MB.dict['NRbb'] #dat[:, 0]
xbb = MB.dict['xbb'] #dat[:, 1]
fybb = MB.dict['fybb'] #dat[:, 2]
eybb = MB.dict['eybb'] #dat[:, 3]
exbb = MB.dict['exbb'] #dat[:, 4]
snbb = fybb/eybb
######################
# Weight by line
######################
wh0 = 1./np.square(eg0)
LW0 = []
model = fg0
wht3 = check_line_man(fy, x, wht, fy, zbes, LW0)
######################
# Mass-to-Light ratio.
######################
af = MB.af
sedpar = af['ML']
try:
isochrone = af['isochrone']
LIBRARY = af['library']
except:
isochrone = ''
LIBRARY = ''
#############
# Plot.
#############
# Set the inset.
if f_grsm or f_dust:
fig = plt.figure(figsize=(7.,3.2))
fig.subplots_adjust(top=0.98, bottom=0.16, left=0.1, right=0.99, hspace=0.15, wspace=0.25)
ax1 = fig.add_subplot(111)
xsize = 0.29
ysize = 0.25
if f_grsm:
ax2t = ax1.inset_axes((1-xsize-0.01,1-ysize-0.01,xsize,ysize))
if f_dust:
ax3t = ax1.inset_axes((0.7,.35,.28,.25))
else:
fig = plt.figure(figsize=(5.5,2.2))
fig.subplots_adjust(top=0.98, bottom=0.16, left=0.1, right=0.99, hspace=0.15, wspace=0.25)
ax1 = fig.add_subplot(111)
#######################################
# D.Kelson like Box for BB photometry
#######################################
#col_dat = 'darkgreen'
#col_dat = 'tomato'
col_dat = 'r'
if f_bbbox:
for ii in range(len(xbb)):
if eybb[ii]<100 and fybb[ii]/eybb[ii]>1:
xx = [xbb[ii]-exbb[ii],xbb[ii]-exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]+exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]-exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]-eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
xx = [xbb[ii]-exbb[ii],xbb[ii]+exbb[ii]]
yy = [(fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d, (fybb[ii]+eybb[ii])*c/np.square(xbb[ii])/d]
ax1.plot(xx, yy, color='k', linestyle='-', linewidth=0.5, zorder=3)
else: # Normal BB plot;
# Detection;
conbb_hs = (fybb/eybb>SNlim)
ax1.errorbar(xbb[conbb_hs], fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d, \
yerr=eybb[conbb_hs]*c/np.square(xbb[conbb_hs])/d, color='k', linestyle='', linewidth=0.5, zorder=4)
ax1.plot(xbb[conbb_hs], fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d, \
marker='.', color=col_dat, linestyle='', linewidth=0, zorder=4, ms=8)#, label='Obs.(BB)')
try:
# For any data removed from the fit (i.e. IRAC excess):
data_ex = ascii.read(DIR_TMP + 'bb_obs_' + ID + '_removed.cat')
NR_ex = data_ex['col1']
except:
NR_ex = []
# Upperlim;
sigma = 1.0
leng = np.max(fybb[conbb_hs] * c / np.square(xbb[conbb_hs]) / d) * 0.05 #0.2
conebb_ls = (fybb/eybb<=SNlim) & (eybb>0)
for ii in range(len(xbb)):
if NR[ii] in NR_ex[:]:
conebb_ls[ii] = False
ax1.errorbar(xbb[conebb_ls], eybb[conebb_ls] * c / np.square(xbb[conebb_ls]) / d * sigma, yerr=leng,\
uplims=eybb[conebb_ls] * c / np.square(xbb[conebb_ls]) / d * sigma, linestyle='',color=col_dat, marker='', ms=4, label='', zorder=4, capsize=3)
# For any data removed from the fit (i.e. IRAC excess):
f_exclude = False
try:
col_ex = 'lawngreen'
#col_ex = 'limegreen'
#col_ex = 'r'
# Currently, this file is made after FILTER_SKIP;
data_ex = ascii.read(DIR_TMP + 'bb_obs_' + ID + '_removed.cat')
x_ex = data_ex['col2']
fy_ex = data_ex['col3']
ey_ex = data_ex['col4']
ex_ex = data_ex['col5']
ax1.errorbar(x_ex, fy_ex * c / np.square(x_ex) / d, \
xerr=ex_ex, yerr=ey_ex*c/np.square(x_ex)/d, color='k', linestyle='', linewidth=0.5, zorder=5)
ax1.scatter(x_ex, fy_ex * c / np.square(x_ex) / d, marker='s', color=col_ex, edgecolor='k', zorder=5, s=30)
f_exclude = True
except:
pass
#####################################
# Open ascii file and stock to array.
MB.lib = fnc.open_spec_fits(fall=0)
MB.lib_all = fnc.open_spec_fits(fall=1)
if f_dust:
DT0 = float(inputs['TDUST_LOW'])
DT1 = float(inputs['TDUST_HIG'])
dDT = float(inputs['TDUST_DEL'])
Temp = np.arange(DT0,DT1,dDT)
MB.lib_dust = fnc.open_spec_dust_fits(fall=0)
MB.lib_dust_all = fnc.open_spec_dust_fits(fall=1)
# FIR dust plot;
if f_dust:
from lmfit import Parameters
par = Parameters()
par.add('MDUST',value=MD50)
par.add('TDUST',value=nTD50)
par.add('zmc',value=zp50)
y0d, x0d = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust_all)
y0d_cut, x0d_cut = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust)
# data;
dat_d = ascii.read(MB.DIR_TMP + 'bb_dust_obs_' + MB.ID + '.cat')
NRbbd = dat_d['col1']
xbbd = dat_d['col2']
fybbd = dat_d['col3']
eybbd = dat_d['col4']
exbbd = dat_d['col5']
snbbd = fybbd/eybbd
try:
conbbd_hs = (fybbd/eybbd>SNlim)
ax1.errorbar(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
yerr=eybbd[conbbd_hs]*c/np.square(xbbd[conbbd_hs])/d, color='k', linestyle='', linewidth=0.5, zorder=4)
ax1.plot(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
'.r', linestyle='', linewidth=0, zorder=4)#, label='Obs.(BB)')
ax3t.plot(xbbd[conbbd_hs], fybbd[conbbd_hs] * c / np.square(xbbd[conbbd_hs]) / d, \
'.r', linestyle='', linewidth=0, zorder=4)#, label='Obs.(BB)')
except:
pass
try:
conebbd_ls = (fybbd/eybbd<=SNlim)
ax1.errorbar(xbbd[conebbd_ls], eybbd[conebbd_ls] * c / np.square(xbbd[conebbd_ls]) / d, \
yerr=fybbd[conebbd_ls]*0+np.max(fybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d)*0.05, \
uplims=eybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d, color='r', linestyle='', linewidth=0.5, zorder=4)
ax3t.errorbar(xbbd[conebbd_ls], eybbd[conebbd_ls] * c / np.square(xbbd[conebbd_ls]) / d, \
yerr=fybbd[conebbd_ls]*0+np.max(fybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d)*0.05, \
uplims=eybbd[conebbd_ls]*c/np.square(xbbd[conebbd_ls])/d, color='r', linestyle='', linewidth=0.5, zorder=4)
except:
pass
#
# This is for UVJ color time evolution.
#
Asum = np.sum(A50[:])
alp = .5
# Get total templates
y0p, x0p = MB.fnc.tmp04(vals, f_val=False, check_bound=False)
y0, x0 = MB.fnc.tmp04(vals, f_val=False, check_bound=False, lib_all=True)
ysum = y0
#f_50_comp = np.zeros((len(age),len(y0)),'float')
f_50_comp = y0[:] * c / np.square(x0) / d
ysump = y0p
nopt = len(ysump)
if f_dust:
ysump[:] += y0d_cut[:nopt]
ysump = np.append(ysump,y0d_cut[nopt:])
f_50_comp_dust = y0d * c / np.square(x0d) / d
# Plot each best fit:
vals_each = vals.copy()
for aa in range(len(age)):
vals_each['A%d'%aa] = -99
for aa in range(len(age)):
vals_each['A%d'%aa] = vals['A%d'%aa]
y0tmp, x0tmp = MB.fnc.tmp04(vals_each, f_val=False, check_bound=False, lib_all=True)
if aa == 0:
y0keep = y0tmp
else:
y0keep += y0tmp
ax1.plot(x0tmp, y0tmp * c / np.square(x0tmp) / d, linestyle='--', lw=0.5, color=col[aa])
vals_each['A%d'%aa] = 0
# Plot best fit;
ax1.plot(x0, f_50_comp, linestyle='-', lw=0.5, color='k')
#############
# Main result
#############
conbb_ymax = (xbb>0) & (fybb>0) & (eybb>0) & (fybb/eybb>1) # (conbb) &
ymax = np.max(fybb[conbb_ymax]*c/np.square(xbb[conbb_ymax])/d) * 1.6
xboxl = 17000
xboxu = 28000
x1max = 22000
if x1max < np.max(xbb[conbb_ymax]):
x1max = np.max(xbb[conbb_ymax]) * 1.5
ax1.set_xlim(2000, 11000)
ax1.set_xscale('log')
if f_plot_filter:
scl_yaxis = 0.2
else:
scl_yaxis = 0.1
ax1.set_ylim(-ymax*scl_yaxis,ymax)
ax1.text(2100,-ymax*0.08,'SNlimit:%.1f'%(SNlim),fontsize=8)
ax1.set_xlabel('Observed wavelength ($\mathrm{\mu m}$)', fontsize=12)
ax1.set_ylabel('Flux ($10^{%d}\mathrm{erg}/\mathrm{s}/\mathrm{cm}^{2}/\mathrm{\AA}$)'%(np.log10(scale)),fontsize=12,labelpad=-2)
xticks = [2500, 5000, 10000, 20000, 40000, 80000, 110000]
xlabels= ['0.25', '0.5', '1', '2', '4', '8', '']
if f_dust:
xticks = [2500, 5000, 10000, 20000, 40000, 80000, 400000]
xlabels= ['0.25', '0.5', '1', '2', '4', '8', '']
ax1.set_xticks(xticks)
ax1.set_xticklabels(xlabels)
dely1 = 0.5
while (ymax-0)/dely1<1:
dely1 /= 2.
while (ymax-0)/dely1>4:
dely1 *= 2.
y1ticks = np.arange(0, ymax, dely1)
ax1.set_yticks(y1ticks)
ax1.set_yticklabels(np.arange(0, ymax, dely1), minor=False)
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax1.yaxis.labelpad = 1.5
xx = np.arange(1200,400000)
yy = xx * 0
ax1.plot(xx, yy, ls='--', lw=0.5, color='k')
#############
# Plot
#############
ms = np.zeros(len(age), dtype='float')
af = MB.af
sedpar = af['ML']
eAAl = np.zeros(len(age),dtype='float')
eAAu = np.zeros(len(age),dtype='float')
eAMl = np.zeros(len(age),dtype='float')
eAMu = np.zeros(len(age),dtype='float')
MSsum = np.sum(ms)
Asum = np.sum(A50)
A50 /= Asum
A16 /= Asum
A84 /= Asum
AM50 = A50 * M50 * ms / MSsum
CM = M50/np.sum(AM50)
AM50 = A50 * M50 * ms / MSsum * CM
AM16 = A16 * M50 * ms / MSsum * CM
AM84 = A84 * M50 * ms / MSsum * CM
AC50 = A50 * 0 # Cumulative
for ii in range(len(A50)):
eAAl[ii] = A50[ii] - A16[ii]
eAAu[ii] = A84[ii] - A50[ii]
eAMl[ii] = AM50[ii] - AM16[ii]
eAMu[ii] = AM84[ii] - AM50[ii]
AC50[ii] = np.sum(AM50[ii:])
################
# Lines
################
LN = ['Mg2', 'Ne5', 'O2', 'Htheta', 'Heta', 'Ne3', 'Hdelta', 'Hgamma', 'Hbeta', 'O3', 'O3', 'Mgb', 'Halpha', 'S2L', 'S2H']
FLW = np.zeros(len(LN),dtype='int')
####################
# For cosmology
####################
DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm
Cons = (4.*np.pi*DL**2/(1.+zbes))
if f_grsm:
print('This function (write_lines) needs to be revised.')
write_lines(ID, zbes, DIR_OUT=MB.DIR_OUT)
##########################
# Zoom in Line regions
##########################
if f_grsm:
conspec = (NR<10000) #& (fy/ey>1)
#ax2t.fill_between(xg1, (fg1-eg1) * c/np.square(xg1)/d, (fg1+eg1) * c/np.square(xg1)/d, lw=0, color='#DF4E00', zorder=10, alpha=0.7, label='')
#ax2t.fill_between(xg0, (fg0-eg0) * c/np.square(xg0)/d, (fg0+eg0) * c/np.square(xg0)/d, lw=0, color='royalblue', zorder=10, alpha=0.2, label='')
ax2t.errorbar(xg1, fg1 * c/np.square(xg1)/d, yerr=eg1 * c/np.square(xg1)/d, lw=0.5, color='#DF4E00', zorder=10, alpha=1., label='', capsize=0)
ax2t.errorbar(xg0, fg0 * c/np.square(xg0)/d, yerr=eg0 * c/np.square(xg0)/d, lw=0.5, linestyle='', color='royalblue', zorder=10, alpha=1., label='', capsize=0)
xgrism = np.concatenate([xg0,xg1])
fgrism = np.concatenate([fg0,fg1])
egrism = np.concatenate([eg0,eg1])
con4000b = (xgrism/zscl>3400) & (xgrism/zscl<3800) & (fgrism>0) & (egrism>0)
con4000r = (xgrism/zscl>4200) & (xgrism/zscl<5000) & (fgrism>0) & (egrism>0)
        print('Median SN at 3400-3800 is:', np.median((fgrism/egrism)[con4000b]))
        print('Median SN at 4200-5000 is:', np.median((fgrism/egrism)[con4000r]))
#
# From MCMC chain
#
file = MB.DIR_OUT + 'chain_' + ID + '_corner.cpkl'
niter = 0
data = loadcpkl(file)
ndim = data['ndim']
burnin = data['burnin']
nmc = data['niter']
nwalk = data['nwalkers']
Nburn = burnin
res = data['chain'][:]
samples = res
# Saved template;
ytmp = np.zeros((mmax,len(ysum)), dtype='float')
ytmp_each = np.zeros((mmax,len(ysum),len(age)), dtype='float')
ytmpmax = np.zeros(len(ysum), dtype='float')
ytmpmin = np.zeros(len(ysum), dtype='float')
# MUV;
    DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminosity distance in cm
DL10 = Mpc_cm/1e6 * 10 # 10pc in cm
Fuv = np.zeros(mmax, dtype='float') # For Muv
Fuv28 = np.zeros(mmax, dtype='float') # For Fuv(1500-2800)
Lir = np.zeros(mmax, dtype='float') # For L(8-1000um)
UVJ = np.zeros((mmax,4), dtype='float') # For UVJ color;
Cmznu = 10**((48.6+m0set)/(-2.5)) # Conversion from m0_25 to fnu
# From random chain;
alp=0.02
for kk in range(0,mmax,1):
nr = np.random.randint(Nburn, len(samples['A%d'%MB.aamin[0]]))
try:
Av_tmp = samples['Av'][nr]
except:
Av_tmp = MB.AVFIX
vals['Av'] = Av_tmp
try:
zmc = samples['zmc'][nr]
except:
zmc = zbes
vals['zmc'] = zmc
for ss in MB.aamin:
try:
AA_tmp = 10**samples['A'+str(ss)][nr]
except:
AA_tmp = 0
vals['A%d'%ss] = np.log10(AA_tmp)
if ss == 0 or MB.ZEVOL:
try:
ZZtmp = samples['Z%d'%ss][nr]
except:
ZZtmp = MB.ZFIX
vals['Z%d'%ss] = ZZtmp
mod0_tmp, xm_tmp = fnc.tmp04(vals, f_val=False, check_bound=False, lib_all=True)
fm_tmp = mod0_tmp
if False:
# Each;
ytmp_each[kk,:,ss] = mod0_tmp[:] * c / np.square(xm_tmp[:]) / d
#if kk == 100:
# ax1.plot(xm_tmp[:], ytmp_each[kk,:,ss], color=col[ss], linestyle='--')
#
# Dust component;
#
if f_dust:
if kk == 0:
par = Parameters()
par.add('MDUST',value=samples['MDUST'][nr])
try:
par.add('TDUST',value=samples['TDUST'][nr])
except:
par.add('TDUST',value=0)
par['MDUST'].value = samples['MDUST'][nr]
try:
par['TDUST'].value = samples['TDUST'][nr]
except:
par['TDUST'].value = 0
model_dust, x1_dust = fnc.tmp04_dust(par.valuesdict())#, zbes, lib_dust_all)
if kk == 0:
deldt = (x1_dust[1] - x1_dust[0])
x1_tot = np.append(xm_tmp,np.arange(np.max(xm_tmp),np.max(x1_dust),deldt))
# Redefine??
ytmp = np.zeros((mmax,len(x1_tot)), dtype='float')
ytmp_dust = np.zeros((mmax,len(x1_dust)), dtype='float')
ytmp_dust[kk,:] = model_dust * c/np.square(x1_dust)/d
model_tot = np.interp(x1_tot,xm_tmp,fm_tmp) + np.interp(x1_tot,x1_dust,model_dust)
ytmp[kk,:] = model_tot[:] * c/np.square(x1_tot[:])/d
else:
x1_tot = xm_tmp
ytmp[kk,:] = fm_tmp[:] * c / np.square(xm_tmp[:]) / d
# plot random sed;
plot_mc = True
if plot_mc:
ax1.plot(x1_tot, ytmp[kk,:], '-', lw=1, color='gray', zorder=-2, alpha=0.02)
# Grism plot + Fuv flux + LIR.
if f_grsm:
ax2t.plot(x1_tot, ytmp[kk,:], '-', lw=0.5, color='gray', zorder=3., alpha=0.02)
if True:
# Get FUV flux;
Fuv[kk] = get_Fuv(x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)) * (DL**2/(1.+zbes)) / (DL10**2), lmin=1250, lmax=1650)
Fuv28[kk] = get_Fuv(x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)) * (4*np.pi*DL**2/(1.+zbes))*Cmznu, lmin=1500, lmax=2800)
Lir[kk] = 0
# Get UVJ Color;
lmconv,fconv = filconv_fast(MB.filts_rf, MB.band_rf, x1_tot[:]/(1.+zbes), (ytmp[kk,:]/(c/np.square(x1_tot)/d)))
UVJ[kk,0] = -2.5*np.log10(fconv[0]/fconv[2])
UVJ[kk,1] = -2.5*np.log10(fconv[1]/fconv[2])
UVJ[kk,2] = -2.5*np.log10(fconv[2]/fconv[3])
UVJ[kk,3] = -2.5*np.log10(fconv[4]/fconv[3])
# Do stuff...
time.sleep(0.01)
# Update Progress Bar
printProgressBar(kk, mmax, prefix = 'Progress:', suffix = 'Complete', length = 40)
print('')
#
# Plot Median SED;
#
ytmp16 = np.percentile(ytmp[:,:],16,axis=0)
ytmp50 = np.percentile(ytmp[:,:],50,axis=0)
ytmp84 = np.percentile(ytmp[:,:],84,axis=0)
if f_dust:
ytmp_dust50 = np.percentile(ytmp_dust[:,:],50, axis=0)
#if not f_fill:
ax1.fill_between(x1_tot[::nstep_plot], ytmp16[::nstep_plot], ytmp84[::nstep_plot], ls='-', lw=.5, color='gray', zorder=-2, alpha=0.5)
ax1.plot(x1_tot[::nstep_plot], ytmp50[::nstep_plot], '-', lw=.5, color='gray', zorder=-1, alpha=1.)
# Attach the data point in MB;
MB.sed_wave_obs = xbb
MB.sed_flux_obs = fybb * c / np.square(xbb) / d
MB.sed_eflux_obs = eybb * c / np.square(xbb) / d
# Attach the best SED to MB;
MB.sed_wave = x1_tot
MB.sed_flux16 = ytmp16
MB.sed_flux50 = ytmp50
MB.sed_flux84 = ytmp84
#########################
# Calculate non-det chi2
    # based on Sawicki (2012)
#########################
#chi2,fin_chi2 = get_chi2(fy, ey, wht3, ysump, ndim_eff, SNlim=1.0, f_chind=f_chind, f_exclude=f_exclude, xbb=xbb, x_ex=x_ex)
def func_tmp(xint,eobs,fmodel):
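        # Gaussian integrand in the model flux (used for the non-detection likelihood);
        # not called directly below, since the equivalent integral is evaluated
        # analytically with scipy.special.erf.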
int_tmp = np.exp(-0.5 * ((xint-fmodel)/eobs)**2)
return int_tmp
if f_chind:
conw = (wht3>0) & (ey>0) & (fy/ey>SNlim)
else:
conw = (wht3>0) & (ey>0)
chi2 = sum((np.square(fy-ysump) * np.sqrt(wht3))[conw])
chi_nd = 0.0
if f_chind:
f_ex = np.zeros(len(fy), 'int')
for ii in range(len(fy)):
if f_exclude:
if xbb[ii] in x_ex:
f_ex[ii] = 1
con_up = (ey>0) & (fy/ey<=SNlim) & (f_ex == 0)
from scipy import special
x_erf = (ey[con_up] - ysump[con_up]) / (np.sqrt(2) * ey[con_up])
f_erf = special.erf(x_erf)
chi_nd = np.sum( np.log(np.sqrt(np.pi / 2) * ey[con_up] * (1 + f_erf)) )
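        # The erf expression above is the analytic integral of the Gaussian likelihood
        # up to the flux limit (taken here as ey), i.e.
        # int_{-inf}^{f_lim} exp(-(f - model)^2 / (2 ey^2)) df
        #   = sqrt(pi/2) * ey * (1 + erf((f_lim - model) / (sqrt(2) * ey))),
        # and chi_nd sums its log so that -2*chi_nd can be added to chi2 (Sawicki 2012).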
    # Number of degrees of freedom;
con_nod = (wht3>0) & (ey>0) #& (fy/ey>SNlim)
if MB.ferr:
ndim_eff -= 1
nod = int(len(wht3[con_nod])-ndim_eff)
if nod>0:
fin_chi2 = (chi2 - 2 * chi_nd) / nod
else:
fin_chi2 = -99
if f_chind:
conw = (wht3>0) & (ey>0) & (fy/ey>SNlim)
con_up = (ey>0) & (fy/ey<=SNlim) & (f_ex == 0)
else:
conw = (wht3>0) & (ey>0)
# Print results;
print('\n')
print('No-of-detection : %d'%(len(wht3[conw])))
print('chi2 : %.2f'%(chi2))
if f_chind:
print('No-of-non-detection: %d'%(len(ey[con_up])))
print('chi2 for non-det : %.2f'%(- 2 * chi_nd))
print('No-of-params : %d'%(ndim_eff))
print('Degrees-of-freedom : %d'%(nod))
print('Final chi2/nu : %.2f'%(fin_chi2))
if False:
from lmfit import Model, Parameters, minimize, fit_report, Minimizer
from .posterior_flexible import Post
class_post = Post(MB)
residual = class_post.residual
MB.set_param()
fit_params = MB.fit_params #Parameters()
for key in vals.keys():
try:
fit_params[key].value=vals[key]
except:
pass
out_tmp = minimize(residual, fit_params, args=(fy, ey, wht3, False), method='differential_evolution') # nelder is the most efficient.
csq = out_tmp.chisqr
rcsq = out_tmp.redchi
print(csq, rcsq)
#
# plot BB model from best template (blue squares)
#
col_dia = 'blue'
if f_dust:
ALLFILT = np.append(SFILT,DFILT)
#for ii in range(len(x1_tot)):
# print(x1_tot[ii], model_tot[ii]*c/np.square(x1_tot[ii])/d)
lbb, fbb, lfwhm = filconv(ALLFILT, x1_tot, ytmp50, DIR_FILT, fw=True)
lbb, fbb16, lfwhm = filconv(ALLFILT, x1_tot, ytmp16, DIR_FILT, fw=True)
lbb, fbb84, lfwhm = filconv(ALLFILT, x1_tot, ytmp84, DIR_FILT, fw=True)
ax1.plot(x1_tot, ytmp50, '--', lw=0.5, color='purple', zorder=-1, label='')
ax3t.plot(x1_tot, ytmp50, '--', lw=0.5, color='purple', zorder=-1, label='')
iix = []
for ii in range(len(fbb)):
iix.append(ii)
con_sed = ()
ax1.scatter(lbb[iix][con_sed], fbb[iix][con_sed], lw=0.5, color='none', edgecolor=col_dia, zorder=3, alpha=1.0, marker='d', s=50)
# plot FIR range;
ax3t.scatter(lbb, fbb, lw=0.5, color='none', edgecolor=col_dia, \
zorder=2, alpha=1.0, marker='d', s=50)
else:
lbb, fbb, lfwhm = filconv(SFILT, x1_tot, ytmp50, DIR_FILT, fw=True, MB=MB, f_regist=False)
lbb, fbb16, lfwhm = filconv(SFILT, x1_tot, ytmp16, DIR_FILT, fw=True, MB=MB, f_regist=False)
lbb, fbb84, lfwhm = filconv(SFILT, x1_tot, ytmp84, DIR_FILT, fw=True, MB=MB, f_regist=False)
iix = []
for ii in range(len(fbb)):
iix.append(np.argmin(np.abs(lbb[ii]-xbb[:])))
con_sed = (eybb>0)
ax1.scatter(lbb[iix][con_sed], fbb[iix][con_sed], lw=0.5, color='none', edgecolor=col_dia, zorder=3, alpha=1.0, marker='d', s=50)
# Calculate EW, if there is excess band;
try:
iix2 = []
for ii in range(len(fy_ex)):
iix2.append(np.argmin(np.abs(lbb[:]-x_ex[ii])))
# Rest-frame EW;
# Note about 16/84 in fbb
EW16 = (fy_ex * c /
|
np.square(x_ex)
|
numpy.square
|
"""
This example uses vor_fast to calculate daily vortex moment diagnostics for DJFM
for a model, and then derives the modal centroid latitude and aspect ratio from them.
"""
from netCDF4 import Dataset
import numpy as np
import vor_fast
import vor_fast_setup
from scipy import stats
# Read in NetCDF file with geopotential height values at 10 hPa, northern hemisphere
ncin = Dataset('DJFM_data.nc', 'r')
gph = ncin.variables['zg'][:].squeeze()
lons = ncin.variables['lon'][:]
lats = ncin.variables['lat'][:]
days = ncin.variables['time'][:]
ncin.close()
# Set up cartesian mapping xypoints and restrict to NH
gph_nh, lats_nh, xypoints = vor_fast_setup.setup(gph,lats,lons,'NH')
# Set up moment diagnostics
aspect = np.empty(0)
latcent = np.empty(0)
# Calculate diagnostics for each day
# vortex edge should be calculated for each individual model
for iday in range(len(days)):
print('Calculating moments for day '+str(iday))
moments = vor_fast.calc_moments(gph_nh[iday,:,:],lats_nh,lons,xypoints,
hemisphere='NH',field_type='GPH',
edge=3.016e4,resolution='low')
aspect = np.append(aspect, moments['aspect_ratio'])
latcent = np.append(latcent, moments['centroid_latitude'])
np.save('model_aspect.npy',aspect)
np.save('model_centlat.npy',latcent)
#remove nans and inf values from arrays and limit aspect values to <100
aspect=aspect[np.logical_not(np.isnan(aspect))]
aspect=aspect[aspect < float('+inf')]
aspect=aspect[aspect < 100.0]
latcent=latcent[np.logical_not(np.isnan(latcent))]
latcent=latcent[latcent < float('+inf')]
#for latcent, cube the values to transform the distribution
latcent3=np.power(latcent,3)
#fit Gaussian distribution and use KS test for fit
dist=getattr(stats,'norm')
parameters=dist.fit(latcent3)
(mean,SD)=parameters
ks_stat,ks_pval=stats.kstest(latcent3,"norm",parameters)
# convert latitude back (cube root)
mode=np.power(mean,1/3)
print('latcent mode:',mode)
print('norm KS statistic and p value:',ks_stat,ks_pval)
#fit GEV to aspect data
dist2=getattr(stats,'genextreme')
parameters=dist2.fit(aspect)
(shape,location,scale)=parameters
ks_stat_GEV,ks_pval_GEV = stats.kstest(aspect,"genextreme",parameters)
print('location:', location)
print(' GEV KS statistic and p value:',ks_stat_GEV,ks_pval_GEV)
#aspect mode calculated according to Seviour et al. 2016 (S16)
asp_params=stats.genextreme.fit(aspect)
asp_pdf=stats.genextreme.pdf(aspect,asp_params[0],loc=asp_params[1],scale=asp_params[2])
index=
|
np.argmax(asp_pdf)
|
numpy.argmax
|
##NumPy Arrays
import numpy as np
#Instantiates a list with fixed values
my_list = [1,2,3]
#Casts the list as an array and assigns it to a variable
arr =
|
np.array(my_list)
|
numpy.array
|
import os
import inspect
import csv
import numpy as np
import matplotlib.pyplot as plt
from calc.examples.example_systems import make_big_system
def get_layer_results(version='2'):
filename = '{}/layer-results-{}.txt'.format(os.path.dirname(inspect.stack()[0][1]), version)
with open(filename) as csvfile:
r = csv.reader(csvfile)
n = []
e = []
w_rf = []
for row in r:
vals = [_get_float(x) for x in row]
# print(row)
n.append([vals[0], vals[1]])
e.append([vals[2], vals[3]])
w_rf.append([vals[4], vals[5]])
return np.array(n), np.array(e), np.array(w_rf)
def get_connection_results(version='2'):
filename = '{}/connection-results-{}.txt'.format(os.path.dirname(inspect.stack()[0][1]), version)
with open(filename) as csvfile:
r = csv.reader(csvfile)
b = []
f = []
for row in r:
vals = [_get_float(x) for x in row]
if vals[0] == 0:
f.append([vals[1], vals[2]])
else:
b.append([vals[1], vals[2]])
return
|
np.array(b)
|
numpy.array
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
simulation_angles_distances.py: Create angle vs. distances recovery results.
"""
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from pylocus.algorithms import procrustes
from pylocus.algorithms import reconstruct_mds
from pylocus.basics_angles import get_theta_tensor
from angle_set import AngleSet
from algorithms import reconstruct_from_angles
from algorithms import reconstruct_theta
from algorithms import solve_constrained_optimization
from simulation_discrepancy import generate_linear_constraints
from simulation_discrepancy import mse
def add_edm_noise(edm, sigma=0.1):
distances = np.sqrt(edm[np.triu(edm) > 0])
noisy_distances = distances + np.random.normal(scale=sigma, size=distances.shape)
noisy_edm = np.empty(edm.shape)
np.fill_diagonal(noisy_edm, 0.0)
noisy_edm[np.triu_indices(N, 1)] = noisy_distances**2
noisy_edm += noisy_edm.T
SNR = np.mean(distances**2) / sigma**2
SNR_dB = 10 *
|
np.log10(SNR)
|
numpy.log10
|
"""
##
Code modified by <NAME>, PhD Candidate, University of Washington
Modified to keep adjacency matrix, i.e. disable stochastic rewiring by Deep R, for better biological plausibility
Modified from https://github.com/IGITUGraz/LSNN-official
with the following copyright message retained from the original code:
##
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
import numpy as np
import numpy.random as rd
import numpy.linalg as la
import matplotlib.pyplot as plt
def balance_matrix_per_neuron(M):
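    # Rescale the non-zero synapses of each post-synaptic neuron (column of M) so that
    # its incoming excitatory and inhibitory weights sum to approximately zero.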
M = M.copy()
n_in, n_out = M.shape
for k in range(n_out):
# Change only non zero synapses to keep as much zeros as possible
e_act = M[:, k] > 0
i_act = M[:, k] < 0
if np.sum(i_act) == 0:
M[:, k] = 0
print(
                'Warning: Neuron {} has no incoming synapses from inhibitory neurons. Setting all incoming weights to 0 to avoid unbalanced behaviour.'.format(
k))
if np.sum(e_act) == 0:
M[:, k] = 0
print(
                'Warning: Neuron {} has no incoming synapses from excitatory neurons. Setting all incoming weights to 0 to avoid unbalanced behaviour.'.format(
k))
s_e = M[e_act, k].sum()
s_i = M[i_act, k].sum()
# Add a small portion to compensate if the mean is not balanced
if s_e + s_i < 0:
M[e_act, k] += np.abs(s_e + s_i) / np.sum(e_act)
else:
M[i_act, k] -= np.abs(s_e + s_i) / np.sum(i_act)
sum_check = M[:, k].sum()
        assert sum_check ** 2 < 1e-5, 'Mismatch of row balancing for neuron {}, sum is {} with excitatory {} and inhibitory {}'.format(
k, sum_check, s_e, s_i)
return M
def max_eigen_value_on_unit_circle(w):
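    # Scale w so that its spectral radius (largest |eigenvalue|) equals 1;
    # returns the scaled matrix and the scaling factor that was applied.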
vals = np.abs(la.eig(w)[0])
factor = 1. / np.max(vals)
return w * factor, factor
def random_sparse_signed_matrix(neuron_sign, p=1., balance_zero_mean_per_neuron=True, n_out=None):
'''
Provide a good initialization for a matrix with restricted sign.
This is a personal recipe.
:param neuron_sign:
:param p:
:param balance_zero_mean_per_neuron:
:param n_out:
:return:
'''
E = neuron_sign > 0
I = neuron_sign < 0
n = neuron_sign.__len__()
if n_out is None:
n_out = n
# Random numbers
is_con = rd.rand(n, n) < p
theta = np.abs(rd.randn(n, n))
theta = (2 * is_con - 1) * theta
sign = np.tile(np.expand_dims(neuron_sign, 1), (1, n))
w = lambda theta, sign: (theta) * (theta > 0) * sign
_w = w(theta, sign)
if (np.sum(I) > 0):
# Normalize a first time, but this is obsolete if the stabilization happens also on a single neuron basis
val_E = np.sum(_w[E, :])
val_I = - np.sum(_w[I, :])
assert val_I > 0 and val_E > 0, 'Sign error'
theta[I, :] *= val_E / val_I
_w = w(theta, sign)
if balance_zero_mean_per_neuron:
w_balanced = balance_matrix_per_neuron(_w)
theta[theta > 0] = np.abs(w_balanced[theta > 0])
_w = w(theta, sign)
            assert (_w[np.logical_not(is_con)] == 0).all(), 'Balancing the neurons produced a sign error'
else:
print("Warning: no inhibitory neurons detected, no balancing is performed")
# Normalize to scale the eigenvalues
_, factor = max_eigen_value_on_unit_circle(_w)
theta *= factor
_w = w(theta, sign)
assert (_w[E] >= 0).all(), 'Found negative excitatory weights'
    assert (_w[I] <= 0).all(), 'Found positive inhibitory weights'
if n_out is None:
return w, sign, theta, is_con
elif n < n_out:
sel = np.random.choice(n, size=n_out)
else:
sel = np.arange(n_out)
theta = theta[:, sel]
sign = sign[:, sel]
is_con = is_con[:, sel]
return w(theta, sign), sign, theta, is_con
def test_random_sparse_signed_matrix():
# Define parameter
p = .33
p_e = .75
mean_E = .4
std_E = 0
n_in = 400
neuron_sign = rd.choice([1, -1], n_in, p=[p_e, 1 - p_e])
M1, M1_sign, M1_theta, M1_is_con = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=p,
balance_zero_mean_per_neuron=True)
s1, _ = la.eig(M1)
assert np.all(np.abs(M1[M1_is_con]) == M1_theta[M1_is_con])
assert np.all(np.sign(M1) == M1_sign * M1_is_con)
assert np.all(M1_is_con == (M1_theta > 0))
M2, _, _, _ = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=1., balance_zero_mean_per_neuron=True)
M2 = M2 * (rd.rand(n_in, n_in) < p)
s2, _ = la.eig(M2)
fig, ax_list = plt.subplots(2)
ax_list[0].set_title('Random sign constrained without neuron specific balance (p={:.3g})'.format(p))
ax_list[1].set_title('Random sign constrained, probability mask taken after scaling')
ax_list[0].scatter(s1.real, s1.imag)
ax_list[1].scatter(s2.real, s2.imag)
c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
ax_list[0].add_artist(c)
c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
ax_list[1].add_artist(c)
for ax in ax_list:
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
plt.show()
def sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(theta_list, ps, upper_bound_check=False):
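    # Sample, for each theta matrix, how many connections should be re-created so that
    # the global number of active connections stays at its target: the deficit
    # (max_total_connections - total_connected) is split across matrices with a
    # categorical draw proportional to each matrix's share of possible connections.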
with tf.name_scope('NBreconnectGenerator'):
theta_vals = [theta.read_value() for theta in theta_list]
# Compute size and probability of connections
nb_possible_connections_list = [tf.cast(tf.size(th), dtype=tf.float32) * p for th, p in zip(theta_list, ps)]
total_possible_connections = tf.reduce_sum(nb_possible_connections_list)
max_total_connections = tf.cast(total_possible_connections, dtype=tf.int32)
sampling_probs = [nb_possible_connections / total_possible_connections \
for nb_possible_connections in nb_possible_connections_list]
def nb_connected(theta_val):
is_con = tf.greater(theta_val, 0)
n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
return n_connected
total_connected = tf.reduce_sum([nb_connected(theta) for theta in theta_vals])
if upper_bound_check:
assert_upper_bound_check = tf.Assert(tf.less_equal(total_connected, max_total_connections),
data=[max_total_connections, total_connected],
name='RewiringUpperBoundCheck')
else:
assert_upper_bound_check = tf.Assert(True,
data=[max_total_connections, total_connected],
name='SkippedRewiringUpperBoundCheck')
with tf.control_dependencies([assert_upper_bound_check]):
nb_reconnect = tf.maximum(0, max_total_connections - total_connected)
sample_split = tf.distributions.Categorical(probs=sampling_probs).sample(nb_reconnect)
is_class_i_list = [tf.equal(sample_split, i) for i in range(len(theta_list))]
counts = [tf.reduce_sum(tf.cast(is_class_i, dtype=tf.int32)) for is_class_i in is_class_i_list]
return counts
def compute_gradients_with_rewiring_variables_NP(dEdWi, dEdWr, opt, loss, var_list):
rewiring_w_list = tf.get_collection('Rewiring/Weights')
rewiring_sign_list = tf.get_collection('Rewiring/Signs')
rewiring_var_list = tf.get_collection('Rewiring/Variables')
# generate the two sets of variables
grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
# compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
rewiring_gradient_list[0] = dEdWi
rewiring_gradient_list[1] = dEdWr
rewiring_gradient_list = [g * s if g is not None else None for g, s in
zip(rewiring_gradient_list, rewiring_sign_list)]
rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
# OP to apply all gradient descent updates
gathered_grads_and_vars = []
for (g, v) in grads_and_vars:
if v not in rewiring_var_list:
gathered_grads_and_vars.append((g, v))
else:
gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
return gathered_grads_and_vars
def compute_gradients_with_rewiring_variables(opt, loss, var_list):
rewiring_w_list = tf.get_collection('Rewiring/Weights')
rewiring_sign_list = tf.get_collection('Rewiring/Signs')
rewiring_var_list = tf.get_collection('Rewiring/Variables')
# generate the two sets of variables
grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
# compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
rewiring_gradient_list = [g * s if g is not None else None for g, s in
zip(rewiring_gradient_list, rewiring_sign_list)]
rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
# OP to apply all gradient descent updates
gathered_grads_and_vars = []
for (g, v) in grads_and_vars:
if v not in rewiring_var_list:
gathered_grads_and_vars.append((g, v))
else:
gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
return gathered_grads_and_vars
def get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities):
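    # Build a tf.Assert checking that the total number of active connections
    # (theta > 0) does not exceed the budget implied by the target connectivities.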
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th.read_value(), 0) for th in rewiring_var_list]
n_connected_list = [tf.reduce_sum(tf.cast(is_pos, dtype=tf.float32)) for is_pos in is_positive_theta_list]
size_list = [tf.size(is_pos) for is_pos in is_positive_theta_list]
init_n_connected_list = [tf.cast(size, dtype=tf.float32) * p for size, p in
zip(size_list, rewiring_connectivities)]
total_connected = tf.reduce_sum(n_connected_list)
limit_connected = tf.reduce_sum(init_n_connected_list)
check_connectivity = tf.Assert(total_connected <= limit_connected, [total_connected, limit_connected],
name='CheckRewiringConnectivityBound')
return check_connectivity
def rewiring_optimizer_wrapper(opt, loss, learning_rate, l1s, temperatures,
rewiring_connectivities, global_step=None,
var_list=None,
grads_and_vars=None):
if var_list is None:
var_list = tf.trainable_variables()
# Select the rewired variable in the given list of variable to train
rewiring_var_list = []
rewiring_con_list = []
for v,c in zip(tf.get_collection('Rewiring/Variables'), tf.get_collection('Rewiring/ini_con')):
if v in var_list:
rewiring_var_list.append(v)
rewiring_con_list.append(c)
if grads_and_vars is None:
grads_and_vars = compute_gradients_with_rewiring_variables(opt, loss, var_list)
else:
grads_and_vars = grads_and_vars
assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
for v, gv in zip(var_list, grads_and_vars):
assert v == gv[1]
if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
with tf.control_dependencies(is_positive_theta_list):
check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
with tf.control_dependencies([check_connectivity]):
gradient_check_list = [
tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
(g, v) in grads_and_vars]
with tf.control_dependencies(gradient_check_list):
apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
if len(rewiring_var_list) == 0:
        print('Warning: No variables to rewire were found by the rewiring optimizer wrapper')
return apply_gradients
with tf.control_dependencies([apply_gradients]):
        # This is to make sure that the algorithm does not reconnect synapses by mistake;
        # this can happen with optimizers like Adam
disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
zip(rewiring_var_list, is_positive_theta_list)]
with tf.control_dependencies(disconnection_guards):
rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
+ tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
for th, th_, l1, temp in
zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
with tf.control_dependencies(apply_regularization):
number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
rewiring_var_list, rewiring_connectivities)
apply_rewiring = [rewiring(th, ic, nb_reconnect=nb) for th, ic, nb in
zip(rewiring_var_list, rewiring_con_list, number_of_rewired_connections)]
with tf.control_dependencies(apply_rewiring):
train_step = tf.no_op('Train')
return train_step
def rewiring_optimizer_wrapper_NP(dEdWi, dEdWr, opt, loss, learning_rate, l1s, temperatures,
rewiring_connectivities, global_step=None,
var_list=None,
grads_and_vars=None):
if var_list is None:
var_list = tf.trainable_variables()
# Select the rewired variable in the given list of variable to train
rewiring_var_list = []
rewiring_con_list = []
for v,c in zip(tf.get_collection('Rewiring/Variables'), tf.get_collection('Rewiring/ini_con')):
if v in var_list:
rewiring_var_list.append(v)
rewiring_con_list.append(c)
if grads_and_vars is None:
grads_and_vars = compute_gradients_with_rewiring_variables_NP(dEdWi, dEdWr, opt, loss, var_list)
else:
grads_and_vars = grads_and_vars
assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
for v, gv in zip(var_list, grads_and_vars):
assert v == gv[1]
if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
with tf.control_dependencies(is_positive_theta_list):
check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
with tf.control_dependencies([check_connectivity]):
gradient_check_list = [
tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
(g, v) in grads_and_vars]
with tf.control_dependencies(gradient_check_list):
apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
if len(rewiring_var_list) == 0:
        print('Warning: No variables to rewire were found by the rewiring optimizer wrapper')
return apply_gradients
with tf.control_dependencies([apply_gradients]):
        # This is to make sure that the algorithm does not reconnect synapses by mistake;
        # this can happen with optimizers like Adam
disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
zip(rewiring_var_list, is_positive_theta_list)]
with tf.control_dependencies(disconnection_guards):
rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
# 0*
noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
+ tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
for th, th_, l1, temp in
zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
with tf.control_dependencies(apply_regularization):
number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
rewiring_var_list, rewiring_connectivities)
apply_rewiring = [rewiring(th, ic, nb_reconnect=nb) for th, ic, nb in
zip(rewiring_var_list, rewiring_con_list, number_of_rewired_connections)]
with tf.control_dependencies(apply_rewiring):
train_step = tf.no_op('Train')
return train_step
def weight_sampler(n_in, n_out, p, dtype=tf.float32, neuron_sign=None, w_scale=1., eager=False):
'''
    Returns a weight matrix and the underlying variable and sign matrices needed for rewiring.
:param n_in:
:param n_out:
    :param p:
:param dtype:
:return:
'''
if eager:
Variable = tf.contrib.eager.Variable
else:
Variable = tf.Variable
with tf.name_scope('SynapticSampler'):
nb_non_zero = int(n_in * n_out * p)
is_con_0 = np.zeros((n_in, n_out), dtype=bool)
ind_in = rd.choice(np.arange(n_in), size=nb_non_zero)
ind_out = rd.choice(np.arange(n_out), size=nb_non_zero)
is_con_0[ind_in, ind_out] = True
# Generate random signs
if neuron_sign is None:
theta_0 = np.abs(rd.randn(n_in, n_out) / np.sqrt(n_in)) # initial weight values
theta_0 = theta_0 * is_con_0
sign_0 = np.sign(
|
rd.randn(n_in, n_out)
|
numpy.random.randn
|
# -*- coding: utf-8 -*-
def plot_levels(imin, m=None, p=0, n=13):
"""plotlevels with logspace
"""
import numpy as np
if not isinstance(imin, (int, float)):
raise ValueError("Require a int or float")
if m is not None and not isinstance(m, (int, float)):
raise ValueError("Require a int or float")
if np.log(np.abs(imin)) < 2 and p == 0:
p += 1
# Centered around 0
if m is None:
        values = (-1 * np.round(imin * np.logspace(0, 2, n//2) / 100., p)).tolist() + [0] + np.round(
            imin * np.logspace(0, 2, n//2) / 100., p).tolist()
else:
if imin > m:
tmp = imin
imin = m
m = tmp
if imin == 0:
# only Positive
values = np.round(m * np.logspace(0, 2, n) / 100., p).tolist()
elif imin < 0 and m < 0:
# only negative
values = np.round(imin * np.logspace(0, 2, n) / 100., p).tolist()
else:
            # positive and negative
            values = np.round(imin * np.logspace(0, 2, n//2) / 100., p).tolist() + np.round(
                m * np.logspace(0, 2, n//2) / 100., p).tolist()
return np.unique(np.sort(np.asarray(values))).tolist()
def plot_arange(imin, m=None, p=0, n=7):
import numpy as np
if not isinstance(imin, (int, float)):
raise ValueError("Require a int or float")
if m is not None and not isinstance(m, (int, float)):
raise ValueError("Require a int or float")
if np.log(np.abs(imin)) < 2 and p == 0:
p += 1
if m is None:
values = np.linspace(-1 * imin, imin, n)
else:
values = np.linspace(imin, m, n)
values = np.round(values, p)
return np.unique(np.sort(np.asarray(values))).tolist()
def get_info(x):
return x.attrs.get('standard_name', x.name if x.name is not None else 'var') + ' [' + x.attrs.get('units', '1') + ']'
def set_labels(known, **kwargs):
for ikey, ival in kwargs.items():
known.update({ikey: known.get(ikey, ival)})
def line(dates, values, title='', ylabel='', xlabel='', xerr=None, yerr=None, filled=False, minmax=False, ax=None, **kwargs):
"""
Args:
dates (ndarray): datetime
values (ndarray): values
title (str): title
ylabel (str): y-axis label
xlabel (str): x-axis label
xerr (ndarray): x error
yerr (ndarray): y error
filled (bool): fill between error lines
ax (axes): matplotlib axis
**kwargs: optional keyword arguments for plotting
Returns:
axes : matplotlib axis
"""
import matplotlib.pyplot as plt
if ax is None:
f, ax = plt.subplots(figsize=kwargs.get('figsize', None)) # 1D SNHT PLOT
if xerr is None and yerr is None:
ax.plot(dates, values,
ls=kwargs.get('ls', '-'),
lw=kwargs.get('lw', 1),
label=kwargs.get('label', None),
marker=kwargs.get('marker', None),
alpha=kwargs.get('alpha', 1),
color=kwargs.get('color', None),
zorder=kwargs.get('zorder', 1)) # Line Plot
elif filled:
ax.plot(dates, values,
ls=kwargs.get('ls', '-'),
lw=kwargs.get('lw', 1),
label=kwargs.get('label', None),
marker=kwargs.get('marker', None),
# alpha=kwargs.get('alpha', 1),
color=kwargs.get('color', None)) # Line Plot
low, high = lowhigh(dates, values, xerr=xerr, yerr=yerr, minmax=minmax)
if xerr is None:
ax.fill_between(dates, low, high,
alpha=kwargs.get('alpha', 0.5),
color=ax.get_lines()[-1].get_color(),
hatch=kwargs.get('hatch', None),
zorder=-1)
else:
ax.fill_betweenx(values, low, high,
alpha=kwargs.get('alpha', 0.5),
color=ax.get_lines()[-1].get_color(),
hatch=kwargs.get('hatch', None),
zorder=-1)
else:
ax.errorbar(dates, values, xerr=xerr, yerr=yerr,
ls=kwargs.get('ls', '-'),
lw=kwargs.get('lw', 1),
label=kwargs.get('label', None),
marker=kwargs.get('marker', None),
alpha=kwargs.get('alpha', 1),
color=kwargs.get('color', None),
zorder=kwargs.get('zorder', 1)) # Line Plot
ax.grid('gray', ls='--')
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
return ax
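# Example usage with hypothetical data:
#   ax = line(dates, values, yerr=errors, filled=True, label='obs', color='k')
#   ax.legend()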
def lowhigh(dates, values, xerr=None, yerr=None, minmax=False):
import numpy as np
if xerr is None:
if hasattr(yerr, '__iter__') and len(np.shape(yerr)) == 2:
if minmax:
low = yerr[0]
high = yerr[1]
else:
low = values - yerr[0]
high = values + yerr[1]
else:
low = values - yerr
high = values + yerr
else:
if hasattr(xerr, '__iter__') and len(np.shape(xerr)) == 2:
if minmax:
low = xerr[0]
high = xerr[1]
else:
low = dates - xerr[0]
high = dates + xerr[1]
else:
low = dates - xerr
high = dates + xerr
return low, high
def contour(ax, dates, plevs, test, logy=False, colorlevels=None, yticklabels=None, legend=True,
title='', xlabel='', ylabel='', clabel='', **kwargs):
import numpy as np
import matplotlib.pyplot as plt
if ax is None:
f, ax = plt.subplots(figsize=kwargs.get('figsize', None)) # 1D SNHT PLOT
if kwargs.get('use_pcolormesh', False):
from matplotlib.colors import BoundaryNorm
cmap = plt.get_cmap(kwargs.pop('cmap', 'RdYlBu_r'))
norm = BoundaryNorm(colorlevels, ncolors=cmap.N, clip=True)
cs = ax.pcolormesh(dates, plevs, test.T, cmap=cmap, norm=kwargs.pop('norm', norm),
vmin=kwargs.pop('vmin', None),
vmax=kwargs.pop('vmax', None))
else:
cs = ax.contourf(dates, plevs, test.T, levels=colorlevels,
cmap=kwargs.pop('cmap', 'RdYlBu_r'),
extend=kwargs.get('extend', 'neither'),
vmin=kwargs.pop('vmin', None),
vmax=kwargs.pop('vmax', None),
norm=kwargs.pop('norm', None)
) # hatches=kwargs.pop('hatches', [])
if logy:
ax.set_yscale('log')
# xlim auto range
tmp =
|
np.isfinite(test)
|
numpy.isfinite
|
import unittest
from unittest import mock
import numpy as np
from ..hardware import *
from qupulse.hardware.dacs.alazar import AlazarCard, AlazarProgram
from qupulse.utils.types import TimeType
class AlazarProgramTest(unittest.TestCase):
def setUp(self) -> None:
# we currently allow overlapping masks in AlazarProgram (It will throw an error on upload)
# This probably will change in the future
self.masks = {
'unsorted': (np.array([1., 100, 13]), np.array([10., 999, 81])),
'sorted': (np.array([30., 100, 1300]), np.array([10., 990, 811])),
'overlapping': (np.array([30., 100, 300]), np.array([20., 900, 100]))
}
self.sample_factor = TimeType.from_fraction(10**8, 10**9)
self.expected = {
'unsorted': (np.array([0, 1, 10]).astype(np.uint64), np.array([1, 8, 99]).astype(np.uint64)),
'sorted': (np.array([3, 10, 130]).astype(np.uint64), np.array([1, 99, 81]).astype(np.uint64)),
'overlapping': (np.array([3, 10, 30]).astype(np.uint64), np.array([2, 90, 10]).astype(np.uint64))
}
def test_length_computation(self):
program = AlazarProgram()
for name, data in self.masks.items():
program.set_measurement_mask(name, self.sample_factor, *data)
self.assertEqual(program.total_length, 130 + 81)
self.assertIsNone(program._total_length)
program.total_length = 17
self.assertEqual(program.total_length, 17)
def test_masks(self):
program = AlazarProgram()
for name, data in self.masks.items():
program.set_measurement_mask(name, self.sample_factor, *data)
names = []
def make_mask(name, *data):
np.testing.assert_equal(data, self.expected[name])
assert name not in names
names.append(name)
return name
result = program.masks(make_mask)
self.assertEqual(names, result)
def test_set_measurement_mask(self):
program = AlazarProgram()
begins, lengths = self.masks['sorted']
with self.assertRaises(AssertionError):
program.set_measurement_mask('foo', self.sample_factor, begins.astype(int), lengths)
expected_unsorted =
|
np.array([0, 1, 10])
|
numpy.array
|
import numpy as np
import sys
import random
import os
import time
import argparse
import glob
import matplotlib.pyplot as plt
try:
from mayavi import mlab as mayalab
except:
pass
np.random.seed(2)
# from contact_point_dataset_torch_multi_label import MyDataset
from hang_dataset import MyDataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from rotation_lib import *
from bullet_helper import *
from s2_utils import *
import pybullet as p
K = 128
def plot_corr(pc_o, pc_h, pose_transl, pose_quat, cp_top_k_idx_o, cp_top_k_idx_h, corr, aa=False):
corr = np.reshape(corr, (K, K))
pc_o_transformed = transform_pc(pc_o, pose_transl, pose_quat, aa=aa)
# plot_pc(pc_h)
# plot_pc(pc_o_transformed)
top_k_corr, top_k_corr_idx = top_k_np(corr, 512, sort=True)
top_k_corr_idx_o = top_k_corr_idx[:, 0]
top_k_corr_idx_h = top_k_corr_idx[:, 1]
# print('top k corr mean', np.mean(top_k_corr), np.max(top_k_corr), np.min(top_k_corr))
# plot_pc_s(pc_o_transformed[cp_top_k_idx_o][top_k_corr_idx_o], top_k_corr)
# plot_pc_s(pc_h[cp_top_k_idx_h][top_k_corr_idx_h], top_k_corr)
# mayalab.show()
plot_pc(pc_h)
plot_pc(pc_o_transformed)
partial_pc_o = pc_o_transformed[cp_top_k_idx_o][top_k_corr_idx_o[:3]]
partial_pc_h = pc_h[cp_top_k_idx_h][top_k_corr_idx_h[:3]]
plot_pc(partial_pc_o, color=(0, 1, 0), scale=0.002)
plot_pc(partial_pc_h, color=(0, 0, 1), scale=0.002)
mayalab.show()
rotation_center = np.mean(partial_pc_h - partial_pc_o, axis=0)
# plot_pc(pc_h)
# plot_pc(pc_o_transformed + rotation_center[np.newaxis, :])
# plot_pc(partial_pc_o + rotation_center[np.newaxis, :], color=(0, 1, 0), scale=0.002)
# plot_pc(partial_pc_h, color=(0, 0, 1), scale=0.002)
# mayalab.show()
# plot_pc(pc_o_transformed[cp_top_k_idx_o][top_k_np(corr[:, 0], 5)[1]], scale=0.002, color=(1, 0, 0))
return rotation_center[:3]
def print_helper(a):
    return '{} {} {} {}'.format(np.mean(a), np.max(a), np.min(a), np.std(a))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
parser.add_argument("--exp_name", default="")
parser.add_argument("--eval_epoch", type=int, default=-1)
parser.add_argument("--eval_ct", type=int, default=-1)
parser.add_argument('--test_list', default='test_list')
parser.add_argument('--n_gt_sample', type=int, default=128)
parser.add_argument('--restrict_object_cat', default='')
args = parser.parse_args()
assert (args.eval_ct != -1) or (args.eval_epoch != -1)
data_dir = os.path.join(args.home_dir_data, 'geo_data')
hook_dict, object_dict = load_all_hooks_objects(data_dir, ret_dict=True)
runs_dir = 'runs/exp_s2b'
p_env = p_Env(args.home_dir_data, gui=True, physics=False)
for i, run_folder_dir in enumerate(glob.glob('{}/*{}'.format(runs_dir, args.exp_name))):
# assert i == 0, run_folder_dir
result_folder = run_folder_dir
if args.eval_ct != -1:
eval_file_dir_arr = glob.glob('{}/eval/*_ct_{}.json'.format(run_folder_dir, args.eval_ct))
elif args.eval_epoch != -1:
eval_file_dir_arr = glob.glob('{}/eval/*eval_epoch_{}_ct_*.json'.format(run_folder_dir, args.eval_epoch))
assert len(eval_file_dir_arr) == 1, eval_file_dir_arr
eval_file_dir = eval_file_dir_arr[0]
eval_result_dict = load_json(eval_file_dir)
for result_file_name in eval_result_dict:
for i, one_result in enumerate(eval_result_dict[result_file_name]):
print(result_file_name, i)
pc_o = np.array(one_result['pc_o'])
pc_h = np.array(one_result['pc_h'])
gt_cp_score_o = np.array(one_result['gt_cp_score_o'])
gt_cp_score_h = np.array(one_result['gt_cp_score_h'])
pred_cp_score_o = np.array(one_result['pred_cp_score_o'])
pred_cp_score_h = np.array(one_result['pred_cp_score_h'])
pred_cp_top_k_idx_o = np.array(one_result['pred_cp_top_k_idx_o'])
pred_cp_top_k_idx_h = np.array(one_result['pred_cp_top_k_idx_h'])
loss_ce = one_result['loss_ce']
loss_listnet = one_result['loss_listnet']
if 'gt_cp_map_per_o' in one_result:
gt_cp_map_per_o = np.array(one_result['gt_cp_map_per_o'])
gt_cp_map_per_h = np.array(one_result['gt_cp_map_per_h'])
_, gt_cp_top_k_idx_o = top_k_np(gt_cp_score_o, k=128)
_, gt_cp_top_k_idx_h = top_k_np(gt_cp_score_h, k=128)
gt_gt_cp_corr = create_gt_cp_corr_preload_discretize(gt_cp_map_per_o[np.newaxis, :], gt_cp_map_per_h[np.newaxis, :], gt_cp_top_k_idx_o[np.newaxis, :], gt_cp_top_k_idx_h[np.newaxis, :], n_gt_sample=args.n_gt_sample)
gt_gt_cp_corr = gt_gt_cp_corr[0]
gt_cp_corr = np.array(one_result['gt_cp_corr'])
pred_cp_corr = np.array(one_result['pred_cp_corr'])
pred_cp_corr_top_k_idx =
|
np.array(one_result['pred_cp_corr_top_k_idx'])
|
numpy.array
|
import numpy as np
import cv2
def calc_hsl_sobelx_mask(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
img = np.copy(img)
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:, :, 1]
s_channel = hls[:, :, 2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
# Absolute x derivative to accentuate lines away from horizontal
abs_sobelx = np.absolute(sobelx)
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) &
(scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# Stack each channel
return s_binary, sxbinary
def calc_roi_mask(image_shape, top_left, top_right, bottom_left, bottom_right):
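    # Binary (uint8) mask that is 1 inside the quadrilateral spanned by the four
    # corner points and 0 elsewhere.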
roi = np.zeros(image_shape, np.uint8)
roi = cv2.fillPoly(
roi, np.array([[top_left, top_right, bottom_right, bottom_left]]), 1)
return roi
def calc_tranform_matrix(image_shape,
src_top_left, src_top_right, src_bottom_left, src_bottom_right, lr_margin):
    # src_top_left, ... are (x, y) coordinates of a box to be mapped onto another box
tb_margin = 100
W = cv2.getPerspectiveTransform(np.float32([
src_top_left,
src_top_right,
src_bottom_right,
src_bottom_left]),
np.float32([
(lr_margin, tb_margin),
(image_shape[1] - lr_margin, tb_margin),
(image_shape[1] - lr_margin, image_shape[0]),
(lr_margin, image_shape[0])])
)
return W
def measure_curvature_pixels(left_fit_coeff, right_fit_coeff, y_eval, mx, my):
'''
Calculates the curvature of polynomial functions in pixels.
'''
# y_eval: y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
# Calculation of R_curve (radius of curvature)
# # Define conversions in x and y from pixels space to meters
# ym_per_pix = 30/720 # meters per pixel in y dimension
# xm_per_pix = 3.7/700 # meters per pixel in x dimension
    # Once the parabola coefficients are obtained in pixels, convert them into meters.
    # If the parabola is x = a*(y**2) + b*y + c, and mx and my are the scales for the
    # x and y axes respectively (in meters/pixel), then the scaled parabola is
    # x = mx/(my**2)*a*(y**2) + (mx/my)*b*y + c.
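    # With x = A*y**2 + B*y + C in meters (A = a*mx/my**2, B = b*mx/my), the radius of
    # curvature at y_eval is R = (1 + (2*A*y_m + B)**2)**1.5 / |2*A| with y_m = y_eval*my,
    # which is what the two branches below evaluate for the left and right fits.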
denum = np.absolute(2 * left_fit_coeff[0] * mx / (my ** 2))
if denum != 0:
left_curverad = (
(1 + (2 * mx / (my ** 2) * left_fit_coeff[0] * y_eval * my + (mx / my) * left_fit_coeff[1])**2)**1.5) / denum
else:
print('BUG: SHOULD NOT HAPPEN')
left_curverad = 1e6
denum = np.absolute(2 * right_fit_coeff[0] * mx / (my ** 2))
if denum != 0:
right_curverad = (
(1 + (2 * mx / (my ** 2) * right_fit_coeff[0] * y_eval * my + (mx / my) * right_fit_coeff[1])**2)**1.5) / denum
else:
print('BUG: SHOULD NOT HAPPEN')
right_curverad = 1e6
return left_curverad, right_curverad
def plot_lanes_on_road(undist, warped_size, left_fitx, right_fitx, ploty, Winv):
# Create an image to draw the lines on
warp_zero = np.zeros(warped_size).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array(
[np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts =
|
np.hstack((pts_left, pts_right))
|
numpy.hstack
|
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
from scipy import stats
from scipy.signal import periodogram
from statsmodels.api import add_constant, OLS
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from sklearn.decomposition import PCA
def scale(x):
"""
Z-Score.
Parameters
----------
x: np.array or pd.DataFrame, the time series.
"""
if not isinstance(x, np.ndarray):
x = np.array(x)
scaled = (x - np.nanmean(x)) / np.nanstd(x, ddof=1)
return scaled
def fft_infer_period(x):
"""Fourier inference period.
Parameters
----------
x: np.array or pd.DataFrame, the time series.
References
----------
1. https://github.com/xuawai/AutoPeriod/blob/master/auto_period.ipynb
"""
try:
if isinstance(x, pd.DataFrame):
x = x.values.reshape(-1, )
ft = np.fft.rfft(x)
freqs = np.fft.rfftfreq(len(x), 1)
mags = abs(ft)
inflection = np.diff(np.sign(np.diff(mags)))
peaks = (inflection < 0).nonzero()[0] + 1
peak = peaks[mags[peaks].argmax()]
signal_freq = freqs[peak]
period = int(1 / signal_freq)
except:
period = 2
return {'period': period}
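# Example (hypothetical): for a series with a clean yearly cycle sampled monthly,
# fft_infer_period(series) should return a period close to 12.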
def freq_to_numerical(x, timestamp, freq_mapping_dict=None):
"""Calculates frequency.
Parameters
----------
x: pd.DataFrame, the timestamp.
timestamp: str, timestamp name of x.
    freq_mapping_dict: dict, default {'H': 24, 'D': 7, 'W': 54, 'M': 12,
'Q': 4, 'Y': 1, 'A': 1, 'S': 60, 'T': 60}.
"""
x[timestamp] = pd.to_datetime(x[timestamp])
x = x.sort_values([timestamp])
dateindex = pd.DatetimeIndex(x[timestamp])
sfreq = pd.infer_freq(dateindex)
if sfreq is None:
for i in range(len(x)):
sfreq = pd.infer_freq(dateindex[i:i + 3])
if sfreq != None:
break
if freq_mapping_dict is None:
freq_mapping_dict = {
'H': 24,
'D': 7,
'W': 54,
'M': 12,
'Q': 4,
'Y': 1,
'A': 1,
'T': 60,
'S': 60}
nfreq = freq_mapping_dict.get(sfreq[0], np.nan)
return {'nfreq': nfreq}, sfreq
def statistics(x, period: int = 1):
"""
Calculates statistics features, including length, count,
mean, var, min, max, median, range, hmean, iqr.
Parameters
----------
x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal period of the time series.
"""
if not isinstance(x, np.ndarray):
x = np.array(x)
x_len = x.shape[0]
x_col = x.shape[1]
x_mean = np.nanmean(x, axis=0)
x_var = np.nanvar(x, ddof=1, axis=0)
x_min = np.nanmin(x, axis=0)
x_max = np.nanmax(x, axis=0)
x_median =
|
np.nanmedian(x, axis=0)
|
numpy.nanmedian
|
##
# The MIT License (MIT)
#
# Copyright (c) 2019 snkas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
import numpy as np
import os
import sys
import exputil
def analyze_flow_info(logs_floodns_dir, analysis_folder_dir):
# Read in all the columns
flows_info_csv_columns = exputil.read_csv_direct_in_columns(
logs_floodns_dir + '/flow_info.csv',
"pos_int,pos_int,pos_int,string,pos_int,pos_int,pos_int,pos_float,pos_float,string"
)
flow_id_list = flows_info_csv_columns[0]
source_id_list = flows_info_csv_columns[1]
target_id_list = flows_info_csv_columns[2]
path_list = flows_info_csv_columns[3]
path_length_list = list(map(lambda x: len(x.split(">")) - 1, path_list))
# start_time_list = flows_info_csv_columns[4]
# end_time_list = flows_info_csv_columns[5]
# duration_list = flows_info_csv_columns[6]
# total_sent_list = flows_info_csv_columns[7]
avg_throughput_list = flows_info_csv_columns[8]
# metadata_list = flows_info_csv_columns[9]
# Calculate some statistics
if len(flow_id_list) == 0:
statistics = {
'all_num_flows': len(flow_id_list)
}
else:
statistics = {
'all_num_flows': len(flow_id_list),
'all_flow_num_unique_sources': len(set(source_id_list)),
'all_flow_num_unique_targets': len(set(target_id_list)),
'all_flow_avg_throughput_sum': sum(avg_throughput_list),
'all_flow_avg_throughput_min': np.min(avg_throughput_list),
'all_flow_avg_throughput_0.1th': np.percentile(avg_throughput_list, 0.1),
'all_flow_avg_throughput_1th': np.percentile(avg_throughput_list, 1),
'all_flow_avg_throughput_mean': np.mean(avg_throughput_list),
'all_flow_avg_throughput_median': np.median(avg_throughput_list),
'all_flow_avg_throughput_99th': np.percentile(avg_throughput_list, 99),
'all_flow_avg_throughput_99.9th': np.percentile(avg_throughput_list, 99.9),
'all_flow_avg_throughput_max': np.max(avg_throughput_list),
'all_flow_path_length_min': np.min(path_length_list),
'all_flow_path_length_0.1th': np.percentile(path_length_list, 0.1),
'all_flow_path_length_1th': np.percentile(path_length_list, 1),
'all_flow_path_length_mean': np.mean(path_length_list),
'all_flow_path_length_median': np.median(path_length_list),
'all_flow_path_length_99th': np.percentile(path_length_list, 99),
'all_flow_path_length_99.9th': np.percentile(path_length_list, 99.9),
'all_flow_path_length_max': np.max(path_length_list),
}
# Print results
output_filename = analysis_folder_dir + '/flow_info.statistics'
print('Writing flow statistics: ' + output_filename)
with open(output_filename, 'w+') as outfile:
for key, value in sorted(statistics.items()):
outfile.write(str(key) + "=" + str(value) + "\n")
def analyze_connection_info(logs_floodns_dir, analysis_folder_dir):
# Read in all the columns
flows_info_csv_columns = exputil.read_csv_direct_in_columns(
logs_floodns_dir + '/connection_info.csv',
"pos_int,pos_int,pos_int,pos_float,pos_float,string,pos_int,pos_int,pos_int,pos_float,string,string"
)
connection_id_list = flows_info_csv_columns[0]
source_id_list = flows_info_csv_columns[1]
target_id_list = flows_info_csv_columns[2]
# total_size_list = flows_info_csv_columns[3]
# total_sent_list = flows_info_csv_columns[4]
# flows_string_list = flows_info_csv_columns[5]
# num_flows_list = list(map(lambda x: len(x.split(";")), flows_string_list))
# start_time_list = flows_info_csv_columns[6]
# end_time_list = flows_info_csv_columns[7]
duration_list = flows_info_csv_columns[8]
avg_throughput_list = flows_info_csv_columns[9]
completed_string_list = flows_info_csv_columns[10]
completed_list = []
count_completed = 0
count_incomplete = 0
for c in completed_string_list:
if c == "T":
completed_list.append(True)
count_completed += 1
elif c == "F":
completed_list.append(False)
count_incomplete += 1
else:
raise ValueError("Invalid completed value: " + c)
# metadata_list = flows_info_csv_columns[11]
# Calculate some statistics
if len(connection_id_list) == 0:
statistics = {
'all_num_connections': len(connection_id_list),
}
else:
statistics = {
'all_num_connections': len(connection_id_list),
'all_num_connections_completed': count_completed,
'all_num_connections_incomplete': count_incomplete,
'all_num_connections_fraction_completed': float(count_completed) / float(len(connection_id_list)),
'all_connection_num_unique_sources': len(set(source_id_list)),
'all_connection_num_unique_targets': len(set(target_id_list)),
'all_connection_avg_throughput_min': np.min(avg_throughput_list),
'all_connection_avg_throughput_0.1th': np.percentile(avg_throughput_list, 0.1),
'all_connection_avg_throughput_1th': np.percentile(avg_throughput_list, 1),
'all_connection_avg_throughput_mean': np.mean(avg_throughput_list),
'all_connection_avg_throughput_median': np.median(avg_throughput_list),
'all_connection_avg_throughput_99th': np.percentile(avg_throughput_list, 99),
'all_connection_avg_throughput_99.9th': np.percentile(avg_throughput_list, 99.9),
'all_connection_avg_throughput_max': np.max(avg_throughput_list),
'all_connection_avg_throughput_sum': sum(avg_throughput_list),
}
completion_time = []
completion_throughput = []
for i in range(len(connection_id_list)):
if completed_list[i]:
completion_time.append(duration_list[i])
completion_throughput.append(avg_throughput_list[i])
if count_completed > 0:
statistics.update({
'completed_connection_completion_time_min': np.min(completion_time),
'completed_connection_completion_time_0.1th': np.percentile(completion_time, 0.1),
'completed_connection_completion_time_1th': np.percentile(completion_time, 1),
'completed_connection_completion_time_mean': np.mean(completion_time),
'completed_connection_completion_time_median': np.median(completion_time),
'completed_connection_completion_time_99th': np.percentile(completion_time, 99),
'completed_connection_completion_time_99.9th': np.percentile(completion_time, 99.9),
'completed_connection_completion_time_max': np.max(completion_time),
'completed_connection_throughput_min': np.min(completion_throughput),
'completed_connection_throughput_0.1th': np.percentile(completion_throughput, 0.1),
'completed_connection_throughput_1th': np.percentile(completion_throughput, 1),
'completed_connection_throughput_mean': np.mean(completion_throughput),
'completed_connection_throughput_median': np.median(completion_throughput),
'completed_connection_throughput_99th': np.percentile(completion_throughput, 99),
'completed_connection_throughput_99.9th': np.percentile(completion_throughput, 99.9),
'completed_connection_throughput_max': np.max(completion_throughput),
})
# Print raw results
output_filename = analysis_folder_dir + '/connection_info.statistics'
print('Writing connection statistics: %s' % output_filename)
with open(output_filename, 'w+') as outfile:
for key, value in sorted(statistics.items()):
outfile.write(str(key) + "=" + str(value) + "\n")
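# Hedged sketch (not part of the original script): the min/percentile/mean/median/max
# pattern above repeats for several metrics, so a hypothetical helper such as
# percentile_summary() below could build those dictionary entries from any value list.
def percentile_summary(prefix, values):
    # Returns the standard summary keys used throughout this analysis.
    return {
        prefix + '_min': np.min(values),
        prefix + '_0.1th': np.percentile(values, 0.1),
        prefix + '_1th': np.percentile(values, 1),
        prefix + '_mean': np.mean(values),
        prefix + '_median': np.median(values),
        prefix + '_99th': np.percentile(values, 99),
        prefix + '_99.9th': np.percentile(values, 99.9),
        prefix + '_max': np.max(values),
    }
# Example: statistics.update(percentile_summary('completed_connection_throughput', completion_throughput))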
def analyze_link_info(logs_floodns_dir, analysis_folder_dir):
# Read in all the columns
link_info_csv_columns = exputil.read_csv_direct_in_columns(
logs_floodns_dir + '/link_info.csv',
"pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_float,pos_float,string"
)
link_id_list = link_info_csv_columns[0]
source_id_list = link_info_csv_columns[1]
target_id_list = link_info_csv_columns[2]
# start_time_list = link_info_csv_columns[3]
# end_time_list = link_info_csv_columns[4]
# duration_list = link_info_csv_columns[5]
avg_utilization_list = link_info_csv_columns[6]
# avg_active_flows_list = link_info_csv_columns[7]
# metadata_list = link_info_csv_columns[8]
# Count how many links had utilization of zero
num_link_inactive = 0
num_link_active = 0
for u in avg_utilization_list:
if u == 0:
num_link_inactive += 1
else:
num_link_active += 1
# Calculate some statistics
if len(link_id_list) == 0:
statistics = {
'all_num_links': len(link_id_list),
}
else:
# General statistics
statistics = {
'all_num_links': len(link_id_list),
'all_num_links_active': num_link_active,
'all_num_links_inactive': num_link_inactive,
'all_link_unique_sources': len(set(source_id_list)),
'all_link_unique_targets': len(set(target_id_list)),
'all_link_avg_utilization_min': np.min(avg_utilization_list),
'all_link_avg_utilization_0.1th': np.percentile(avg_utilization_list, 0.1),
'all_link_avg_utilization_1th': np.percentile(avg_utilization_list, 1),
'all_link_avg_utilization_mean': np.mean(avg_utilization_list),
'all_link_avg_utilization_median': np.median(avg_utilization_list),
'all_link_avg_utilization_std': np.std(avg_utilization_list),
'all_link_avg_utilization_99th': np.percentile(avg_utilization_list, 99),
'all_link_avg_utilization_99.9th':
|
np.percentile(avg_utilization_list, 99.9)
|
numpy.percentile
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import wget
import zipfile
import csv
local = 0
download = 0
arquitecture = 1
def get_array_Images(name_path,images,measurements):
lines = []
if download ==1:
with zipfile.ZipFile(name_path+".zip", 'r') as zip_ref:
zip_ref.extractall("./")
print("Finish Unziping")
with open ('./'+ name_path +'/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
for line in lines:
source_path = line[0]
filename = source_path.split("\\")[-1]
current_path = './'+ name_path +'/IMG/' + filename
image=mpimg.imread(current_path)
images.append(image)
measurement = float(line[3])
measurements.append(measurement)
return images, measurements
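# Hedged illustration (assumption about the simulator log, not taken from this code):
# each driving_log.csv row is expected to look like
# [center_path, left_path, right_path, steering, throttle, brake, speed];
# the loop above only uses line[0] (center image path) and line[3] (steering angle).
# Minimal sketch of that per-row parsing as a standalone helper:
def parse_log_row(line, name_path):
    filename = line[0].split("\\")[-1]                    # Windows-style path in the log
    image_path = './' + name_path + '/IMG/' + filename    # local path after unzipping
    steering = float(line[3])
    return image_path, steering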
if download ==1:
wget.download("https://docs.google.com/uc?export=download&id=1KIVaoiAmXkuRSBusvfKfgYZxcXHhg6VP")
wget.download("https://docs.google.com/uc?export=download&id=14yRvt5VkfBjaMFss7IK5Ew12RpBbJTS1")
wget.download("https://docs.google.com/uc?export=download&id=1zBGIyYXSxlbmB8LJrWjavKBQaX_3ctYI")
print("Finish Downloading")
images = []
measurements = []
images,measurements = get_array_Images('first_track_1_lap_forward',images,measurements)
images,measurements = get_array_Images('first_track_1_lap_clockwise',images,measurements)
images,measurements = get_array_Images('first_track_1_lap_forward_2nd',images,measurements)
print("Finish building images array")
X_train = np.array(images)
y_train =
|
np.array(measurements)
|
numpy.array
|
"""Eval CBIR.
Author: gongyou.zyq
Date: 2020.11.25
"""
import os
import pickle
import shutil
import time
import cv2
import numpy as np
class CBIREvaluator():
"""CBIR Evaluator."""
def __init__(self):
self.query_instance_dic = pickle.load(open('GLDv2_search_label_competition_2021.pkl', 'rb'))
self.selected_test_id = list(self.query_instance_dic.keys())
self.result_dic = self.init_result_dic()
self.search_dir = '../input/landmark-retrieval-2021/index/'
self.query_dir = '../input/landmark-retrieval-2021/test/'
self.VERBOSE = False
self.MAP_METRIC = 'retrieval'
self.VIS_FLAG = False
self.VIS_TOPK = 20
self.RANK_GLOBAL_TOPK = [1, 5, 10, 20, 100]
@staticmethod
def init_result_dic():
"""Set empty dic to cache results."""
result_dic = {'gt_num_list': [], 'best_recall_list': [],
'upper_bound_list': [], 'best_thr_list': [],
'proposal_recall_list': [], 'pred_num_list': [],
'cmc_list': [], 'ap_list': [],
'prec_list': [], 'rec_list': [],
'out_prec_list': [], 'out_rec_list': []}
return result_dic
def output_final_result(self):
"""Output final result."""
mean_largest_recall = np.mean(self.result_dic['best_recall_list'])
mean_bound = np.mean(self.result_dic['upper_bound_list'])
mean_thr = np.mean(self.result_dic['best_thr_list'])
mean_gt_num = np.mean(self.result_dic['gt_num_list'])
mean_ap = np.mean(self.result_dic['ap_list'])
mean_proposal_recall = np.mean(self.result_dic['proposal_recall_list'],
axis=0)
mean_pred_num = np.mean(self.result_dic['pred_num_list'])
# np.save('./tests/localizer/average_recall_%s' % \
# self._cfg.EVAL.SIM_MODE, mean_topk_recall)
mean_cmc = np.mean(self.result_dic['cmc_list'], axis=0)
mean_prec = np.mean(self.result_dic['out_prec_list'], axis=0)
mean_rec = np.mean(self.result_dic['out_rec_list'], axis=0)
mean_cmc = np.round(mean_cmc, 4)
mean_prec = np.round(mean_prec, 4)
mean_rec = np.round(mean_rec, 4)
sim_mode = self.MAP_METRIC
cmc_list = self.result_dic['cmc_list']
print(f'----------Final Results for sim_mode: {sim_mode}------------')
print(f'Total valid query num: {len(cmc_list)}')
print('detection metric: ')
print(f'average_gt_num: {mean_gt_num:.1f}, '
f'average pred num: {mean_pred_num:.1f} '
f'largest recall: {mean_largest_recall:.4f}, '
f' average upper bound: {mean_bound:.1f}, '
f'mean_thr: {mean_thr:.4f}')
print(f'ranking metric for global {self.RANK_GLOBAL_TOPK}: ')
print(f'CMC: {mean_cmc}, mAP: {mean_ap:.4f}')
print(f'mean precision: {mean_prec}, mean recall: {mean_rec}')
def log_info(self, info_str):
"""Log verbose info."""
if self.VERBOSE:
print(info_str)
# pylint:disable=too-many-locals
def eval_data(self, all_reid_info):
"""Eval data."""
start_time = time.time()
for query_instance_id in self.selected_test_id:
self.log_info('----------eval query_instance_id: '
f'{query_instance_id}----------')
if len(self.query_instance_dic[query_instance_id]) == 0:
self.log_info('invalid query, skip eval this query')
continue
gt_info = self.load_gt_info(query_instance_id)
gt_num = gt_info['gt_num']
self.result_dic['gt_num_list'].append(gt_num)
if gt_num == 0:
self.log_info('gt_num=0, skip eval this query')
continue
pred_info = self.postprocess_pred_info(query_instance_id,
all_reid_info)
res = self.get_matching_flag(gt_info, pred_info)
[tp_flag, fp_flag, thrs, gt_matched_flag, valid_flag] = res
rec, prec = self.get_pr(tp_flag, fp_flag, gt_num)
if len(rec) == 0:
print('empty pred, put all zeros')
rec = np.array([0.0])
prec, thrs, tp_flag = rec.copy(), rec.copy(), rec.copy()
            pad_length = 100
            if len(rec) < pad_length:
                # print('pad data')
                rec = np.pad(rec, (0, pad_length-len(rec)), 'edge')
                prec = np.pad(prec, (0, pad_length-len(prec)))
                thrs = np.pad(thrs, (0, pad_length-len(thrs)))
                tp_flag = np.pad(tp_flag, (0, pad_length-len(tp_flag)))
unmatched_data_list = self.get_unmatched(query_instance_id,
gt_matched_flag)
self.get_det_eval(rec, prec, thrs)
self.get_rank_eval(rec, prec, tp_flag, gt_num)
if self.VERBOSE:
self.output_current_result(query_instance_id, tp_flag,
valid_flag)
if self.VIS_FLAG:
trimmed_pred = [tp_flag, pred_info, valid_flag]
self.vis_retrieval(query_instance_id, unmatched_data_list,
trimmed_pred)
print(f'{time.time() - start_time:.4f} seconds to eval all data')
def output_current_result(self, query_instance_id, tp_flag, valid_flag):
"""Output current result."""
matched_tp_index = np.argwhere(tp_flag > 0).flatten()
print(f'matched tp index: {matched_tp_index}')
sim_mode = self.MAP_METRIC
best_recall = round(self.result_dic['best_recall_list'][-1], 4)
upper_bound = round(self.result_dic['upper_bound_list'][-1], 4)
best_thr = round(self.result_dic['best_thr_list'][-1], 4)
gt_num = self.result_dic['gt_num_list'][-1]
proposal_recall = self.result_dic['proposal_recall_list'][-1]
cmc = self.result_dic['cmc_list'][-1]
average_precision = self.result_dic['ap_list'][-1]
out_prec = self.result_dic['out_prec_list'][-1]
out_rec = self.result_dic['out_rec_list'][-1]
print(f'sim_mode: {sim_mode}, data_shape: {valid_flag.shape}')
print(f'best recall: {best_recall}, upper bound: {upper_bound}, '
f'thr: {best_thr}, gt_num: {gt_num}, '
f'proposal recall: {proposal_recall:.4f}')
print(f'CMC: {cmc}, AP: {average_precision}')
print(f'precision: {out_prec}, recall: {out_rec}')
def load_gt_info(self, query_instance_id):
"""Load gt."""
query_bbox_dic = self.query_instance_dic[query_instance_id]
gt_bbox_dic = {}
gt_matched_flag = {}
gt_num = 0
# query image should always be ignored whatever separate camera or not
ignore_list = [query_bbox_dic['image_name']]
gt_data_list = query_bbox_dic['pos_gallery_list']
separate_cam = False
for gt_data in gt_data_list:
device_id = gt_data['device_id']
image_name = gt_data['image_name']
gt_bbox_dic[image_name] = gt_data['bbox']
if gt_data['ignore']:
ignore_list.append(image_name)
if separate_cam and device_id != query_bbox_dic['device_id'] \
and not gt_data['ignore']:
gt_num += 1
gt_matched_flag[image_name] = 0
if not separate_cam and not gt_data['ignore'] and\
image_name != query_bbox_dic['image_name']:
gt_num += 1
gt_matched_flag[image_name] = 0
if image_name == query_bbox_dic['image_name']:
ignore_list.append(image_name)
if separate_cam and device_id == query_bbox_dic['device_id']:
ignore_list.append(image_name)
gt_info = {'gt_num': gt_num, 'gt_bbox_dic': gt_bbox_dic,
'gt_matched_flag': gt_matched_flag,
'ignore_list': ignore_list}
return gt_info
def load_local_proposal(self, loc_gallery_bbox_dic):
"""Keep topk per large image for eval localizer."""
merged_bboxes = []
merged_sims = []
unique_image_ids = []
repeat_times = []
keep_num = 1
for image_name, loc_pred_for_large in loc_gallery_bbox_dic.items():
loc_pred_for_large = loc_gallery_bbox_dic[image_name]
if len(loc_pred_for_large['sim']) == 0:
continue
indexes = np.argsort(-loc_pred_for_large['sim'])[:keep_num]
merged_bboxes.append(loc_pred_for_large['bbox'][indexes])
merged_sims.append(loc_pred_for_large['sim'][indexes])
repeat_times.append(len(indexes))
unique_image_ids.append(image_name)
merged_bboxes = np.concatenate(merged_bboxes)
merged_sims = np.concatenate(merged_sims)
image_ids = np.repeat(unique_image_ids, repeat_times)
return {'sim': merged_sims,
'bbox': merged_bboxes,
'image_name': image_ids}
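    # Hedged illustration (assumed shapes, not stated in the original code):
    # loc_gallery_bbox_dic is expected to map image_name -> {'sim': np.ndarray of shape (k,),
    # 'bbox': np.ndarray of shape (k, 4)}. With keep_num=1, load_local_proposal keeps only
    # the highest-scoring proposal per large image and returns flat 'sim', 'bbox' and
    # 'image_name' arrays that are aligned element by element.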
def postprocess_pred_info(self, query_instance_id, all_reid_info):
"""Postprocess pred info (How to modify proposal)."""
pred_dic = all_reid_info[query_instance_id]
pred_dic = self.load_local_proposal(pred_dic)
pred_info = self.re_sort(pred_dic)
return pred_info
def re_sort(self, pred_dic):
"""Resort data."""
# Ref: https://zhuanlan.zhihu.com/p/37910324
pred_sim = pred_dic['sim']
pred_bboxes =
|
np.array(pred_dic['bbox'])
|
numpy.array
|
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class Test_cutout(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked(self, mock_inplace):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
mock_inplace.return_value = "foo"
rng = iarandom.RNG(0)
image_aug = iaa.cutout(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="gaussian",
cval=1,
fill_per_channel=0.5,
seed=rng)
assert mock_inplace.call_count == 1
assert image_aug == "foo"
args = mock_inplace.call_args_list[0][0]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(args[1], 10)
assert np.isclose(args[2], 20)
assert np.isclose(args[3], 30)
assert np.isclose(args[4], 40)
assert args[5] == "gaussian"
assert args[6] == 1
assert np.isclose(args[7], 0.5)
assert args[8] is rng
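# Hedged usage sketch (not part of the imgaug test suite; values are illustrative):
# iaa.cutout can also be called directly, without mocking, with the same keyword
# arguments exercised in the tests above. Unlike iaa.cutout_, it operates on a copy.
def _example_cutout_usage():
    image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape((100, 100, 3))
    image_aug = iaa.cutout(image, x1=10, y1=20, x2=30, y2=40,
                           fill_mode="constant", cval=0,
                           fill_per_channel=False, seed=None)
    return image_aug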
class Test_cutout_(unittest.TestCase):
def test_with_simple_image(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
mask = np.zeros(image.shape, dtype=bool)
mask[20:40, 10:30, :] = True
overlap_inside = np.sum(image_aug[mask] == 0) / np.sum(mask)
overlap_outside = np.sum(image_aug[~mask] > 0) / np.sum(~mask)
assert image_aug is image
assert overlap_inside >= 1.0 - 1e-4
assert overlap_outside >= 1.0 - 1e-4
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_constant_")
def test_fill_mode_constant_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("constant", mock_fill)
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_gaussian_")
def test_fill_mode_gaussian_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("gaussian", mock_fill)
@classmethod
def _test_with_fill_mode_mocked(cls, fill_mode, mock_fill):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
mock_fill.return_value = image
seed = iarandom.RNG(0)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode=fill_mode,
cval=0,
fill_per_channel=False,
seed=seed)
assert mock_fill.call_count == 1
args = mock_fill.call_args_list[0][0]
kwargs = mock_fill.call_args_list[0][1]
assert image_aug is image
assert args[0] is image
assert kwargs["x1"] == 10
assert kwargs["y1"] == 20
assert kwargs["x2"] == 30
assert kwargs["y2"] == 40
assert kwargs["cval"] == 0
assert kwargs["per_channel"] is False
assert kwargs["random_state"] is seed
def test_zero_height(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=20,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_zero_height_width(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=10,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_fully_outside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-50,
y1=150,
x2=-1,
y2=200,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_partially_inside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=-25,
y1=-25,
x2=25,
y2=25,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.all(image_aug[0:25, 0:25] == 0)
assert np.all(image_aug[0:25, 25:] > 0)
assert np.all(image_aug[25:, :] > 0)
def test_zero_sized_axes(self):
shapes = [(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0, 1, 1),
(1, 1, 0),
(1, 0, 1),
(1, 0),
(0, 1),
(0, 0)]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-5,
y1=-5,
x2=5,
y2=5,
fill_mode="constant",
cval=0)
assert np.array_equal(image_aug, image_cp)
class Test_fill_rectangle_gaussian_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=20,
x2=60,
y2=70,
cval=0,
per_channel=False,
random_state=rng)
assert np.array_equal(image_aug[:20, :],
image_cp[:20, :])
assert not np.array_equal(image_aug[20:70, 10:60],
image_cp[20:70, 10:60])
assert np.isclose(np.average(image_aug[20:70, 10:60]), 127.5,
rtol=0, atol=5.0)
assert np.isclose(np.std(image_aug[20:70, 10:60]), 255.0/2.0/3.0,
rtol=0, atol=2.5)
def test_per_channel(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=False,
random_state=iarandom.RNG(0))
image_aug_pc = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
diff11 = (image_aug[..., 0] != image_aug[..., 1])
diff12 = (image_aug[..., 0] != image_aug[..., 2])
diff21 = (image_aug_pc[..., 0] != image_aug_pc[..., 1])
diff22 = (image_aug_pc[..., 0] != image_aug_pc[..., 2])
assert not np.any(diff11)
assert not np.any(diff12)
assert np.any(diff21)
assert np.any(diff22)
def test_deterministic_with_same_seed(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug_pc1 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc2 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc3 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(1))
assert np.array_equal(image_aug_pc1, image_aug_pc2)
assert not np.array_equal(image_aug_pc2, image_aug_pc3)
def test_no_channels(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = image.reshape((1, 10))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 3, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(nb_channels=nb_channels,
per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)),
(1, 1, nb_channels))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=10,
x2=300-10,
y2=300-10,
cval=0,
per_channel=per_channel,
random_state=rng)
rect = image_aug[10:-10, 10:-10]
p_true = np.sum(rect) / rect.size
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect, image_cp[10:-10, 10:-10])
assert np.isclose(p_true, 0.5, rtol=0, atol=0.1)
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_int_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = int(max_value) - int(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
# dont generate image from choice() here, that seems
# to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect,
image_cp[10:-10, 10:-10])
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
min_value = 0.0
center_value = 0.5
max_value = 1.0
dynamic_range = np.float128(max_value) - np.float128(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
# dont generate image from choice() here, that seems
# to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert np.any(np.isclose(image, min_value,
rtol=0, atol=1e-4))
assert np.any(np.isclose(image, max_value,
rtol=0, atol=1e-4))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
assert not np.allclose(rect, image_cp[10:-10, 10:-10],
rtol=0, atol=1e-4)
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.allclose(image_aug[..., 0],
image_aug[..., c],
rtol=0, atol=1e-4)
class Test_fill_rectangle_constant_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_but_per_channel_is_false(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 25)
def test_iterable_cval_with_per_channel_is_true_channel_mismatch(self):
image = np.mod(np.arange(100*100*5), 256).astype(np.uint8).reshape(
(100, 100, 5))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
assert np.all(image_aug[20:70, 10:60, 3] == 21)
assert np.all(image_aug[20:70, 10:60, 4] == 17)
def test_single_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 17)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
def test_no_channels_single_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=per_channel, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_no_channels_iterable_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100*nb_channels), 256
).astype(np.uint8).reshape((100, 100, nb_channels))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
if per_channel:
for c in np.arange(nb_channels):
val = 17 if c % 2 == 0 else 21
assert np.all(image_aug[20:70, 10:60, c] == val)
else:
assert np.all(image_aug[20:70, 10:60, :] == 17)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[0, 1], per_channel=per_channel,
random_state=None)
rect = image_aug[10:-10, 10:-10]
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0] == 0)
assert np.all(image_aug[10:-10, 10:-10, 1] == 1)
assert np.all(image_aug[10:-10, 10:-10, 2] == 0)
else:
assert np.all(image_aug[20:70, 10:60] == 0)
def test_other_dtypes_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0]
== min_value)
assert np.all(image_aug[10:-10, 10:-10, 1]
== 10)
assert np.all(image_aug[10:-10, 10:-10, 2]
== max_value)
else:
                        assert np.all(image_aug[10:-10, 10:-10] == min_value)
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
# Use this here instead of any(isclose(...)) because
# the latter one leads to overflow warnings.
assert image.flat[0] <= np.float128(min_value) + 1.0
assert image.flat[4] >= np.float128(max_value) - 1.0
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert image_aug.dtype.name == dtype
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
if per_channel:
assert np.allclose(image_aug[10:-10, 10:-10, 0],
np.float128(min_value),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 1],
np.float128(10),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 2],
np.float128(max_value),
rtol=0, atol=1e-4)
else:
                        assert np.allclose(image_aug[10:-10, 10:-10],
                                           np.float128(min_value),
                                           rtol=0, atol=1e-4)
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldnt change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image =
|
np.zeros(shape, dtype=np.uint8)
|
numpy.zeros
|
'''
Automatic and Tunable Artifact Removal (ATAR) algorithm
----------------------------------------------------------
Author @ <NAME>
updated on Date: 26 Sep 2021
Version : 0.0.4
Github : https://github.com/Nikeshbajaj/spkit
Contact: <EMAIL> | <EMAIL>
For more details, check this:
Bajaj, Nikesh, et al. "Automatic and tunable algorithm for EEG artifact removal using wavelet decomposition with applications in
predictive modeling during auditory tasks." Biomedical Signal Processing and Control 55 (2020): 101624.
'''
from __future__ import absolute_import, division, print_function
name = "Signal Processing toolkit | EEG | ATAR Algorith"
import sys
if sys.version_info[:2] < (3, 3):
old_print = print
def print(*args, **kwargs):
flush = kwargs.pop('flush', False)
old_print(*args, **kwargs)
if flush:
file = kwargs.get('file', sys.stdout)
# Why might file=None? IDK, but it works for print(i, file=None)
file.flush() if file is not None else sys.stdout.flush()
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, lfilter#, convolve, boxcar
from joblib import Parallel, delayed
from scipy import stats
import pywt as wt
def SoftThresholding(w,theta_a,theta_g):
w1 = w.copy()
if theta_g>=theta_a:
        print('thresholds do not satisfy theta_a > theta_g')
print('Correcting: with default setting theta_g = 0.8*theta_a')
theta_g = theta_a*0.8
alpha = -(1.0/theta_g)*np.log((theta_a-theta_g)/(theta_a+theta_g))
#to avoid +inf value... np.exp(710)-->inf
w1[-(alpha*w1)>709]=708.0/(-alpha)
w2 = (1-np.exp(-alpha*w1))/(1+np.exp(-alpha*w1))*theta_a
w2[np.abs(w)<theta_g]=w[np.abs(w)<theta_g]
return w2
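# Hedged demo (illustrative values, not from the paper): coefficients with
# |w| < theta_g pass through unchanged, while larger ones are smoothly squashed
# into the interval (-theta_a, theta_a).
def _soft_thresholding_demo():
    w_demo = np.array([-150.0, -20.0, 0.5, 30.0, 200.0])
    return SoftThresholding(w_demo, theta_a=100.0, theta_g=80.0)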
def LinearAttenuanating(w,theta_a,theta_b):
w1 = w.copy()
w1 = np.sign(w1)*theta_a*(1 - (np.abs(w1)-theta_a)/(theta_b - theta_a))
w1[abs(w)<=theta_a]=w[abs(w)<=theta_a]
w1[abs(w)>theta_b]=0
return w1
def Elimination(w,theta_a):
w1 = w.copy()
w1[abs(w1)>theta_a]=0
return w1
def Outliers(WR):
#IQR = stats.iqr(WR)
#Q1 = np.median(WR)-IQR/2.0
#Q3 = np.median(WR)+IQR/2.0
Q1 = np.quantile(WR,0.25)
Q3 = np.quantile(WR,0.75)
IQR = Q3-Q1
ll = Q1-1.5*IQR
ul = Q3+1.5*IQR
return ll,ul
def ipr2thr(r,beta=0.1,k1=None,k2=100,c=100):
'''
theta_a = k2*np.exp(-beta*c*r/(2.0*k2)) for c=100
'''
theta_a = k2*np.exp(-beta*c*r/(2.0*k2))
if k1 is not None and theta_a<k1:
theta_a = k1
return theta_a
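# Hedged worked example (illustrative numbers): with the defaults k2=100, c=100 and
# beta=0.1, an inter-percentile range r=20 gives
#   theta_a = 100*exp(-0.1*100*20/(2*100)) = 100*exp(-1) ~= 36.8,
# and k1 (if given) acts as a lower bound on theta_a.
# ipr2thr(20, beta=0.1, k1=10, k2=100, c=100)  # ~36.8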
def Wfilter(x,wv='db3',thr_method='ipr',IPR=[25,75],beta=0.1,k1=10,k2=100,theta_a=np.inf,est_wmax=100,
bf=2,gf=0.8,OptMode ='soft',factor=1.0,show_plot=False,wpd_mode='symmetric',wpd_maxlevel=None,
WPD=True,packetwise=False,lvl=[],fs=128.0):
'''
Wavelet filtering using ATAR Algorithm
for more details, check:
Bajaj, Nikesh, et al. "Automatic and tunable algorithm for EEG artifact removal using wavelet decomposition with applications in
predictive modeling during auditory tasks." Biomedical Signal Processing and Control 55 (2020): 101624.
------------------
input
-----
x: single channel EEG signal shape - (n,)
Threshold Computation method:
thr_method : None (default), 'ipr'
: provided with theta_a, bf , gf
: where:-
: theta_b = bf*theta_a -- used for Linear Attenuation
: theta_g = gf*theta_a -- used for Soft thresholding
Operating modes:
OptMode = ['soft','elim','linAtten']
: default 'soft'
Wavelet Decomposition modes:
wpd_mode = ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization']
default 'symmetric'
Wavelet family:
wv = ['db3'.....'db38', 'sym2.....sym20', 'coif1.....coif17', 'bior1.1....bior6.8', 'rbio1.1...rbio6.8', 'dmey']
:'db3'(default)
IPR: Inter-percentile range - [25,75] is interquartile range, a special case of IPR
output
------
xR: Corrected signal of same shape (n,)
'''
verbose=False
if verbose:
        print('WPD:',WPD,' wv:',wv,' IPR:',IPR,' beta:',beta,' method:',thr_method,' OptMode:',OptMode)
print('k1-k2:',[k1,k2])
if WPD: # Wavelet Packet Decomposition
wp = wt.WaveletPacket(x, wavelet=wv, mode=wpd_mode,maxlevel=wpd_maxlevel)
wr = [wp[node.path].data for node in wp.get_level(wp.maxlevel, 'natural') ]
WR = np.hstack(wr)
nodes = [node for node in wp.get_level(wp.maxlevel, 'natural')]
else: # Wavelet Transform
wr = wt.wavedec(x,wavelet=wv, mode=wpd_mode,level=wpd_maxlevel)
WR = np.hstack(wr)
nodes = np.arange(len(wr))
if not(packetwise):
if thr_method=='ipr':
if k2 is None: k2=100
r = stats.iqr(WR,rng=IPR)
theta_a = ipr2thr(r,beta=beta,k1=k1,k2=k2,c=est_wmax)
elif thr_method is not None:
print('Method for computing threshold is not defined')
theta_b = bf*theta_a
theta_g = gf*theta_a
removList=[]
for i in range(len(nodes)):
#for node in wp.get_level(wp.maxlevel, 'natural'):
c = wp[nodes[i].path].data if WPD else wr[i]
if len(lvl)==0 or i in lvl:
if packetwise:
if thr_method=='ipr':
if k2 is None: k2=100
r = stats.iqr(c,rng=IPR)
theta_a = ipr2thr(r,beta=beta,k1=k1,k2=k2,c=est_wmax)
elif thr_method is not None:
print('Method for computing threshold is not defined')
theta_b = bf*theta_a
theta_g = gf*theta_a
if OptMode=='soft':
c = SoftThresholding(c,theta_a,theta_g)
elif OptMode=='linAtten':
c = LinearAttenuanating(c,theta_a,theta_b)
elif OptMode=='elim':
c = Elimination(c,theta_a)
else:
print('Operating mode was not in list..\n No wavelet filtering is applied')
pass
if WPD:
wp[nodes[i].path].data = c
else:
wr[i] = c
#Reconstruction
if WPD:
xR = wp.reconstruct(update=False)
else:
xR = wt.waverec(wr, wavelet = wv)
if show_plot:
plt.figure(figsize=(11,6))
plt.subplot(211)
plt.plot(WR,'b',alpha=0.8,label='Coef.')
        plt.ylabel('Wavelet Coefficients')
ytiW =[np.min(WR),np.max(WR)]
#print('maxlevel :',wp.maxlevel)
if WPD: wr = [wp[node.path].data for node in wp.get_level(wp.maxlevel, 'natural') ]
WRi = np.hstack(wr)
        plt.plot(WRi,'r',alpha=0.9,label='Filtered Coef.')
ytiW = ytiW+[np.min(WRi),np.max(WRi)]
plt.yticks(ytiW)
plt.grid()
plt.legend()
plt.xlim([0,len(WRi)])
plt.subplot(212)
if WPD:
t = np.arange(len(wp.data))/fs
plt.plot(t,wp.data,'b',alpha=0.8,label='signal')
else:
t = np.arange(len(x))/fs
plt.plot(t,x,'b',alpha=0.8,label='signal')
plt.plot(t,xR,'r',alpha=0.8,label='corrected')
plt.ylabel('Signal')
plt.yticks([np.min(xR),np.min(x),0,np.max(xR),np.max(x)])
plt.xlim([t[0],t[-1]])
plt.legend()
plt.grid()
plt.show()
return xR
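# Hedged usage sketch (synthetic signal, default-style parameters; not from the paper):
# applying Wfilter to a single window of a noisy 1-D signal.
def _wfilter_demo(fs=128.0):
    t = np.arange(2 * int(fs)) / fs
    x_demo = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.shape[0])
    return Wfilter(x_demo, wv='db3', thr_method='ipr', beta=0.1, k1=10, k2=100, fs=fs)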
def ATAR_1Ch(x,wv='db3',winsize=128,thr_method='ipr',IPR=[25,75],beta=0.1,k1=None,k2 =100,est_wmax=100,
theta_a=np.inf,bf=2,gf=0.8,OptMode ='soft',factor=1.0,wpd_mode='symmetric',wpd_maxlevel=None,
verbose=False, window=['hamming',True],hopesize=None, ReconMethod='custom',packetwise=False,WPD=True,lvl=[],fs=128.0):
'''
ATAR: - Automatic and Tunable Artifact Removal Algorithm
========================================================
Apply ATAR on short windows of signal (single channel):
    Signal is decomposed into smaller overlapping windows and reconstructed, after correction, using the overlap-add method.
----
for more details, check:
Ref: <NAME>, et al. "Automatic and tunable algorithm for EEG artifact removal using wavelet decomposition with applications in predictive modeling during auditory tasks." Biomedical Signal Processing and Control 55 (2020): 101624.
------------------
Wfilter(x,wv='db3',method=None,IPR=[25,75],beta=0.1,k1=None,k2 =100,
theta_a=np.inf,bf=2,gf=0.8,OptMode ='soft',factor=1.0,showPlot=False,wpd_mode='symmetric',wpd_maxlevel=None)
input
-----
X: input single-channel signal of shape (n,)
Threshold Computation method:
method : None (default), 'ipr'
: provided with theta_a, bf , gf
: theta_b = bf*theta_a
: theta_g = gf*theta_a
Operating modes:
OptMode = ['soft','elim','linAtten']
: default 'soft'
: use 'elim' with global
Wavelet Decomposition modes:
wpd_mode = ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization']
default 'symmetric'
Wavelet family:
wv = ['db3'.....'db38', 'sym2.....sym20', 'coif1.....coif17', 'bior1.1....bior6.8', 'rbio1.1...rbio6.8', 'dmey']
:'db3'(default)
    Reconstruction Method
    ReconMethod : None, 'custom', 'HamWin'
        for 'custom': window[0] is used and applied after denoising if window[1] is True,
        else windowing is applied before denoising
output
------
XR: corrected signal of same shape as input X
'''
if ReconMethod is None:
win=np.arange(winsize)
xR=[]
pf=0
while win[-1]<=x.shape[0]:
if verbose:
if 100*win[-1]/float(x.shape[0])>=pf+1:
pf = 100*win[-1]/float(x.shape[0])
pbar = '|'+'#'*int(pf)+' '*(99-int(pf))+'|'
print(str(np.round(pf,2))+'%'+pbar,end='\r', flush=True)
xi = x[win]
            xr = Wfilter(xi,wv=wv,thr_method=thr_method,IPR=IPR,beta=beta,k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,
OptMode =OptMode,show_plot=False,wpd_mode=wpd_mode,wpd_maxlevel=wpd_maxlevel,
packetwise=False,WPD=WPD,lvl=lvl,fs=fs)
xR.append(xr)
win+=winsize
xR = np.hstack(xR)
elif ReconMethod =='HamWin':
xt = np.hstack([np.zeros(winsize//2),x,np.zeros(winsize//2)])
xR = np.zeros(xt.shape)
wh = signal.windows.hamming(winsize+1)[:winsize]
win = np.arange(winsize)
while win[-1]<=xt.shape[0]:
if verbose:
pf = 100*win[-1]/float(x.shape[0])
pbar = '|'+'#'*int(pf)+' '*(99-int(pf))+'|'
print(str(np.round(pf,2))+'%'+pbar,end='\r', flush=True)
xi = xt[win]*wh
xr = Wfilter(xi,wv=wv,thr_method=thr_method,IPR=IPR,beta=beta,k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,
OptMode =OptMode,factor=factor,show_plot=False,wpd_mode=wpd_mode,wpd_maxlevel=wpd_maxlevel,
packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs)
xR[win]+= xr
win+=winsize//2
xR = xR/1.08
xR = xR[winsize//2:-winsize//2]
elif ReconMethod =='custom':
if hopesize is None: hopesize = winsize//2
M = winsize
H = hopesize
hM1 = (M+1)//2
hM2 = M//2
xt = np.hstack([np.zeros(hM2),x,np.zeros(hM1)])
pin = hM1
pend = xt.size-hM1
wh = signal.get_window(window[0],M)
#if len(window)>1: AfterApply = window[1]
#else: AfterApply = False
AfterApply = window[1] if len(window)>1 else False
if verbose: print('Windowing after apply : ',AfterApply)
xR = np.zeros(xt.shape)
pf=0
while pin<=pend:
if verbose:
if 100*pin/float(pend)>=pf+1:
pf = 100*pin/float(pend)
pbar = '|'+'#'*int(pf)+' '*(99-int(pf))+'|'
print(str(np.round(pf,2))+'%'+pbar,end='\r', flush=True)
xi = xt[pin-hM1:pin+hM2]
if not(AfterApply): xi *=wh
xr = Wfilter(xi,wv=wv,thr_method=thr_method,IPR=IPR,beta=beta,k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,
OptMode =OptMode,factor=factor,show_plot=False,wpd_mode=wpd_mode,wpd_maxlevel=wpd_maxlevel,
packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs)
if AfterApply: xr *=wh
xR[pin-hM1:pin+hM2] += H*xr ## Overlap Add method
pin += H
xR = xR[hM2:-hM1]/sum(wh)
if verbose:
pf = 100
pbar = '|'+'#'*int(pf)+' '*(99-int(pf))+'|'
print(str(np.round(pf,2))+'%'+pbar,end='\r', flush=True)
print('\n')
return xR
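# Hedged usage sketch (synthetic single-channel signal; all values illustrative):
# ATAR_1Ch splits the signal into overlapping windows, applies Wfilter to each
# window and reconstructs the output with the overlap-add method.
def _atar_1ch_demo(fs=128.0):
    x_demo = np.random.randn(int(10 * fs))   # 10 s of noise-like signal
    return ATAR_1Ch(x_demo, wv='db3', winsize=128, thr_method='ipr',
                    beta=0.1, OptMode='soft', verbose=False, fs=fs)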
def ATAR_mCh(X,wv='db3',winsize=128,thr_method ='ipr',IPR=[25,75],beta=0.1,k1=10,k2 =100,est_wmax=100,
theta_a=np.inf,bf=2,gf=0.8,OptMode ='soft',wpd_mode='symmetric',wpd_maxlevel=None,factor=1.0,
verbose=False, window=['hamming',True],hopesize=None, ReconMethod='custom',packetwise=False,WPD=True,lvl=[],fs=128.0,use_joblib=False):
'''
.
ATAR: - Automatic and Tunable Artifact Removal Algorithm
========================================================
    Apply ATAR on short windows of a multi-channel signal:
    Signal is decomposed into smaller overlapping windows and reconstructed, after correction, using the overlap-add method.
------
for more details, check:
Ref: Bajaj, Nikesh, et al. "Automatic and tunable algorithm for EEG artifact removal using wavelet decomposition with applications in predictive modeling during auditory tasks." Biomedical Signal Processing and Control 55 (2020): 101624.
----------------
input
-----
X: input multi-channel signal of shape (n,ch)
Wavelet family:
wv = ['db3'.....'db38', 'sym2.....sym20', 'coif1.....coif17', 'bior1.1....bior6.8', 'rbio1.1...rbio6.8', 'dmey']
:'db3'(default)
Threshold Computation method:
thr_method : None (default), 'ipr'
: None: fixed threshold theta_a is applied
: ipr : applied with theta_a, bf , gf, beta, k1, k2 and OptMode
: theta_b = bf*theta_a
: theta_g = gf*theta_a
Operating modes:
OptMode = ['soft','elim','linAtten']
: default 'soft'
              : use 'elim' with global
Wavelet Decomposition modes:
wpd_mode = ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization']
default 'symmetric'
Reconstruction Method - Overlap-Add method
ReconMethod : None, 'custom', 'HamWin'
        for 'custom': window[0] is used and applied after denoising if window[1] is True,
        else windowing is applied before denoising
output
------
XR: corrected signal of same shape as input X
'''
if hopesize is None: hopesize=winsize//2
assert thr_method in [ None, 'ipr']
assert OptMode in ['soft','linAtten','elim']
if verbose:
print('WPD Artifact Removal')
print('WPD:',WPD,' Wavelet:',wv,', Method:',thr_method,', OptMode:',OptMode)
if thr_method=='ipr': print('IPR=',IPR,', Beta:',beta, ', [k1,k2]=',[k1,k2])
if thr_method is None: print('theta_a: ',theta_a)
print('Reconstruction Method:',ReconMethod, ', Window:',window,', (Win,Overlap)=',(winsize,hopesize))
if len(X.shape)>1:
if use_joblib:
XR = np.array(Parallel(n_jobs=-1)(delayed(ATAR_1Ch)(X[:,i],wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel,verbose=verbose, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs) for i in range(X.shape[1]))).T
else:
XR =np.array([ATAR_1Ch(X[:,i],wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel,verbose=0, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs) for i in range(X.shape[1])]).T
else:
XR =ATAR_1Ch(X,wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel, verbose=verbose, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs)
return XR
def ATAR(X,wv='db3',winsize=128,thr_method ='ipr',IPR=[25,75],beta=0.1,k1=10,k2 =100,est_wmax=100,
theta_a=np.inf,bf=2,gf=0.8,OptMode ='soft',wpd_mode='symmetric',wpd_maxlevel=None,factor=1.0,
verbose=False, window=['hamming',True],hopesize=None, ReconMethod='custom',packetwise=False,WPD=True,lvl=[],fs=128.0,use_joblib=False):
'''
.
ATAR: - Automatic and Tunable Artifact Removal Algorithm
========================================================
    Apply ATAR on short windows of a signal (multiple channels, if provided along axis 1):
    Signal is decomposed into smaller overlapping windows and reconstructed, after correction, using the overlap-add method.
------
for more details, check:
Ref: Bajaj, Nikesh, et al. "Automatic and tunable algorithm for EEG artifact removal using wavelet decomposition with applications in predictive modeling during auditory tasks." Biomedical Signal Processing and Control 55 (2020): 101624.
----------------
input
-----
X: input multi-channel signal of shape (n,ch)
Wavelet family:
wv = ['db3'.....'db38', 'sym2.....sym20', 'coif1.....coif17', 'bior1.1....bior6.8', 'rbio1.1...rbio6.8', 'dmey']
:'db3'(default)
Threshold Computation method:
thr_method : None (default), 'ipr'
: None: fixed threshold theta_a is applied
: ipr : applied with theta_a, bf , gf, beta, k1, k2 and OptMode
: theta_b = bf*theta_a
: theta_g = gf*theta_a
Operating modes:
OptMode = ['soft','elim','linAtten']
: default 'soft'
              : use 'elim' with global
Wavelet Decomposition modes:
wpd_mode = ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization']
default 'symmetric'
Reconstruction Method - Overlap-Add method
ReconMethod : None, 'custom', 'HamWin'
        for 'custom': window[0] is used and applied after denoising if window[1] is True,
        else windowing is applied before denoising
output
------
XR: corrected signal of same shape as input X
'''
if hopesize is None: hopesize=winsize//2
assert thr_method in [ None, 'ipr']
assert OptMode in ['soft','linAtten','elim']
if verbose:
print('WPD Artifact Removal')
print('WPD:',WPD,' Wavelet:',wv,', Method:',thr_method,', OptMode:',OptMode)
if thr_method=='ipr': print('IPR=',IPR,', Beta:',beta, ', [k1,k2]=',[k1,k2])
if thr_method is None: print('theta_a: ',theta_a)
print('Reconstruction Method:',ReconMethod, ', Window:',window,', (Win,Overlap)=',(winsize,hopesize))
if len(X.shape)>1:
if use_joblib:
XR = np.array(Parallel(n_jobs=-1)(delayed(ATAR_1Ch)(X[:,i],wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel,verbose=verbose, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs) for i in range(X.shape[1]))).T
else:
XR =np.array([ATAR_1Ch(X[:,i],wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel,verbose=0, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs) for i in range(X.shape[1])]).T
else:
XR =ATAR_1Ch(X,wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel, verbose=verbose, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs)
return XR
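# Hedged usage sketch (synthetic multi-channel data; shape and values illustrative):
# ATAR expects X of shape (n, ch) and corrects each channel independently.
def _atar_multichannel_demo(fs=128.0, n_channels=4):
    X_demo = np.random.randn(int(10 * fs), n_channels)
    return ATAR(X_demo, wv='db3', winsize=128, beta=0.1, OptMode='soft',
                verbose=False, use_joblib=False, fs=fs)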
def ATAR_mCh_noParallel(X,wv='db3',winsize=128,thr_method ='ipr',IPR=[25,75],beta=0.1,k1=10,k2 =100,est_wmax=100,
theta_a=np.inf,bf=2,gf=0.8,OptMode ='soft',wpd_mode='symmetric',wpd_maxlevel=None,factor=1.0,
verbose=False, window=['hamming',True],hopesize=None, ReconMethod='custom',packetwise=False,WPD=True,lvl=[],fs=128.0):
'''
ATAR: - Automatic and Tunable Artifact Removal Algorithm
========================================================
    Apply ATAR on short windows of a multi-channel signal, without using joblib, in case joblib creates issues on some systems or IDEs.
    Signal is decomposed into smaller overlapping windows and reconstructed, after correction, using the overlap-add method.
------
for more details, check:
Ref: Bajaj, Nikesh, et al. "Automatic and tunable algorithm for EEG artifact removal using wavelet decomposition with applications in predictive modeling during auditory tasks." Biomedical Signal Processing and Control 55 (2020): 101624.
----------------
input
-----
X: input multi-channel signal of shape (n,ch)
Wavelet family:
wv = ['db3'.....'db38', 'sym2.....sym20', 'coif1.....coif17', 'bior1.1....bior6.8', 'rbio1.1...rbio6.8', 'dmey']
:'db3'(default)
Threshold Computation method:
thr_method : None (default), 'ipr'
: None: fixed threshold theta_a is applied
: ipr : applied with theta_a, bf , gf, beta, k1, k2 and OptMode
: theta_b = bf*theta_a
: theta_g = gf*theta_a
Operating modes:
OptMode = ['soft','elim','linAtten']
: default 'soft'
              : use 'elim' with global
Wavelet Decomposition modes:
wpd_mode = ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization']
default 'symmetric'
Reconstruction Method - Overlap-Add method
ReconMethod : None, 'custom', 'HamWin'
        for 'custom': window[0] is used and applied after denoising if window[1] is True,
        else windowing is applied before denoising
output
------
XR: corrected signal of same shape as input X
'''
if hopesize is None: hopesize=winsize//2
assert thr_method in [ None, 'ipr']
assert OptMode in ['soft','linAtten','elim']
if verbose:
print('WPD Artifact Removal')
print('WPD:',WPD,' Wavelet:',wv,', Method:',thr_method,', OptMode:',OptMode)
if thr_method=='ipr': print('IPR=',IPR,', Beta:',beta, ', [k1,k2]=',[k1,k2])
if thr_method is None: print('theta_a: ',theta_a)
print('Reconstruction Method:',ReconMethod, ', Window:',window,', (Win,Overlap)=',(winsize,hopesize))
if len(X.shape)>1:
XR =np.array([
ATAR_1Ch(X[:,i],wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel,verbose=0, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs) for i in range(X.shape[1])]).T
else:
XR =ATAR_1Ch(X,wv=wv,winsize=winsize, thr_method=thr_method, IPR=IPR,
beta=beta, k1=k1,k2 =k2, theta_a=theta_a,bf=bf,gf=gf,est_wmax=est_wmax,OptMode=OptMode, factor=factor, wpd_mode=wpd_mode,
wpd_maxlevel=wpd_maxlevel, verbose=verbose, window=window,hopesize=hopesize,
ReconMethod=ReconMethod,packetwise=packetwise,WPD=WPD,lvl=lvl,fs=fs)
return XR
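
# Illustrative sketch of the overlap-add reconstruction mentioned in the docstring above: a generic
# outline under assumed conventions (Hamming taper, normalization by the summed window), not the
# module's exact ReconMethod implementation.
def _overlap_add_sketch(x, winsize=128, hopesize=64, process=lambda seg: seg):
    win = np.hamming(winsize)
    xR = np.zeros(len(x))
    norm = np.zeros(len(x))
    for start in range(0, len(x) - winsize + 1, hopesize):
        seg = process(x[start:start + winsize]) * win   # denoise a window, then taper it
        xR[start:start + winsize] += seg                # add the tapered window back in place
        norm[start:start + winsize] += win              # track the accumulated window weight
    norm[norm == 0] = 1.0
    return xR / norm                                    # undo the taper where windows overlapped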
def Wfilter_dev(x,wv='db3',thr_method='ipr',IPR=[25,75],beta=0.1,k1=10,k2=100,theta_a=np.inf,est_wmax=100,
bf=2,gf=0.8,OptMode ='soft',factor=1.0,show_plot=False,wpd_mode='symmetric',wpd_maxlevel=None,
WPD=True,packetwise=False,lvl=[],fs=128.0):
'''
---- IN DEVELOPMENT MODE ----- AVOID USING IT FOR NOW -----
------------------------------------------------------------
Threshold Computation method:
    thr_method : 'ipr' (default), None, 'global', 'outliers', 'std'
: provided with theta_a, bf , gf
: where:-
: theta_b = bf*theta_a -- used for Linear Attenuation
: theta_g = gf*theta_a -- used for Soft thresholding
Operating modes:
OptMode = ['soft','elim','linAtten']
: default 'soft'
: use 'elim' with global
Wavelet Decomposition modes:
wpd_mode = ['zero', 'constant', 'symmetric', 'periodic', 'smooth', 'periodization']
default 'symmetric'
Wavelet family:
wv = ['db3'.....'db38', 'sym2.....sym20', 'coif1.....coif17', 'bior1.1....bior6.8', 'rbio1.1...rbio6.8', 'dmey']
:'db3'(default)
'''
verbose=False
if verbose:
        print('WPD:',WPD,' wv:',wv,' IPR:',IPR,' beta:',beta,' thr_method:',thr_method,' OptMode:',OptMode)
print('k1-k2:',[k1,k2])
if WPD: # Wavelet Packet Decomposition
wp = wt.WaveletPacket(x, wavelet=wv, mode=wpd_mode,maxlevel=wpd_maxlevel)
wr = [wp[node.path].data for node in wp.get_level(wp.maxlevel, 'natural') ]
WR = np.hstack(wr)
nodes = [node for node in wp.get_level(wp.maxlevel, 'natural')]
else: # Wavelet Transform
wr = wt.wavedec(x,wavelet=wv, mode=wpd_mode,level=wpd_maxlevel)
WR = np.hstack(wr)
nodes = np.arange(len(wr))
if not(packetwise):
if thr_method=='ipr':
if k2 is None: k2=100
r = stats.iqr(WR,rng=IPR)
theta_a = ipr2thr(r,beta=beta,k1=k1,k2=k2,c=est_wmax)
elif thr_method =='global':
sig = np.median(abs(WR))/0.6745
theta_a = sig*np.sqrt(2*np.log(len(x)))
elif thr_method =='outliers':
ll,ul = Outliers(WR)
thrlw = ll
thrup = ul
_,theta_a = Outliers(np.abs(WR))
elif thr_method =='std':
theta_a = 1.5*np.std(WR)
elif thr_method is not None:
print('Method for computing threshold is not defined')
theta_b = bf*theta_a
theta_g = gf*theta_a
removList=[]
for i in range(len(nodes)):
#for node in wp.get_level(wp.maxlevel, 'natural'):
c = wp[nodes[i].path].data if WPD else wr[i]
if len(lvl)==0 or i in lvl:
if packetwise:
if thr_method=='ipr':
if k2 is None: k2=100
r = stats.iqr(c,rng=IPR)
theta_a = ipr2thr(r,beta=beta,k1=k1,k2=k2,c=est_wmax)
elif thr_method =='global':
sig = np.median(abs(c))/0.6745
theta_a = sig*np.sqrt(2*np.log(len(x)))
elif thr_method =='outliers':
ll,ul = Outliers(c)
thrlw = ll
thrup = ul
_,theta_a = Outliers(np.abs(c))
elif thr_method =='std':
theta_a = 1.5*np.std(c)
theta_b = bf*theta_a
theta_g = gf*theta_a
if OptMode=='soft':
c = SoftThresholding(c,theta_a,theta_g)
elif OptMode=='linAtten':
c = LinearAttenuanating(c,theta_a,theta_b)
elif OptMode=='elim':
if thr_method =='outliers':
c[c>thrup]=0
c[c<thrlw]=0
elif thr_method not in ['std','global']:
c = Elimination(c,theta_a)
else:
#method is None, -- apply elimination with given theta_a
c = Elimination(c,theta_a)
else:
print('Operating mode was not in list..\n No wavelet filtering is applied')
pass
if WPD:
wp[nodes[i].path].data = c
else:
wr[i] = c
#Reconstruction
if WPD:
xR = wp.reconstruct(update=False)
else:
xR = wt.waverec(wr, wavelet = wv)
if show_plot:
plt.figure(figsize=(11,6))
plt.subplot(211)
plt.plot(WR,'b',alpha=0.8,label='Coef.')
        plt.ylabel('Wavelet Coefficients')
ytiW =[np.min(WR),np.max(WR)]
#print('maxlevel :',wp.maxlevel)
if WPD: wr = [wp[node.path].data for node in wp.get_level(wp.maxlevel, 'natural') ]
WRi = np.hstack(wr)
        plt.plot(WRi,'r',alpha=0.9,label='Filtered Coeff.')
ytiW = ytiW+[np.min(WRi),np.max(WRi)]
plt.yticks(ytiW)
plt.grid()
plt.legend()
plt.xlim([0,len(WRi)])
plt.subplot(212)
if WPD:
t = np.arange(len(wp.data))/fs
plt.plot(t,wp.data,'b',alpha=0.8,label='signal')
else:
t = np.arange(len(x))/fs
plt.plot(t,x,'b',alpha=0.8,label='signal')
plt.plot(t,xR,'r',alpha=0.8,label='corrected')
plt.ylabel('Signal')
        plt.yticks([np.min(xR), np.min(x), 0, np.max(x), np.max(xR)])
        plt.legend()
        plt.show()
    return xR
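
# Minimal sketch of the 'elim' operating mode (an assumption about the Elimination(...) helper
# used above, for illustration only): zero out coefficients whose magnitude exceeds theta_a.
def _elimination_sketch(c, theta_a):
    c = np.asarray(c, dtype=float).copy()
    c[np.abs(c) > theta_a] = 0.0
    return c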
import os
import re
import sys
import math
import time
import pickle
import random
import zscore
import matplotlib
matplotlib.use('TkAgg')
import build_features
import numpy as np
import pandas as pd
import multiprocessing
import config as config_file
import matplotlib.pylab as plt
import outcome_def_pediatric_obesity
from dateutil import parser
from sklearn import metrics
from scipy.stats import norm
from datetime import timedelta
from multiprocessing import Pool
from sklearn.preprocessing import Imputer
from dateutil.relativedelta import relativedelta
random.seed(2)
g_wfl = np.loadtxt(config_file.wght4leng_girl)
b_wfl = np.loadtxt(config_file.wght4leng_boy)
def filter_training_set_forLinear(x, y, ylabel, headers, filterSTR=[], percentile=False, mrns=[], filterSTRThresh=[], print_out=True):
if filterSTR.__class__ == list:
pass
else:
filterSTR = [filterSTR]
if len(filterSTRThresh) != len(filterSTR):
filterSTRThresh = []
if len(filterSTRThresh) == 0 :
filterSTRThresh = [0.5]*len(filterSTR) #make it binary, as before.
if print_out:
print('Original cohort size is:', x.shape[0], 'num features:',len(headers))
else:
print_statements = 'Original cohort size: {0:,d}, number of features: {1:,d}\n'.format(x.shape[0], len(headers))
index_finder_anyvital = np.array([h.startswith('Vital') for h in headers])
index_finder_maternal = np.array([h.startswith('Maternal') for h in headers])
index_finder_filterstr = np.zeros(len(headers))
for i, fstr in enumerate(filterSTR):
# print(index_finder_filterstr + np.array([h.startswith(fstr) for h in headers]))
index_finder_filterstr_tmp = np.array([h.startswith(fstr) for h in headers])
if index_finder_filterstr_tmp.sum() > 1:
if print_out:
print('alert: filter returned more than one feature:', fstr)
index_finder_filterstr_tmp = np.array([h == fstr for h in headers])
print('set filter to h==', fstr)
else:
print_statements += 'alert: filter returned more than one feature: ' + str(fstr) + '\n'
index_finder_filterstr_tmp = np.array([h == fstr for h in headers])
print_statements += 'set filter to h==' + str(fstr) + '\n'
index_finder_filterstr = index_finder_filterstr + index_finder_filterstr_tmp
if print_out:
print('total number of people who have: ', np.array(headers)[index_finder_filterstr_tmp], ' is:', ( x[:,index_finder_filterstr_tmp].ravel() > filterSTRThresh[i] ).sum() )
else:
print_statements += 'total number of people who have: '+str(np.array(headers)[index_finder_filterstr_tmp])+' is: {0:,d}\n'.format((x[:,index_finder_filterstr_tmp].ravel() > filterSTRThresh[i]).sum())
index_finder_filterstr = (index_finder_filterstr > 0)
# if index_finder_filterstr.sum() > 1 and filterSTR.__class__ != list:
# print('instead of *startswith*',filterSTR,'...trying *equals to*', filterSTR)
# index_finder_filterstr = np.array([h == filterSTR for h in headers])
# import pdb
# pdb.set_trace()
if (len(filterSTR) != 0) and (percentile == False):
ix = (y > 10) & (y < 40) & (((x[:,index_finder_filterstr] > np.array(filterSTRThresh)).sum(axis=1) >= index_finder_filterstr.sum()).ravel()) & ((x[:,index_finder_maternal] != 0).sum(axis=1) >= 1)
else:
ix = (y > 10) & (y < 40) & ((x[:,index_finder_maternal] != 0).sum(axis=1) >= 1)
if print_out:
print('total number of people who have a BMI measured:', sum((y > 10) & (y < 40)))
print('total number of people who have all filtered variables:', (((x[:,index_finder_filterstr] > np.array(filterSTRThresh)).sum(axis=1) >= index_finder_filterstr.sum()).ravel()).sum())
print('total number of people who have maternal data available:', ((x[:,index_finder_maternal] != 0).sum(axis=1) > 0).sum() )
print('intersection of the three above is:', sum(ix))
print(str(ix.sum()) + ' patients selected..')
return ix, x[ix,:], y[ix], ylabel[ix], mrns[ix]
# elif percentile == False:
# ix = (y > 10) & (y < 40) & ((x[:,index_finder_anyvital] != 0).sum(axis=1) >= 1)
# print(ix.sum())
# if (percentile == True) & (len(filterSTR) != 0):
# ix = (x[:,index_finder_filterstr].ravel() == True)
# elif percentile == True:
# ix = (x[:,index_finder_filterstr].ravel() >= False)
else:
print_statements += 'total number of people who have a BMI measured: {0:,d}\n'.format(sum((y > 10) & (y < 40)))
print_statements += 'total number of people who have all filtered variables: {0:,d}\n'.format((((x[:,index_finder_filterstr] > np.array(filterSTRThresh)).sum(axis=1) >= index_finder_filterstr.sum()).ravel()).sum())
print_statements += 'total number of people who have maternal data available: {0:,d}\n'.format(((x[:,index_finder_maternal] != 0).sum(axis=1) > 0).sum())
print_statements += 'intersection of the three above is: {0:,d}\n'.format(sum(ix))
print_statements += '{0:,d} patients selected..\n\n'.format(ix.sum())
return ix, x[ix,:], y[ix], ylabel[ix], mrns[ix], print_statements
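
# Hedged usage sketch with tiny synthetic inputs (headers, thresholds, and values below are
# illustrative; real calls use the full feature matrix built upstream).
def _example_filter_training_set():
    headers = ['Gender:1', 'Vital: BMI', 'Maternal Diagnosis: X']
    x = np.array([[1.0, 22.0, 1.0],
                  [0.0, 35.0, 1.0],
                  [1.0, 18.0, 0.0]])
    y = np.array([20.0, 30.0, 50.0])          # third row falls outside the 10 < BMI < 40 window
    ylabel = np.array([0.0, 1.0, 1.0])
    mrns = np.array(['a', 'b', 'c'])
    ix, xf, yf, ylf, mrnsf = filter_training_set_forLinear(
        x, y, ylabel, headers, filterSTR=['Gender:1'], filterSTRThresh=[0.5], mrns=mrns)
    return ix, xf, yf                         # only the first row satisfies all three filters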
def train_regression(x, y, ylabel, percentile, modelType, feature_headers, mrns):
import sklearn
if modelType == 'lasso':
import sklearn.linear_model
from sklearn.linear_model import Lasso
if modelType == 'mlp':
from sklearn.neural_network import MLPRegressor
if modelType == 'randomforest':
from sklearn.ensemble import RandomForestRegressor
if modelType == 'temporalCNN':
import cnn
if modelType == 'gradientboost':
from sklearn.ensemble import GradientBoostingRegressor
if modelType == 'lars':
from sklearn import linear_model
N = x.shape[0]
ixlist = np.arange(0,N)
random.shuffle(ixlist)
ix_train = ixlist[0:int(N*2/3)]
ix_test = ixlist[int(N*2/3):]
xtrain = x[ix_train]
ytrain = y[ix_train]
xtest = x[ix_test]
ytest = y[ix_test]
ytestlabel = ylabel[ix_test]
ytrainlabel = ylabel[ix_train]
mrnstrain = mrns[ix_train]
mrnstest = mrns[ix_test]
best_alpha = -1
best_score = -10000
if modelType == 'lasso':
hyperparamlist = [0.001, 0.005, 0.01, 0.1] #[alpha]
if modelType == 'mlp':
hyperparamlist = [(10,), (50,), (10,10), (50,10), (100,)] #[hidden_layer_sizes]
if modelType == 'randomforest':
hyperparamlist = [(est,minSplit,minLeaf) for est in [3000] for minSplit in [2] for minLeaf in (1,2,5,7)] #(2000,2), (2000,4), (2000,10) #[n_estimators, min_samples_split, min_samples_leaf]
if modelType == 'temporalCNN':
hyperparamlist = [(0.1)]
if modelType == 'gradientboost':
hyperparamlist = [(1500, 4, 2, 0.01,'lad'), (2500, 4, 2, 0.01,'lad'), (3500, 4, 2, 0.01,'lad')] #[n_estimators, max_depth, min_samples_split, learning_rate, loss]
if modelType == 'lars':
hyperparamlist = [0.001, 0.01, 0.1]
for alpha_i in hyperparamlist:
if modelType == 'lasso':
clf = Lasso(alpha=alpha_i)
if modelType == 'mlp':
clf = MLPRegressor(hidden_layer_sizes=alpha_i, solver="lbfgs", verbose=True)
if modelType == 'randomforest':
clf = RandomForestRegressor(random_state=0, n_estimators=alpha_i[0], min_samples_split=alpha_i[1], min_samples_leaf=alpha_i[2], n_jobs=-1)
if modelType == 'gradientboost':
clf = GradientBoostingRegressor(n_estimators=alpha_i[0], max_depth=alpha_i[1], min_samples_split=alpha_i[2], learning_rate=alpha_i[3], loss=alpha_i[4])
if modelType == 'lars':
clf = linear_model.LassoLars(alpha=alpha_i)
# if modelType == 'temporalCNN':
# xcnndataTrain, xcnndataTest = xtrain.reshape(, xtest # need to be of size |vitals| x |time| x
# clf = cnn.TemporalCNN(5, 8, 8, 64, 1)
# return (clf, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, 0, 0)
clf.fit(xtrain, ytrain)
auc_test = metrics.explained_variance_score(ytest, clf.predict(xtest)) #roc_auc_score(ytestlabel, clf.predict(xtest))
print('CV R^2 for alpha:', alpha_i, 'is:', auc_test)
if auc_test > best_score:
best_score = auc_test #np.sqrt(((clf.predict(xtest)-ytest)**2).mean())
best_alpha = alpha_i
print('best alpha via CV:', best_alpha)
if modelType == 'lasso':
clf = Lasso(alpha=best_alpha)
if modelType == 'mlp':
clf = MLPRegressor(hidden_layer_sizes=best_alpha,solver="lbfgs", verbose=True)
if modelType == 'randomforest':
clf = RandomForestRegressor(random_state=0, n_estimators=best_alpha[0], min_samples_split=best_alpha[1], min_samples_leaf=best_alpha[2], n_jobs=-1)
if modelType == 'gradientboost':
clf = GradientBoostingRegressor(n_estimators=best_alpha[0], max_depth=best_alpha[1], min_samples_split=best_alpha[2], learning_rate=best_alpha[3], loss=best_alpha[4])
if modelType == 'lars':
clf = linear_model.LassoLars(alpha=best_alpha)
clf.fit(xtrain,ytrain)
# print('R^2 score train:',clf.score(xtrain,ytrain))
# print('RMSE score train: {0:4.3f}'.format(np.sqrt(((clf.predict(xtrain)-ytrain)**2).mean())))
fpr, tpr, thresholds = metrics.roc_curve(ytrainlabel, clf.predict(xtrain))
print('AUC train: {0:4.3f}'.format(metrics.auc(fpr, tpr)) + ' Explained Variance Score Train: {0:4.3f}'.format(metrics.explained_variance_score(ytrain, clf.predict(xtrain)))) #http://scikit-learn.org/stable/modules/model_evaluation.html#explained-variance-score
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, clf.predict(xtest))
auc_test = metrics.auc(fpr, tpr); r2test = clf.score(xtest,ytest)
# print('R^2 score test:',clf.score(xtest,ytest))
# print('RMSE score test: {0:4.3f}'.format(np.sqrt(((clf.predict(xtest)-ytest)**2).mean())))
print('AUC test: {0:4.3f}'.format(metrics.auc(fpr, tpr))+' Explained Variance Score Test: {0:4.3f}'.format(metrics.explained_variance_score(ytest, clf.predict(xtest))))
return (clf, xtrain, ytrain, xtest, ytest, ytrainlabel, ytestlabel, auc_test, r2test, mrnstrain, mrnstest, ix_train, ix_test)
def train_regression_single(args):
"""
Functionally the same as train_regression, but intended to be used with multiprocessing.Pool()
"""
run, x, xtest, y, ytest, ylabel, ytestlabel, percentile, modelType = args
import sklearn
if modelType == 'lasso':
import sklearn.linear_model
from sklearn.linear_model import Lasso
if modelType == 'mlp':
from sklearn.neural_network import MLPRegressor
if modelType == 'randomforest':
from sklearn.ensemble import RandomForestRegressor
if modelType == 'temporalCNN':
import cnn
if modelType == 'gradientboost':
from sklearn.ensemble import GradientBoostingRegressor
if modelType == 'lars':
from sklearn import linear_model
N = x.shape[0]
ixlist = np.arange(0,N)
random.shuffle(ixlist)
ix_train = ixlist[0:int(N*2/3)]
ix_val = ixlist[int(N*2/3):]
xtrain = x[ix_train]
ytrain = y[ix_train]
xval = x[ix_val]
yval = y[ix_val]
yvallabel = ylabel[ix_val]
ytrainlabel = ylabel[ix_train]
best_alpha = -1
best_score = -10000
if modelType == 'lasso':
hyperparamlist = [0.001, 0.005, 0.01, 0.1] #[alpha]
if modelType == 'mlp':
hyperparamlist = [(10,), (50,), (10,10), (50,10), (100,)] #[hidden_layer_sizes]
if modelType == 'randomforest':
# Expanded search
# hyperparamlist = [(est,minSplit,minLeaf,maxDepth) for est in [1000,2000,3000] for minSplit in [2,4,6] for minLeaf in (1,2,5,7) for maxDepth in (20,50,70,100,None)] #(2000,2), (2000,4), (2000,10) #[n_estimators, min_samples_split, min_samples_leaf]
# Limited search
hyperparamlist = [(est,minSplit,minLeaf) for est in [3000] for minSplit in [2] for minLeaf in (1,2,5,7)] #(2000,2), (2000,4), (2000,10) #[n_estimators, min_samples_split, min_samples_leaf]
if modelType == 'temporalCNN':
hyperparamlist = [(0.1)]
if modelType == 'gradientboost':
#[loss, learning_rate, n_estimators, max_depth, min_samples_split]
# Expanded search
# hyperparamlist = [(loss, learn, est, maxDepth, minLeaf) for loss in ('lad','huber') for learn in (0.001, 0.01, 0.1) for est in (500,1000,2000,3000) for maxDepth in (20,50,70,None) for minLeaf in (2,4,6)]
# Limited search
hyperparamlist = [(1500, 4, 2, 0.01,'lad'), (2500, 4, 2, 0.01,'lad'), (3500, 4, 2, 0.01,'lad')] #[n_estimators, max_depth, min_samples_split, learning_rate, loss]
if modelType == 'lars':
hyperparamlist = [0.001, 0.01, 0.1]
results = ''
for alpha_i in hyperparamlist:
if modelType == 'lasso':
clf = Lasso(alpha=alpha_i)
if modelType == 'mlp':
clf = MLPRegressor(hidden_layer_sizes=alpha_i, solver="lbfgs", verbose=True)
if modelType == 'randomforest':
# Expanded search
# clf = RandomForestRegressor(random_state=0, n_estimators=alpha_i[0], min_samples_split=alpha_i[1], min_samples_leaf=alpha_i[2], max_depth=alpha_i[3], n_jobs=-1)
# Limited Search
clf = RandomForestRegressor(random_state=0, n_estimators=alpha_i[0], min_samples_split=alpha_i[1], min_samples_leaf=alpha_i[2], n_jobs=-1)
if modelType == 'gradientboost':
# Expanded search
# clf = GradientBoostingRegressor(loss=alpha_i[0], learning_rate=alpha_i[1], n_estimators=alpha_i[2], max_depth=alpha_i[3], min_samples_split=alpha_i[4])
# Limited search
clf = GradientBoostingRegressor(n_estimators=alpha_i[0], max_depth=alpha_i[1], min_samples_split=alpha_i[2], learning_rate=alpha_i[3], loss=alpha_i[4])
if modelType == 'lars':
clf = linear_model.LassoLars(alpha=alpha_i)
# if modelType == 'temporalCNN':
# xcnndataTrain, xcnndataTest = xtrain.reshape(, xtest # need to be of size |vitals| x |time| x
# clf = cnn.TemporalCNN(5, 8, 8, 64, 1)
# return (clf, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, 0, 0)
clf.fit(xtrain, ytrain)
exp_var_test = metrics.explained_variance_score(ytest, clf.predict(xtest)) #roc_auc_score(ytestlabel, clf.predict(xtest))
        results += 'Bootstrap CV {0:d}, alpha = {1:s}, R^2 train: {2:4.3f}, R^2 validation: {3:4.3f}\n'.format(run, str(alpha_i), clf.score(xtrain,ytrain), clf.score(xval,yval))
if exp_var_test > best_score:
best_score = exp_var_test #np.sqrt(((clf.predict(xtest)-ytest)**2).mean())
best_alpha = alpha_i
results += 'best alpha via bootstrap CV: {0:s}\n'.format(str(best_alpha))
if modelType == 'lasso':
clf = Lasso(alpha=best_alpha)
if modelType == 'mlp':
clf = MLPRegressor(hidden_layer_sizes=best_alpha,solver="lbfgs", verbose=True)
if modelType == 'randomforest':
# Expanded search
# clf = RandomForestRegressor(random_state=0, n_estimators=best_alpha[0], min_samples_split=best_alpha[1], min_samples_leaf=best_alpha[2],max_depth=best_alpha[3], n_jobs=-1)
# Limited search
clf = RandomForestRegressor(random_state=0, n_estimators=best_alpha[0], min_samples_split=best_alpha[1], min_samples_leaf=best_alpha[2], n_jobs=-1)
if modelType == 'gradientboost':
# Expanded search
# clf = GradientBoostingRegressor(loss=best_alpha[0], learning_rate=best_alpha[1], n_estimators=best_alpha[2], max_depth=best_alpha[3], min_samples_split=best_alpha[4])
# Limited search
clf = GradientBoostingRegressor(n_estimators=best_alpha[0], max_depth=best_alpha[1], min_samples_split=best_alpha[2], learning_rate=best_alpha[3], loss=best_alpha[4])
if modelType == 'lars':
clf = linear_model.LassoLars(alpha=best_alpha)
clf.fit(xtrain,ytrain)
ytrain_pred = clf.predict(xtrain)
yval_pred = clf.predict(xval)
ytest_pred = clf.predict(xtest)
# print('R^2 score train:',clf.score(xtrain,ytrain))
# print('RMSE score train: {0:4.3f}'.format(np.sqrt(((clf.predict(xtrain)-ytrain)**2).mean())))
fpr, tpr, thresholds = metrics.roc_curve(ytrainlabel, ytrain_pred)
results += 'AUC Train: {0:4.3f}, Explained Variance Score Train: {1:4.3f}\n'.format(metrics.auc(fpr, tpr), metrics.explained_variance_score(ytrain, ytrain_pred))
fpr, tpr, thresholds = metrics.roc_curve(yvallabel, yval_pred)
auc_val = metrics.auc(fpr, tpr)
r2val = clf.score(xval,yval)
var_val = metrics.explained_variance_score(yval, yval_pred)
# print('R^2 score test:',clf.score(xtest,ytest))
# print('RMSE score test: {0:4.3f}'.format(np.sqrt(((clf.predict(xtest)-ytest)**2).mean())))
results += 'AUC Validation: {0:4.3f}, Explained Variance Score Validation: {1:4.3f}\n'.format(auc_val, var_val)
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, ytest_pred)
auc_test = metrics.auc(fpr, tpr)
r2test = clf.score(xtest,ytest)
var_test = metrics.explained_variance_score(ytest, ytest_pred)
results += 'AUC Test: {0:4.3f}, Explained Variance Score Test: {1:4.3f}\n'.format(auc_test, var_test)
print(results)
return (clf, auc_val, auc_test, var_val, var_test, r2val, r2test, ix_train, ix_val, results, run)
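
# Hedged sketch of how the pooled bootstrap runs can be launched (argument order follows the
# unpacking at the top of train_regression_single; the number of runs and model type are
# illustrative, and Pool/multiprocessing come from the imports at the top of this file).
def _example_pooled_regression(x, y, ylabel, xtest, ytest, ytestlabel, runs=4, modelType='lasso'):
    args = [(run, x, xtest, y, ytest, ylabel, ytestlabel, False, modelType) for run in range(runs)]
    with Pool(min(runs, multiprocessing.cpu_count())) as p:
        outputs = p.map(train_regression_single, args)
    return outputs   # one (clf, auc_val, auc_test, ...) tuple per bootstrap run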
def train_classification_single(args):
"""
Run a single cross validation instance for a given classifier
"""
run, x, xtest, y, ytest, ylabel, ytestlabel, percentile, modelType = args
import sklearn
if modelType == 'lasso':
import sklearn.linear_model
from sklearn.linear_model import LogisticRegression
if modelType == 'mlp':
        from sklearn.neural_network import MLPClassifier
if modelType == 'randomforest':
from sklearn.ensemble import RandomForestClassifier
if modelType == 'temporalCNN':
import cnn
if modelType == 'gradientboost':
from sklearn.ensemble import GradientBoostingClassifier
if modelType == 'lars':
print('There is no LARS classifier')
return
N = x.shape[0]
ixlist = np.arange(0,N)
random.shuffle(ixlist)
ix_train = ixlist[0:int(N*2/3)]
ix_val = ixlist[int(N*2/3):]
xtrain = x[ix_train]
ytrain = y[ix_train]
xval = x[ix_val]
yval = y[ix_val]
yvallabel = ylabel[ix_val]
ytrainlabel = ylabel[ix_train]
best_alpha = -1
best_score = -10000
if modelType == 'lasso':
hyperparamlist = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] #[alpha]
if modelType == 'mlp':
hyperparamlist = [(10,), (50,), (10,10), (50,10), (100,)] #[hidden_layer_sizes]
if modelType == 'randomforest':
# Expanded search
# hyperparamlist = [(est,minSplit,minLeaf,maxDepth) for est in [1000,2000,3000] for minSplit in [2,4,6] for minLeaf in (1,2,5,7) for maxDepth in (20,50,70,100,None)] #(2000,2), (2000,4), (2000,10) #[n_estimators, min_samples_split, min_samples_leaf]
# Limited search
hyperparamlist = [(est,minSplit,minLeaf) for est in [3000] for minSplit in [2] for minLeaf in (1,2,5,7)] #(2000,2), (2000,4), (2000,10) #[n_estimators, min_samples_split, min_samples_leaf]
if modelType == 'temporalCNN':
hyperparamlist = [(0.1)]
if modelType == 'gradientboost':
# Expanded search
# hyperparamlist = [(loss, learn, est, maxDepth, minLeaf) for loss in ['deviance'] for learn in (0.001, 0.01, 0.1) for est in (500,1000,2000,3000) for maxDepth in (20,50,70,None) for minLeaf in (2,4,6)]
# Limited search
hyperparamlist = [(1500, 4, 2, 0.01,'deviance'), (2500, 4, 2, 0.01,'deviance'), (3500, 4, 2, 0.01,'deviance')] #[n_estimators, max_depth, min_samples_split, learning_rate, loss]
results = ''
for alpha_i in hyperparamlist:
if modelType == 'lasso':
clf = LogisticRegression(C=alpha_i, penalty='l1', max_iter=1000, n_jobs=-1)
if modelType == 'mlp':
clf = MLPClassifier(hidden_layer_sizes=alpha_i, solver="lbfgs", verbose=True)
if modelType == 'randomforest':
# Expanded search
# clf = RandomForestClassifier(random_state=0, n_estimators=alpha_i[0], min_samples_split=alpha_i[1], min_samples_leaf=alpha_i[2], max_depth=alpha_i[3], n_jobs=-1)
# Limited search
clf = RandomForestClassifier(random_state=0, n_estimators=alpha_i[0], min_samples_split=alpha_i[1], min_samples_leaf=alpha_i[2], n_jobs=-1)
if modelType == 'gradientboost':
# Expanded search
# clf = GradientBoostingClassifier(loss=alpha_i[0], learning_rate=alpha_i[1], n_estimators=alpha_i[2], max_depth=alpha_i[3], min_samples_split=alpha_i[4])
# Limited search
clf = GradientBoostingClassifier(n_estimators=alpha_i[0], max_depth=alpha_i[1], min_samples_split=alpha_i[2], learning_rate=alpha_i[3], loss=alpha_i[4])
# if modelType == 'temporalCNN':
# xcnndataTrain, xcnndataTest = xtrain.reshape(, xtest # need to be of size |vitals| x |time| x
# clf = cnn.TemporalCNN(5, 8, 8, 64, 1)
# return (clf, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, 0, 0)
clf.fit(xtrain, ytrainlabel)
ytrain_pred = clf.predict(xtrain)
yval_pred = clf.predict(xval)
acc_train = metrics.accuracy_score(ytrainlabel, ytrain_pred)
acc_val = metrics.accuracy_score(yvallabel, yval_pred)
mcc_train = metrics.matthews_corrcoef(ytrainlabel, ytrain_pred)
mcc_val = metrics.matthews_corrcoef(yvallabel, yval_pred)
        results += 'Bootstrap CV {0:d}, alpha = {1:s}, Accuracy train: {2:4.3f}, MCC train: {3:4.3f}, Accuracy Validation: {4:4.3f}, MCC Validation: {5:4.3f}\n'.format(run, str(alpha_i), acc_train, mcc_train, acc_val, mcc_val)
if mcc_val > best_score:
best_score = mcc_val #np.sqrt(((clf.predict(xtest)-ytest)**2).mean())
best_alpha = alpha_i
results += 'best alpha via bootstrap CV: {0:s}\n'.format(str(best_alpha))
if modelType == 'lasso':
        clf = LogisticRegression(C=best_alpha, penalty='l1', max_iter=1000, n_jobs=-1)
if modelType == 'mlp':
clf = MLPClassifier(hidden_layer_sizes=best_alpha,solver="lbfgs", verbose=True)
if modelType == 'randomforest':
# Expanded search
# clf = RandomForestClassifier(random_state=0, n_estimators=best_alpha[0], min_samples_split=best_alpha[1], min_samples_leaf=best_alpha[2], max_depth=best_alpha[3], n_jobs=-1)
# Limited search
clf = RandomForestClassifier(random_state=0, n_estimators=best_alpha[0], min_samples_split=best_alpha[1], min_samples_leaf=best_alpha[2], n_jobs=-1)
if modelType == 'gradientboost':
# Expanded search
# clf = GradientBoostingClassifier(loss=best_alpha[0], learning_rate=best_alpha[1], n_estimators=best_alpha[2], max_depth=best_alpha[3], min_samples_split=best_alpha[4])
# Limited search
clf = GradientBoostingClassifier(n_estimators=best_alpha[0], max_depth=best_alpha[1], min_samples_split=best_alpha[2], learning_rate=best_alpha[3], loss=best_alpha[4])
clf.fit(xtrain,ytrainlabel)
ytrain_pred = clf.predict(xtrain)
yval_pred = clf.predict(xval)
ytest_pred = clf.predict(xtest)
fpr, tpr, thresholds = metrics.roc_curve(ytrainlabel, clf.predict_proba(xtrain)[:,1])
auc_train = metrics.auc(fpr, tpr)
acc_train = metrics.accuracy_score(ytrainlabel, ytrain_pred)
mcc_train = metrics.matthews_corrcoef(ytrainlabel, ytrain_pred)
    results += 'AUC Train: {0:4.3f}, Accuracy Train: {1:4.3f}, MCC Train: {2:4.3f}\n'.format(auc_train, acc_train, mcc_train)
fpr, tpr, thresholds = metrics.roc_curve(yvallabel, clf.predict_proba(xval)[:,1])
auc_val = metrics.auc(fpr,tpr)
acc_val = metrics.accuracy_score(yvallabel, yval_pred)
mcc_val = metrics.matthews_corrcoef(yvallabel, yval_pred)
results += 'AUC Validation: {0:4.3f}, Accuracy Validation: {1:4.3f}, MCC Validation: {2:4.3f}\n'.format(auc_val, acc_val, mcc_val)
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, clf.predict_proba(xtest)[:,1])
auc_test = metrics.auc(fpr,tpr)
acc_test = metrics.accuracy_score(ytestlabel, ytest_pred)
mcc_test = metrics.matthews_corrcoef(ytestlabel, ytest_pred)
results += 'AUC Test: {0:4.3f}, Accuracy Test: {1:4.3f}, MCC Test: {2:4.3f}\n'.format(auc_test, acc_test, mcc_test)
print(results)
return (clf, auc_val, auc_test, acc_val, acc_test, mcc_val, mcc_test, ix_train, ix_val, results, run)
def normalize(x, filter_percentile_more_than_percent=5, mu=[], std=[], bin_ix=[]):
unobserved = (x == 0)*1.0
if len(bin_ix) == 0:
bin_ix = ( x.min(axis=0) == 0 ) & ( x.max(axis=0) == 1)
xcop = x * 1.0
xcop[xcop==0] = np.nan
if len(mu) == 0:
mu = np.nanmean(xcop, axis=0)
mu[bin_ix] = 0.0
mu[np.isnan(mu)] = 0.0
if len(std) == 0:
std = np.nanstd(xcop, axis=0)
std[std==0]=1.0
std[bin_ix]=1.0
std[np.isnan(std)]=1.0
normed_x = (x != 0) * ((x - mu)/ std*1.0)
normed_x[abs(normed_x)>filter_percentile_more_than_percent] = 0
return normed_x, mu, std, bin_ix, unobserved
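
# Hedged usage sketch: zeros are treated as unobserved, so per-column statistics are computed
# from the non-zero entries only (toy numbers, for illustration).
def _example_normalize():
    x = np.array([[0.0, 2.0],
                  [4.0, 0.0],
                  [6.0, 4.0]])
    normed_x, mu, std, bin_ix, unobserved = normalize(x)
    # e.g. mu[0] == 5.0: the mean of the non-zero entries 4 and 6 in the first column
    return normed_x, mu, std, unobserved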
def variable_subset(x, varsubset, h, print_out=True):
    if not print_out:
print_statements = 'subsetting variables that are only: ' + str(varsubset) + '\n'
hix = np.array([hi.split(':')[0].strip() in varsubset or hi in varsubset for hi in h])
print_statements += 'from {0:,d} variables to {1:,.2f}\n'.format(x.shape[1], sum(hix))
x = x[:, hix]
h = np.array(h)[hix]
# print(h, x)
return x, h, print_statements
else:
print('subsetting variables that are only:', varsubset)
hix = np.array([hi.split(':')[0].strip() in varsubset or hi in varsubset for hi in h])
print('from ', x.shape[1] ,' variables to ', sum(hix))
x = x[:, hix]
h = np.array(h)[hix]
# print(h, x)
return x, h
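
# Hedged usage sketch: keep only feature families whose names start with the given prefixes
# (the header names below are illustrative).
def _example_variable_subset():
    h = ['Vital: BMI', 'Vital: Wt', 'Maternal Diagnosis: X']
    x = np.zeros((5, 3))
    x_sub, h_sub = variable_subset(x, ['Vital'], h)
    return x_sub.shape, list(h_sub)   # -> ((5, 2), ['Vital: BMI', 'Vital: Wt'])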
def add_temporal_features(x2, feature_headers, num_clusters, num_iters, y2, y2label, dist_type='eucledian', cross_valid=True, mux=None, stdx=None, do_impute=False, subset=[]):
if isinstance(feature_headers, list):
feature_headers = np.array(feature_headers)
header_vital_ix = np.array([h.startswith('Vital') for h in feature_headers])
headers_vital = feature_headers[header_vital_ix]
x2_vitals = x2[:, header_vital_ix]
mu_vital = mux[header_vital_ix]
std_vital = stdx[header_vital_ix]
import timeseries
xnew, hnew, muxnew, stdxnew = timeseries.construct_temporal_data(x2_vitals, headers_vital, y2, y2label, mu_vital, std_vital, subset)
centroids, assignments, trendArray, standardDevCentroids, cnt_clusters, distances = timeseries.k_means_clust(xnew, num_clusters, num_iters, hnew, distType=dist_type, cross_valid=cross_valid)
trendArray[trendArray!=0] = 1
trend_headers = ['Trend:'+str(i)+' -occ:'+str(cnt_clusters[i]) for i in range(0, len(centroids))]
return np.hstack([x2, trendArray]), np.hstack([feature_headers , np.array(trend_headers)]), centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew
def filter_correlations_via(corr_headers, corr_matrix, corr_vars_exclude, print_out=True):
ix_header = np.ones((len(corr_headers)), dtype=bool)
# if len(corr_headers) == 1:
# if print_out:
# return corr_headers, np.array([[corr_matrix]]), ix_header
# else:
# return corr_headers, corr_matrix, ix_header
if len(corr_headers) == 1:
corr_matrix = np.array([[corr_matrix]])
for ind, item in enumerate(corr_headers):
if (item in corr_vars_exclude) or sum([item.startswith(ii) for ii in corr_vars_exclude]) > 0 :
ix_header[ind] = False
if print_out:
print('filtered correlated features to: {0:,d}'.format(ix_header.sum()))
return corr_headers[ix_header], corr_matrix[:,ix_header], ix_header
else:
print_statements = 'filtered correlated features to: {0:,d}'.format(ix_header.sum())
return corr_headers[ix_header], corr_matrix[:,ix_header], ix_header, print_statements
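
# Hedged usage sketch: drop the 'Vital' columns from a small correlation matrix (toy values).
def _example_filter_correlations():
    corr_headers = np.array(['Vital: BMI', 'Maternal Diagnosis: X'])
    corr_matrix = np.array([[1.0, 0.2],
                            [0.2, 1.0]])
    headers_f, matrix_f, ix = filter_correlations_via(corr_headers, corr_matrix, ['Vital'])
    return headers_f, matrix_f.shape   # -> (['Maternal Diagnosis: X'], (2, 1))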
def autoencoder_impute(x, bin_ix, hidden_nodes=100):
try:
import auto_encoder
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
except:
print('imputation requires pytorch. please install and make sure you can import it')
raise
cont_ix = (np.array(bin_ix) == False)
non_zero_ix = (x.sum(axis=0) != 0)
old_shape = x.shape
bin_ix = np.array(bin_ix)[non_zero_ix].tolist()
cont_ix = np.array(cont_ix)[non_zero_ix].tolist()
x = x[:, non_zero_ix]
x_cont = x[:,cont_ix]
x_bin = x[:,bin_ix]
print(sum(bin_ix), sum(cont_ix), hidden_nodes)
autoencoder = auto_encoder.AutoencoderConinBinar(x_bin.shape[1], x_cont.shape[1], hidden_nodes)
optimizer = optim.SGD(autoencoder.parameters(), lr=0.5)
np.random.seed(0)
lossfuncBin = nn.BCELoss()
lossfunccont = nn.MSELoss()
loss_list = []
for epoch in range(1, 200):
autoencoder.train()
for ix in range(len(x)):
databin = Variable(torch.from_numpy(x_bin[ix]).float())
datacont = Variable(torch.from_numpy(x_cont[ix]).float())
databoth = Variable(torch.from_numpy(np.hstack([x_bin[ix], x_cont[ix]]))).float()
optimizer.zero_grad()
xoutBin, xoutCont = autoencoder(databoth)
loss = lossfuncBin(xoutBin, databin) + lossfunccont(xoutCont, datacont)
loss_list.append(loss)
loss.backward()
optimizer.step()
autoencoder.eval()
xout = np.zeros(x.shape)
for ix in range(len(x)):
databoth = Variable(torch.from_numpy(np.hstack([x_bin[ix], x_cont[ix]]))).float()
outbin, outcont = autoencoder(databoth)
xout[ix,bin_ix] = outbin.data.numpy()
xout[ix,cont_ix] = outcont.data.numpy()
xfinal = np.zeros(old_shape)
xfinal[:,non_zero_ix] = xout
return xfinal
def filter_min_occurrences(x2,feature_headers, min_occur=0, print_out=True):
"""
Filter columns that have less than min_occur ocurrences
"""
feature_filter = (np.count_nonzero(x2, axis=0) >= min_occur)
feature_headers = np.array(feature_headers)[feature_filter].tolist()
x2 = x2[:,feature_filter]
if print_out:
        print('{0:,d} features filtered out with fewer than {1:,d} occurrences'.format((~feature_filter).sum(), min_occur))
        return x2, feature_headers
    else:
        statements = '{0:,d} features filtered out with fewer than {1:,d} occurrences\n'.format((~feature_filter).sum(), min_occur)
        return x2, feature_headers, statements
def run_lasso_single(args):
from sklearn.linear_model import Lasso
run, xtrain, xtest, ytrain, ytest, ytrainlabel, ytestlabel, hyperparamlist = args
best_score = -1
best_alpha = -1
for alpha_i in hyperparamlist:
clf = Lasso(alpha=alpha_i, max_iter=1000)
clf.fit(xtrain, ytrain)
auc_test = metrics.explained_variance_score(ytest, clf.predict(xtest)) #roc_auc_score(ytestlabel, clf.predict(xtest))
if auc_test > best_score:
best_score = auc_test #np.sqrt(((clf.predict(xtest)-ytest)**2).mean())
best_alpha = alpha_i
clf = Lasso(alpha=best_alpha)
clf.fit(xtrain,ytrain)
# model_weights_array = np.array([clf.coef_ for clf in outputs])
# model_weights_mean = model_weights_array.mean(axis=0)
# model_weights_std = model_weights_array.std(axis=0)
# model_weights_conf_term = (1.96/np.sqrt(iters)) * model_weights_std
fpr, tpr, thresholds = metrics.roc_curve(ytrainlabel, clf.predict(xtrain))
to_print = 'Run {0:d} has {1:,d} non-zero features and {2:,d} zero-weight features\n'.format(run, (clf.coef_ != 0).sum(), (clf.coef_ == 0).sum())
# to_print += 'R^2 score train: {0:4.3f}\n'.format(clf.score(xtrain,ytrain))
# to_print += 'RMSE score train: {0:4.3f}\n'.format(np.sqrt(((clf.predict(xtrain)-ytrain)**2).mean()))
to_print += 'AUC Train: {0:4.3f}, Explained Variance Score Train: {1:4.3f}\n'.format(metrics.auc(fpr, tpr), metrics.explained_variance_score(ytrain, clf.predict(xtrain))) #http://scikit-learn.org/stable/modules/model_evaluation.html#explained-variance-score
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, clf.predict(xtest))
# to_print += 'R^2 score train: {0:4.3f}\n'.format(clf.score(xtest,ytest))
# to_print += 'RMSE score train: {0:4.3f}\n'.format(np.sqrt(((clf.predict(xtest)-ytest)**2).mean()))
to_print += 'AUC Test: {0:4.3f}, Explained Variance Score Test: {1:4.3f}\n'.format(metrics.auc(fpr, tpr), metrics.explained_variance_score(ytest, clf.predict(xtest))) #http://scikit-learn.org/stable/modules/model_evaluation.html#explained-variance-score
print(to_print)
return clf.coef_
def lasso_filter(x, y, ylabel, feature_headers, print_out=True):
"""
Filter any columns that have zeroed out feature weights
"""
N = x.shape[0]
ixlist = np.arange(0,N)
ix_train = ixlist
random.shuffle(ix_train)
ix_train = ix_train[0:int(N*0.8)]
iters=10
best_alpha = -1
best_score = -10000
hyperparamlist = [0.001, 0.005, 0.01, 0.1] #[alpha]
arguments = []
for it in range(iters):
ix_filt = ix_train
random.shuffle(ix_filt)
ix_filt = ix_filt[0:int(N*0.9)]
tr = ix_filt[0:int(len(ix_filt)*0.7)]
te = ix_filt[int(len(ix_filt)*0.7):]
arguments.append([it,x[tr,:],x[te,:],y[tr],y[te],ylabel[tr],ylabel[te], hyperparamlist])
node_count = multiprocessing.cpu_count()
node_count = math.ceil(node_count*0.8)
if len(arguments) > node_count:
num_batches = math.ceil(float(len(arguments))/node_count)
outputs = []
for i in range(num_batches):
sub_args = arguments[i*node_count:(i+1)*node_count] if i < num_batches-1 else arguments[i*node_count:]
nodes = node_count if i < num_batches-1 else len(arguments) - (i * node_count)
print('Running batch {0:d} of {1:d}'.format(i+1, num_batches))
with Pool(node_count) as p:
output = p.map(run_lasso_single, sub_args)
for out in output:
outputs.append(out)
else:
with Pool(node_count) as p:
outputs = p.map(run_lasso_single, arguments)
return np.array(outputs)
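
# Hedged follow-up sketch: the bootstrap coefficient matrix returned by lasso_filter() is
# typically reduced to a keep-mask by averaging across runs (this mirrors the usage inside
# prepare_data_for_analysis below).
def _example_lasso_feature_mask(model_weights):
    model_weights_mean = model_weights.mean(axis=0)   # average coefficient per feature
    feature_filter = (model_weights_mean != 0)        # keep features whose mean weight is non-zero
    return feature_filter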
def prepare_data_for_analysis(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, x1, y1, y1label, feature_headers, mrns, agex_low, agex_high, months_from, months_to, outcome='obese', percentile=False, filterSTR=['Gender:1'], filterSTRThresh=[0.5], variablesubset=['Vital'],variable_exclude=['Trend'], num_clusters=16, num_iters=100, dist_type='euclidean', corr_vars_exclude=['Vital'], do_impute=True, mrnForFilter=[], add_time=False, bin_ix=[], do_normalize=True, min_occur=0, lasso_selection=False, binarize_diagnosis=True, get_char_tables=False, feature_info=True, subset=np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False]), delay_print=False): #filterSTR='Gender:0 male'
"""
Transforms the data to be run for ML analyses. Returns x2, y2, y2label, mrns2, ix_filter, and feature_headers2.
    NOTE: use ix_filter to capture relevant data points from the original array, as the new dimensions will be different.
#### PARAMETERS ####
For the below features if not using set value to {}
data_dic: data dictionary of newborns with each child's data as value for some provided key
data_dic_mom: data dictionary of maternal data at birth with child mrn as key and data as value
data_dic_hist_moms: historical maternal data dictionary with maternal mrn as the key and data as value
lat_lon_dic: geocoded data dictionary of maternal addresses
env_dic: aggregated census data
x1: data array
y1: data to be predicted
y1label: obesity label for each child
feature_headers: list of features that matches the column space of x1
    mrns: list of mrns that corresponds to x1
NOTE: the following four parameters must have matching values for creation of any precreated data sets (x1, y1, y1label, feature_headers, and mrns)
agex_low: lower bound on child age a prediction should be made from
agex_high: upper bound on child age a prediction should be made from
months_from: lower bound on child age for prediction
months_to: upper bound on child age for prediction
outcome: default = 'obese'. obesity threshold for bmi/age percentile for outcome class.
Source: https://www.cdc.gov/obesity/childhood/defining.html
'overweight': 0.85 <= bmi percentile < 0.95
'obese': 0.95 <= bmi percentile <= 1.0
'extreme': 0.99 <= bmi percentile <= 1.0
NOTE: only required if creating the data at this stage
percentile: default False; filter to ensure certain types of features exist for each data point
    filterSTR: default ['Gender:1']; filter specific features to have values greater than 'filterSTRThresh' for each filterSTR
filterSTRThresh: default [0.5]; filter out data points with values less than the provided amount for each filterSTR feature
variablesubset: default []; use only specified list of feature(s) (can be exact match or feature category as long as each item is the start of a feature name)
variable_exclude: not used
num_clusters: default 16; number of kmeans clusters to use for timeseries data
num_iters: default 100; number of iterations for kmeans clusters for timeseries data
dist_type: default 'euclidean'; distance metric for kmeans clusters for timeseries data
corr_vars_exclude: default ['Vital']; features to exclude from correlation results
do_impute: default 'True'; impute missing data
mrnForFilter: default []; filter data by mrn values
add_time: default False; use timeseries analyses
bin_ix: default []; list of binary features - will be determined if none provided
do_normalize: default True; normalize the data
min_occur: default 0; number of occurrences required for feature to be used
    lasso_selection: default False; use LASSO regression to determine the most important features
binarize_diagnosis: default True; binarize any diagnosis features that are not already binary
    get_char_tables: default False; save the Table 1 and 2 output to file
subset: default np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False]); used to determine timeseries subset
delay_print: default False; print everything at the end -- created for when creating data using multiprocessing and don't want jumbled results
"""
if any([len(x)==0 for x in (x1,y1,y1label,feature_headers,mrns)]):
reporting = 'At least one required data not provided out of x1, y1, y1label, feature_headers, or mrns.\n'
try:
reporting += 'Creating data from the provided data dictionaries\n'
            x1, y1, y1label, feature_headers, mrns = build_features.call_build_function(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, agex_low, agex_high, months_from, months_to, percentile, prediction=outcome, mrnsForFilter=mrnForFilter)
original_data = (x1, y1, y1label, feature_headers, mrns)
except:
reporting += 'Not all of the required data was provided. Exiting analysis.\n'
print(reporting)
return
else:
reporting = 'Using pre-prepared data\n'
if not delay_print:
print(reporting)
if binarize_diagnosis:
bin_ix = np.array([(h.startswith('Diagnosis:') or h.startswith('Maternal Diagnosis:') or h.startswith('Newborn Diagnosis:')) for h in feature_headers])
reporting += str(bin_ix.sum()) + ' features are binary\n'
x1[:,bin_ix] = (x1[:,bin_ix] > 0) * 1.0
if delay_print:
ix_filter, x2, y2, y2label, mrns, print_statements = filter_training_set_forLinear(x1, y1, y1label, feature_headers, filterSTR, percentile, mrns, filterSTRThresh, print_out=not delay_print)
reporting += print_statements
else:
ix_filter, x2, y2, y2label, mrns = filter_training_set_forLinear(x1, y1, y1label, feature_headers, filterSTR, percentile, mrns, filterSTRThresh, print_out=not delay_print)
if get_char_tables:
print_charac_table(x2, y2, y2label, feature_headers)
newdir = time.strftime("table_stats_%Y%m%d_")+str(months_from)+'to'+str(months_to)+'months_'+str(agex_low)+'to'+str(agex_high)+'years'
if not os.path.exists(newdir):
os.mkdir(newdir)
get_stat_table(x2, y2, y2label, feature_headers, folder=newdir)
if do_impute or do_normalize or add_time:
x2, mux, stdx, bin_ix, unobserved = normalize(x2, bin_ix=bin_ix)
if do_impute:
x2 = autoencoder_impute(x2, bin_ix)
if add_time:
x2, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = add_temporal_features(x2, feature_headers, num_clusters, num_iters, y2, y2label, dist_type, True, mux, stdx, do_impute, subset)
else:
centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = ['NaN']*7
if min_occur > 0:
if delay_print:
x2, feature_headers, print_statements = filter_min_occurrences(x2, feature_headers, min_occur, print_out=not delay_print)
reporting += print_statements
else:
x2, feature_headers = filter_min_occurrences(x2, feature_headers, min_occur, print_out=not delay_print)
if len(variablesubset) != 0:
if delay_print:
            x2, feature_headers, print_statements = variable_subset(x2, variablesubset, feature_headers, print_out=not delay_print)
reporting += print_statements
else:
x2, feature_headers = variable_subset(x2, variablesubset, feature_headers, print_out=not delay_print)
if lasso_selection:
model_weights = lasso_filter(x2, y2, y2label, feature_headers, print_out=not delay_print)
model_weights_mean = model_weights.mean(axis=0)
feature_filter = (model_weights_mean != 0)
if delay_print:
reporting+= '{0:,d} features with zero weights being filtered. {1:,d} features remaining.\n'.format(len(feature_headers)-feature_filter.sum(), feature_filter.sum())
else:
print('{0:,d} features with 0 weights being filtered. {1:,d} features remaining.'.format(len(feature_headers)-feature_filter.sum(), feature_filter.sum()))
feature_headers = np.array(feature_headers)[feature_filter].tolist()
x2 = x2[:,feature_filter]
corr_headers = np.array(feature_headers)
corr_matrix = np.corrcoef(x2.transpose())
if delay_print:
corr_headers_filtered, corr_matrix_filtered, ix_corr_headers, print_statements = filter_correlations_via(corr_headers, corr_matrix, corr_vars_exclude, print_out=not delay_print)
reporting += print_statements
reporting += 'corr matrix is filtered to size: '+ str(corr_matrix_filtered.shape) + '\n'
else:
corr_headers_filtered, corr_matrix_filtered, ix_corr_headers = filter_correlations_via(corr_headers, corr_matrix, corr_vars_exclude, print_out=not delay_print)
print('corr matrix is filtered to size: '+ str(corr_matrix_filtered.shape))
if delay_print:
reporting += 'output is: average: {0:4.3f}, min: {1:4.3f}, max: {2:4.3f}\n'.format(y2.mean(), y2.min(), y2.max())
reporting += 'total patients: {0:,d}, positive: {1:,d}, negative: {2:,d}\n'.format(y2.shape[0], y2label.sum(), y2.shape[0]-y2label.sum())
reporting += 'normalizing output...\n'
else:
print('output is: average: {0:4.3f}, min: {1:4.3f}, max: {2:4.3f}'.format(y2.mean(), y2.min(), y2.max()))
print('total patients: {0:,d}, positive: {1:,.2f}, negative: {2:,.2f}'.format(y2.shape[0], y2label.sum(), y2.shape[0]-y2label.sum()))
print('normalizing output...')
y2 = (y2-y2.mean())/y2.std()
reporting += 'Predicting BMI at age: '+str(agex_low)+ ' to '+str(agex_high)+ ' years, from data in ages: '+ str(months_from)+' - '+str(months_to) + ' months\n'
if filterSTR != '':
reporting += 'filtering patients with: '+str(filterSTR)+'\n'
reporting += 'total size: {0:,d} x {1:,d}'.format(x2.shape[0], x2.shape[1])
print(reporting)
if (ix_filter.sum() < 50):
print('Not enough subjects. Next.')
return (filterSTR, [])
return x2, y2, y2label, mrns, ix_filter, feature_headers, corr_headers_filtered, corr_matrix_filtered, ix_corr_headers
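
# Hedged usage sketch: run the preparation pipeline on pre-built arrays so the build step is
# skipped (the empty dicts are placeholders); the age/month bounds and filters below are
# illustrative and must match whatever was used when x1/y1/y1label were created.
def _example_prepare_data(x1, y1, y1label, feature_headers, mrns):
    return prepare_data_for_analysis(
        {}, {}, {}, {}, {},
        x1, y1, y1label, feature_headers, mrns,
        agex_low=4.5, agex_high=5.5, months_from=0, months_to=24,
        filterSTR=['Gender:1'], filterSTRThresh=[0.5],
        variablesubset=[], do_impute=False, add_time=False, lasso_selection=False)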
def train_regression_model_for_bmi(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, x1, y1, y1label, feature_headers, mrns, agex_low, agex_high, months_from, months_to, outcome='obese', modelType='lasso', percentile=False, filterSTR=['Gender:1'], filterSTRThresh=[0.5], variablesubset=['Vital'],variable_exclude=['Trend'], num_clusters=16, num_iters=100, dist_type='euclidean', corr_vars_exclude=['Vital'], return_data_for_error_analysis=False, return_data=False, return_data_transformed=False, return_train_test_data=False, do_impute=True, mrnForFilter=[], add_time=False, bin_ix=[], do_normalize=True, binarize_diagnosis=True, get_char_tables=False, feature_info=True, subset=np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False])): #filterSTR='Gender:0 male'
"""
Train regression model for predicting obesity outcome
#### PARAMETERS ####
For the below features if not using set value to {}
data_dic: data dictionary of newborns with each child's data as value for some provided key
data_dic_mom: data dictionary of maternal data at birth with child mrn as key and data as value
data_dic_hist_moms: historical maternal data dictionary with maternal mrn as the key and data as value
lat_lon_dic: geocoded data dictionary of maternal addresses
env_dic: aggregated census data
x1: data array
y1: data to be predicted
y1label: obesity label for each child
feature_headers: list of features that matches the column space of x1
    mrns: list of mrns that corresponds to x1
NOTE: the following four parameters must have matching values for creation of any precreated data sets (x1, y1, y1label, feature_headers, and mrns)
agex_low: lower bound on child age a prediction should be made from
agex_high: upper bound on child age a prediction should be made from
months_from: lower bound on child age for prediction
months_to: upper bound on child age for prediction
outcome: default = 'obese'. obesity threshold for bmi/age percentile for outcome class.
Source: https://www.cdc.gov/obesity/childhood/defining.html
'overweight': 0.85 <= bmi percentile < 0.95
'obese': 0.95 <= bmi percentile <= 1.0
'extreme': 0.99 <= bmi percentile <= 1.0
NOTE: only required if creating the data at this stage
modelType: default 'lasso'
'lasso' - sklearn.linear_model.Lasso
'mlp' - sklearn.neural_network.MLPRegressor
'randomforest' - sklearn.ensemble.RandomForestRegressor
'temporalCNN' - cnn -- NOT IMPLEMENTED
'gradientboost' - sklearn.ensemble.GradientBoostingRegressor
'lars' - sklearn.linear_model
percentile: default False; filter to ensure certain types of features exist for each data point
    filterSTR: default ['Gender:1']; filter specific features to have values greater than 'filterSTRThresh' for each filterSTR
filterSTRThresh: default [0.5]; filter out data points with values less than the provided amount for each filterSTR feature
variablesubset: default []; use only specified list of feature(s) (can be exact match or feature category as long as each item is the start of a feature name)
variable_exclude: not used
num_clusters: default 16; number of kmeans clusters to use for timeseries data
num_iters: default 100; number of iterations for kmeans clusters for timeseries data
dist_type: default 'euclidean'; distance metric for kmeans clusters for timeseries data
corr_vars_exclude: default ['Vital']; features to exclude from correlation results
return_data_for_error_analysis: default False; return last trained model with data to analyze model errors
return_data: default False; return X, y, y_label, feature_headers, and mrns created in the data creation phase
NOTE: this is not the imputed, normalized, binarized, etc. data. 'feature_headers' still returned otherwise.
return_data_transformed: default False; if True and return_data==True the transformed data will be returned in place of the original, unaltered data set.
    return_train_test_data: default False; if True and return_data==True the train and test data used in the final analysis will be returned for error analysis
do_impute: default 'True'; impute missing data
mrnForFilter: default []; filter data by mrn values
add_time: default False; use timeseries analyses
bin_ix: default []; list of binary features - will be determined if none provided
do_normalize: default True; normalize the data
binarize_diagnosis: default True; binarize any diagnosis features that are not already binary
    get_char_tables: default False; save the Table 1 and 2 output to file
feature_info: default True; output model feature characteristics post analysis
subset: default np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False]); used to determine timeseries subset
"""
if modelType == 'lasso' or modelType == 'randomforest' or modelType == 'gradientboost' or modelType == 'lars':
iters = 10
model_weights_array = np.zeros((iters, x2.shape[1]), dtype=float)
auc_test_list=np.zeros((iters), dtype=float); r2testlist = np.zeros((iters), dtype=float);
randix_track = np.zeros((int(x2.shape[0]*0.9), iters))
ix_train_track = np.zeros((int(int(x2.shape[0]*0.9)*2/3), iters))
ix_test_track = np.zeros((int(x2.shape[0]*0.9)-int(int(x2.shape[0]*0.9)*2/3), iters))
for iteration in range(0, iters):
randix = list(range(0, x2.shape[0]))
random.shuffle(randix)
randix = randix[0:int(len(randix)*0.9)]
datax = x2[randix,:]; datay=y2[randix]; dataylabel = y2label[randix]; mrnx = mrns[randix]
(model, xtrain, ytrain, xtest, ytest, ytrainlabel, ytestlabel, auc_test, r2test, mrnstrain, mrnstest, ix_train, ix_test) = train_regression(datax, datay, dataylabel, percentile, modelType, feature_headers, mrnx)
model_weights_array[iteration, :] = model.coef_ if ((modelType == 'lasso') or (modelType == 'lars')) else model.feature_importances_
auc_test_list[iteration] = auc_test; r2testlist[iteration] = r2test
randix_track[:,iteration] = randix
ix_train_track[:,iteration] = ix_train
ix_test_track[:,iteration] = ix_test
model_weights = model_weights_array.mean(axis=0)
model_weights_std = model_weights_array.std(axis=0)
model_weights_conf_term = (1.96/np.sqrt(iters)) * model_weights_std
test_auc_mean = auc_test_list.mean()
test_auc_mean_ste = (1.96/np.sqrt(iters)) * auc_test_list.std()
r2test_mean = r2testlist.mean()
r2test_ste = (1.96/np.sqrt(iters)) * r2testlist.std()
if return_data_for_error_analysis == True:
print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))
print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))
print('lets analyse this')
return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)
else:
        (model, xtrain, ytrain, xtest, ytest, ytrainlabel, ytestlabel, auc_test, r2test, mrnstrain, mrnstest, ix_train, ix_test) = train_regression(x2, y2, y2label, percentile, modelType, feature_headers, mrns)
model_weights_conf_term = np.zeros((x2.shape[1]), dtype=float)
test_auc_mean = auc_test; r2test_mean= r2test;
test_auc_mean_ste = 0; r2test_ste=0
print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))
print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))
if return_data_for_error_analysis == True:
print('lets analyse this')
return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)
if modelType == 'mlp':
print ('you need to implement gradient to get top weights. ')
return (filterSTR, [])
sorted_ix = np.argsort(-1* abs(model_weights))
weights = model_weights[sorted_ix]
terms_sorted = model_weights_conf_term[sorted_ix]
factors = np.array(feature_headers)[sorted_ix]
x2_reordered = x2[:,sorted_ix]
xtest_reordered = xtest[:, sorted_ix]
ytestpred = model.predict(xtest)
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, ytestpred)
operating_Thresholds = []
operating_levels = [0, 0.0001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
ix_level = 0
for ix, thr in enumerate(thresholds):
if fpr[ix] >= operating_levels[ix_level]:
operating_Thresholds.append(thr)
ix_level += 1
if ix_level == len(operating_levels):
break
operating_Thresholds = thresholds
report_metrics = 'Test set metrics:\n'
prec_list = []
recall_list = []
spec_list = []
for t in operating_Thresholds:
tp = ((ytestlabel > 0) & (ytestpred.ravel() > t)).sum()*1.0
tn = ((ytestlabel == 0) & (ytestpred.ravel() <= t)).sum()*1.0
fn = ((ytestlabel > 0) & (ytestpred.ravel() <= t)).sum()*1.0
fp = ((ytestlabel == 0) & (ytestpred.ravel() > t)).sum()*1.0
sens = tp / (tp + fn) if (tp + fn) != 0 else 0.0
spec = tn / (tn + fp) if (tn + fp) != 0 else 0.0
ppv = tp / (tp + fp) if (tp + fp) != 0 else 0.0
acc = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) != 0 else 0.0
f1 = 2*tp / (2*tp + fp + fn) if (2*tp + fp + fn) != 0 else 0.0
report_metrics += '@threshold:{0:4.3f}, sens:{1:4.3f}, spec:{2:4.3f}, ppv:{3:4.3f}, acc:{4:4.3f}, f1:{5:4.3f} total+:{6:4.3f}\n'.format(t, sens, spec, ppv, acc, f1, tp+fp)
prec_list.append(ppv)
recall_list.append(sens)
spec_list.append(spec)
print('total variables', x2.sum(axis=0).shape, ' and total subjects:', x2.shape[0])
print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))
print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))
print(report_metrics)
occurences = (x2 != 0).sum(axis=0)[sorted_ix]
zip_weights = {}
sig_headers = []
feature_categories = {}
for i in range(0, (abs(model_weights)>0).sum()):
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, xtest_reordered[:,i].ravel())
feature_auc_indiv = metrics.auc(fpr, tpr)
corrs = corr_matrix_filtered[sorted_ix[i],:].ravel()
top_corr_ix = np.argsort(-1*abs(corrs))
corr_string = 'Correlated most with:\n'+' '.join( [str(corr_headers_filtered[top_corr_ix[j]])+ ':' + "{0:4.3f}\n".format(corrs[top_corr_ix[j]]) for j in range(0,10)] )
tp = ((y2label > 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0
tn = ((y2label == 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0
fn = ((y2label > 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0
fp = ((y2label == 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0
if fp*fn*tp*tn == 0:
oratio = np.nan
low_OR = np.nan
high_OR = np.nan
else:
oratio = tp*tn/(fp*fn)
se = np.sqrt(1/tp + 1/fp + 1/tn + 1/fn)
low_OR = np.exp(np.log(oratio) - 1.96 * se)
high_OR = np.exp(np.log(oratio) + 1.96 * se)
try:
feature_categories[factors[i].split(':')[0]] += weights[i]
except:
feature_categories[factors[i].split(':')[0]] = weights[i]
star = ' '
if (low_OR > 1 or high_OR < 1): #or (weights[i]+terms_sorted[i]) < 0 or (weights[i]-terms_sorted[i]) > 0
sig_headers.append(factors[i])
star = '*'
if feature_info:
print("{8} {3} | coef {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}] | OR_adj {9:4.3f} [{10:4.3f} {11:4.3f}] | occ: {4} | OR_unadj: {5:4.3f} [{6:4.3f} {7:4.3f}] | indivs AUC:{12:4.3f}".format(weights[i], weights[i]-terms_sorted[i], weights[i]+terms_sorted[i], factors[i], occurences[i], oratio, low_OR, high_OR, star, np.exp(weights[i]), np.exp(weights[i]-terms_sorted[i]), np.exp(weights[i]+terms_sorted[i]), feature_auc_indiv))
print(corr_string)
for k in feature_categories:
print (k, ":", feature_categories[k])
if return_data:
if return_data_transformed and return_train_test_data:
return (model, x2, y2, y2label, ix_filter, randix_track, ix_train_track, ix_test_track, feature_headers, xtrain, ytrain, ytrainlabel, mrnstrain, xtest, ytest, ytestlabel, mrnstest, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)
elif return_data_transformed and not return_train_test_data:
return (model, x2, y2, y2label, ix_filter, randix_track, ix_train_track, ix_test_track, feature_headers, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)
elif not return_data_transformed and return_train_test_data:
return (model, xtrain, ytrain, ytrainlabel, mrnstrain, xtest, ytest, ytestlabel, mrnstest, feature_headers, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)
else:
return (model, original_data[0], original_data[1], original_data[2], original_data[3], original_data[4], filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)
else:
return (feature_headers, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)
def train_model_for_bmi_parallel(x2, y2, y2label, feature_headers, mrns, corr_headers_filtered, corr_matrix_filtered, ix_corr_headers, test_ix=[], iters=10, modelType='lasso',regression=True, percentile=False, get_char_tables=False, feature_info=True, subset=np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False])):
"""
Train a regression model for predicting the obesity outcome. All subsetting of the data should be performed with 'prepare_data_for_analysis()'.
Returns: (model_list, randix_track, ix_train_track, ix_val_track, test_ix, results_arr, results_cols, feature_data, feature_data_cols,
auc_val_mean, auc_val_mean_ste, metric2_val_mean, metric2_val_mean_ste, metric3_val_mean, metric3_val_mean_ste,
auc_test_mean, auc_test_mean_ste, metric2_test_mean, metric2_test_mean_ste, metric3_test_mean, metric3_test_mean_ste)
NOTE: for regression == True metric2 is R^2 and metric3 is explained variance, and for
regression == False (classification) metric2 is accuracy and metric3 is the Matthews Correlation Coefficient
#### PARAMETERS ####
For the parameters below, set the value to {} if not used.
x2: data array
y2: data to be predicted
y2label: obesity label for each child
feature_headers: list of features that matches the column space of x2
mrns: list of MRNs that corresponds to the rows of x2
test_ix: default []; list of indices to use for the test set. If empty, a new test set of 20% of N will be created.
iters: default 10; number of iterations in bootstrap cross validation
modelType: default 'lasso'
'lasso' - sklearn.linear_model.Lasso/LogisticRegression
'mlp' - sklearn.neural_network.MLPRegressor/Classifier -- NOT IMPLEMENTED
'randomforest' - sklearn.ensemble.RandomForestRegressor/Classifier
'temporalCNN' - cnn -- NOT IMPLEMENTED
'gradientboost' - sklearn.ensemble.GradientBoostingRegressor/Classifier
'lars' - sklearn.linear_model -- NOT VALID FOR CLASSIFICATION
regression: default True; Binary for classification or regression implementations
get_char_tables: default False; save the Table 1 and Table 2 output to file
feature_info: default True; output model feature characteristics post analysis
subset: default np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False]); used to determine timeseries
"""
if modelType == 'mlp':
print ('you need to implement gradient to get top weights. ')
return
if modelType == 'temporalCNN':
print('temporal CNN not implemented.')
return
if modelType == 'lars':
print('there is no LARS classifier')
return
if modelType == 'lasso' or modelType == 'randomforest' or modelType == 'gradientboost':
arguments = []
N = x2.shape[0]
if len(test_ix) > 0:
N_test = len(test_ix)
else:
N_test = int(N*0.2)
test_ix = list(range(0,N))
random.shuffle(test_ix)
test_ix = test_ix[:N_test]
N_subset = int(N_test*0.9)
train_size = int(N_subset*2/3)
val_size = N_subset - train_size
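# Split sizes as computed above: the test set is either the caller-supplied test_ix or a
# random 20% of N; each bootstrap iteration then samples N_subset = 0.9 * N_test rows from
# the remaining data and splits them 2/3 train / 1/3 validation.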
model_weights_array = np.zeros((iters, x2.shape[1]), dtype=float)
model_list = [''] * iters
# These will be the validation and test results for either the classification or regression algorithms
# classification: AUC, accuracy, Matthews Correlation Coefficient
# regression AUC, R^2, Explained Variance
results_cv = np.zeros((iters, 6), dtype=float)
randix_track = np.zeros((N_subset, iters), dtype=int)
ix_train_track = np.zeros((train_size, iters), dtype=int)
ix_val_track = np.zeros((val_size, iters), dtype=int)
node_count = max(2,min(math.ceil(multiprocessing.cpu_count()*0.8), multiprocessing.cpu_count()-1))
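# Pool sizing: use roughly 80% of the available CPUs, at least 2 and at most cpu_count-1;
# the bootstrap iterations are dispatched to the multiprocessing pool in batches of node_count below.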
xtest = x2[test_ix,:]; ytest = y2[test_ix]; ytestlabel = y2label[test_ix]; mrnstest = mrns[test_ix]
for iteration in range(0, iters):
randix = [r for r in range(0, N) if r not in test_ix]
random.shuffle(randix)
randix = randix[0:N_subset]
datax = x2[randix,:]; datay=y2[randix]; dataylabel = y2label[randix]; mrnx = mrns[randix]
arguments.append([iteration, datax, xtest, datay, ytest, dataylabel, ytestlabel, percentile, modelType])
randix_track[:,iteration] = randix
single = train_regression_single if regression else train_classification_single
if iters > node_count:
num_batches = math.ceil(float(iters/node_count))
for i in range(num_batches):
sub_args = arguments[i*node_count:(i+1)*node_count] if i < num_batches-1 else arguments[i*node_count:]
nodes = node_count if i < num_batches-1 else len(arguments) - (i * node_count)
print('Running batch {0:d} of {1:d} with {2:d} nodes'.format(i+1, num_batches, nodes))
with Pool(node_count) as p:
outputs = p.map(single, sub_args)
for model, auc_val, auc_test, metric2_val, metric2_test, metric3_val, metric3_test, ix_train, ix_val, results, iteration in outputs:
ix_train_track[:,iteration] = ix_train
ix_val_track[:,iteration] = ix_val
results_cv[iteration,:] = [auc_val, auc_test, metric2_val, metric2_test, metric3_val, metric3_test]
model_weights_array[iteration,:] = model.coef_ if modelType in ('lasso','lars') else model.feature_importances_
model_list[iteration] = model
else:
with Pool(min(iters,node_count)) as p:
outputs = p.map(single, arguments)
for model, auc_val, auc_test, metric2_val, metric2_test, metric3_val, metric3_test, ix_train, ix_val, results, iteration in outputs:
ix_train_track[:,iteration] = ix_train
ix_val_track[:,iteration] = ix_val
results_cv[iteration,:] = [auc_val, auc_test, metric2_val, metric2_test, metric3_val, metric3_test]
model_weights_array[iteration,:] = model.coef_ if modelType in ('lasso','lars') else model.feature_importances_
model_list[iteration] = model
best = np.argmax(results_cv[:,1]) # use the iteration with the best test AUC for producing model outputs
xtrain = x2[randix_track[:,best],:][ix_train_track[:,best],:]
ytrain = y2[randix_track[:,best]][ix_train_track[:,best]]
ytrain_label = y2label[randix_track[:,best]][ix_train_track[:,best]]
xval = x2[randix_track[:,best]][ix_val_track[:,best],:]
yval = y2[randix_track[:,best]][ix_val_track[:,best]]
yval_label = y2label[randix_track[:,best]][ix_val_track[:,best]]
model = model_list[best]
model_weights = model_weights_array.mean(axis=0)
model_weights_std = model_weights_array.std(axis=0)
model_weights_conf_term = (1.96/np.sqrt(iters)) * model_weights_std
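# Coefficients/importances are averaged across bootstrap iterations; the term above is the
# half-width of an approximate 95% CI (1.96 * std / sqrt(iters)), and the same scaling is
# applied to the cross-validated metrics below.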
auc_val_mean, auc_test_mean, metric2_val_mean, metric2_test_mean, metric3_val_mean, metric3_test_mean = results_cv.mean(axis=0)
auc_val_mean_ste, auc_test_mean_ste, metric2_val_mean_ste, metric2_test_mean_ste, metric3_val_mean_ste, metric3_test_mean_ste = results_cv.std(axis=0) * (1.96/np.sqrt(iters))
########################
#### REVISIT THIS PART - not essential currently as no other models implemented
else:
(model, xtrain, ytrain, xtest, ytest, ytrainlabel, ytestlabel, auc_test, acc_test, mrnstrain, mrnstest, ix_train, ix_test) = train_regression(x2, y2, y2label, percentile, modelType, feature_headers, mrns)
model_weights_conf_term = np.zeros((x2.shape[1]), dtype=float)
auc_test_mean = auc_test
auc_test_mean_ste = 0
metric2_test_mean = acc_test
metric2_test_mean_ste = 0
metric3_test_mean = np.nan  # MCC / explained variance is not computed in this fallback branch
metric3_test_mean_ste = np.nan
print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f}, {2:4.3f}]'.format(auc_test_mean, auc_test_mean - auc_test_mean_ste, auc_test_mean + auc_test_mean_ste))
if regression:
print('->R^2 test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(metric2_test_mean, metric2_test_mean - metric2_test_mean_ste, metric2_test_mean + metric2_test_mean_ste))
print('->Explained Variance test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(metric3_test_mean, metric3_test_mean - metric3_test_mean_ste, metric3_test_mean + metric3_test_mean_ste))
else:
print('->Accuracy test: {0:4.3f} 95% CI: [{1:4.3f}, {2:4.3f}]'.format(metric2_test_mean, metric2_test_mean - metric2_test_mean_ste, metric2_test_mean + metric2_test_mean_ste))
print('->MCC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(metric3_test_mean, metric3_test_mean - metric3_test_mean_ste, metric3_test_mean + metric3_test_mean_ste))
########################
sorted_ix = np.argsort(-1 * abs(model_weights))
weights = model_weights[sorted_ix]
terms_sorted = model_weights_conf_term[sorted_ix]
factors = np.array(feature_headers)[sorted_ix]
x2_reordered = x2[:,sorted_ix]
xtrain_reordered = xtrain[:,sorted_ix]
xtest_reordered = xtest[:,sorted_ix]
ytestpred = model.predict(xtest) if regression else model.predict_proba(xtest)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, ytestpred)
operating_Thresholds = []
operating_levels = [0, 0.0001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 0.99, 0.999, 1]
ix_level = 0
for ix, thr in enumerate(thresholds):
if fpr[ix] >= operating_levels[ix_level]:
operating_Thresholds.append(thr)
ix_level += 1
if ix_level == len(operating_levels):
break
operating_Thresholds = np.array(thresholds)  # NOTE: overrides the selected operating points above and evaluates every ROC threshold
N = operating_Thresholds.reshape(-1,1).shape[0]
TP = np.zeros(N)
TN = np.zeros(N)
FN = np.zeros(N)
FP = np.zeros(N)
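# --- Illustrative sketch (not part of the original source) ---
# A minimal, self-contained version of the operating-point evaluation pattern used above:
# take the first ROC threshold whose FPR reaches each target level, then report
# sensitivity/specificity/PPV/F1 at those thresholds. All names here (y_true, y_score,
# target_fprs) are placeholders, not identifiers from the original code.
import numpy as np
from sklearn import metrics

def metrics_at_operating_points(y_true, y_score, target_fprs=(0.01, 0.05, 0.1, 0.2)):
    fpr, _, thresholds = metrics.roc_curve(y_true, y_score)
    # first threshold whose FPR reaches each requested level
    chosen = [thresholds[np.argmax(fpr >= level)] for level in target_fprs]
    rows = []
    for t in chosen:
        pred = y_score > t
        tp = np.sum((y_true == 1) & pred)
        tn = np.sum((y_true == 0) & ~pred)
        fp = np.sum((y_true == 0) & pred)
        fn = np.sum((y_true == 1) & ~pred)
        sens = tp / (tp + fn) if tp + fn else 0.0
        spec = tn / (tn + fp) if tn + fp else 0.0
        ppv = tp / (tp + fp) if tp + fp else 0.0
        f1 = 2.0 * tp / (2 * tp + fp + fn) if 2 * tp + fp + fn else 0.0
        rows.append((t, sens, spec, ppv, f1))
    return rows
# --- end sketch ---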
import numpy as np
import tensorflow as tf
from collections import defaultdict
class Greedy_Tracker(object):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
self.network_type = cfg_tracker.network_type
self.cls_thr = cfg_tracker.nn_gating_thr
self.det_ratio_thr = cfg_tracker.det_ratio
self.N_miss_max = cfg_tracker.N_miss_max
self.img_height = cfg_tracker.IMAGE_HEIGHT
self.img_width = cfg_tracker.IMAGE_WIDTH
self.all_tracks = defaultdict(lambda: defaultdict(defaultdict))
self.track_num = 0
self.model_info = {}
self.model_info['app_hidden_dim'] = cfg_train.APP_HIDDEN_DIM
self.model_info['mot_hidden_dim'] = cfg_train.MOT_HIDDEN_DIM
self.model_info['mot_input_dim'] = cfg_train.MOT_INPUT_DIM
self.result = []
self.cfg_train = cfg_train
self.cfg_tracker = cfg_tracker
self.sess = session
self.tf_ops = tf_ops
self.tf_plh = tf_placeholders
self.neg_mem_indices = self.precompute_neg_mem_indices()
def precompute_neg_mem_indices(self):
# get indices for online negative examples (i.e. other tracks in the scene) for each track
# NOTE: need to be set again when the code is used for tracking more objects
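# Layout: neg_mem_ind[i, j] holds the (track, detection) index pairs (k, j) for all k != i,
# i.e. for track i and detection j it lists every other track paired with the same detection,
# which is what build_neg_mem_indices() slices out at tracking time.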
max_track_num = 200
max_det_num = 200
neg_mem_ind = np.zeros((max_track_num, max_det_num, max_track_num-1, 2))
for i in range(max_track_num):
for j in range(max_det_num):
xy_ind_tmp = np.zeros((max_track_num - 1, 2))
x_ind_tmp = np.arange(max_track_num, dtype=np.int32)
xy_ind_tmp[:, 0] = x_ind_tmp[x_ind_tmp != i]
xy_ind_tmp[:, 1] = j
neg_mem_ind[i, j, :, :] = xy_ind_tmp
return neg_mem_ind
def build_neg_mem_indices(self, track_num, det_num):
if track_num > 1:
neg_mem_inds = self.neg_mem_indices[:track_num, :det_num, :(track_num-1), :]
elif track_num == 1:
neg_mem_inds = None
else:
raise NotImplementedError
return neg_mem_inds
def get_lstm_states(self, h_np, c_np, cur_detbb_num, is_track_state):
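# The LSTM states arrive flattened over the (track, detection) pair grid, i.e. the same
# per-detection (or per-track) state repeated cur_detbb_num times. Reshape, transpose when
# the states belong to the track side, assert that all replicas agree, and keep one copy.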
h_np = np.reshape(h_np, (cur_detbb_num, cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, cur_detbb_num, -1))
if is_track_state == True:
h_np = np.transpose(h_np, (1, 0, 2))
c_np = np.transpose(c_np, (1, 0, 2))
# loop can be commented out later to improve processing time
# check lstm states
h_np = np.reshape(h_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(h_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
h_np[:cur_detbb_num, :]))
h_np = h_np[:cur_detbb_num, :]
# check lstm states
c_np = np.reshape(c_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(c_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
c_np[:cur_detbb_num, :]))
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_new(self, h_np, c_np, cur_detbb_num):
h_np = np.reshape(h_np, (cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, -1))
h_np = h_np[:cur_detbb_num, :]
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_for_matched_tracks(self, matching, model_dim, h_np, c_np, trk_num, det_num):
inds_sel1 = []
track_i_sel = []
# select lstm states for matched tracks
if len(matching) > 0:
h_np_tmp = np.zeros((len(matching), model_dim))
c_np_tmp = np.zeros((len(matching), 2 * model_dim))
h_np = np.reshape(h_np, (trk_num, det_num, -1))
c_np = np.reshape(c_np, (trk_num, det_num, -1))
for kkk in range(0, len(matching)):
track_i = int(matching[kkk][0, 0])
detbb_i = int(matching[kkk][0, 1])
h_np_tmp[kkk, :] = h_np[track_i, detbb_i, :]
c_np_tmp[kkk, :] = c_np[track_i, detbb_i, :]
inds_sel1.append(detbb_i)
track_i_sel.append(track_i)
h_np = h_np_tmp
c_np = c_np_tmp
else:
h_np = []
c_np = []
return (h_np, c_np, inds_sel1, track_i_sel)
def precompute_app_features(self, imgs, bbs):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
feed_dict = {
self.tf_plh['detbb_num']: cur_detbb_num,
self.tf_plh['images']:imgs,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['valid_app_data']: np.ones((cur_detbb_num, 1, 1), dtype=np.int32),
self.tf_plh['indices_for_mapping']: np.reshape(np.arange(cur_detbb_num * 1, dtype=np.int32), (-1, 1)),
self.tf_plh['image_batch_shape']: np.array([cur_detbb_num * 1, self.cfg_train.APP_LAYER_DIM])
}
app_embed_np = self.sess.run(self.tf_ops['app_embed'], feed_dict=feed_dict)
return app_embed_np
def initialize_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
det_ids,
frame,
hidden_dim,
is_dummy,
network
):
h = np.reshape(h, (-1, hidden_dim))
if network == 'app_blstm':
assert(np.shape(memory)[0] == np.shape(h)[0])
assert(np.shape(memory)[0] == np.shape(c)[0])
assert(np.array_equal(h, c[:, hidden_dim:]))
assert(np.shape(h)[0] == np.shape(c)[0])
if is_dummy == False:
for i in range(0, np.shape(h)[0]):
self.track_num += 1
# 1 x d
self.all_tracks[self.track_num]['h_states'] = h[i, :]
# 1 x d
self.all_tracks[self.track_num]['c_states'] = c[i, :]
self.all_tracks[self.track_num]['real_det_num'] = 1
self.all_tracks[self.track_num]['miss_det_num'] = 0
self.all_tracks[self.track_num]['last_miss_det_num'] = 0
self.all_tracks[self.track_num]['bb'] = bbs[det_ids[i], :]
self.all_tracks[self.track_num]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[self.track_num]['frame'] = frame
self.all_tracks[self.track_num]['th'] = [self.cls_thr]
if network == 'app_blstm':
# 1 x 1 x d
self.all_tracks[self.track_num]['mem'] = memory[i, :, :]
self.result.append((frame, det_ids[i], 1.0, self.track_num))
elif is_dummy == True:
ct = -1
for i in range(0, np.shape(memory)[0]):
ct -= 1
# 1 x d
self.all_tracks[ct]['h_states'] = h[i, :]
# 1 x d
self.all_tracks[ct]['c_states'] = c[i, :]
self.all_tracks[ct]['real_det_num'] = 1
self.all_tracks[ct]['miss_det_num'] = 0
self.all_tracks[ct]['last_miss_det_num'] = 0
self.all_tracks[ct]['bb'] = bbs[det_ids[i], :]
self.all_tracks[ct]['bb_norm'] = bbs_norm[det_ids[i], :]
self.all_tracks[ct]['frame'] = frame
self.all_tracks[ct]['th'] = [self.cls_thr]
if network == 'app_blstm':
# 1 x 1 x d
self.all_tracks[ct]['mem'] = memory[i, :, :]
else:
raise NotImplementedError
def delete_dummy_tracks(self, frame):
for i in list(self.all_tracks.keys()):  # copy the keys; deleting while iterating a dict view raises in Python 3
if i < 0:
del self.all_tracks[i]
for i in self.all_tracks.keys():
assert(i > 0)
def update_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
track_ids,
matching,
matching_score,
frame,
hidden_dim,
network,
missdet_tracks
):
h = np.reshape(h, (-1, hidden_dim))
if np.shape(c)[0] != 0:
if network == 'app_blstm':
assert((np.shape(memory)[0] == np.shape(h)[0]))
assert((np.shape(memory)[0] == np.shape(c)[0]))
assert(np.array_equal(h, c[:, hidden_dim:]))
assert(len(matching) == len(matching_score))
track_ids_sel1 = []
for i in range(0, len(matching)):
track_i = int(matching[i][0, 0])
detbb_i = int(matching[i][0, 1])
if network == 'app_blstm':
self.all_tracks[track_ids[track_i]]['mem'] = memory[i, :, :]
self.all_tracks[track_ids[track_i]]['h_states'] = h[i, :]
self.all_tracks[track_ids[track_i]]['c_states'] = c[i, :]
self.all_tracks[track_ids[track_i]]['real_det_num'] += 1
self.all_tracks[track_ids[track_i]]['last_miss_det_num'] = 0
self.all_tracks[track_ids[track_i]]['bb'] = bbs[detbb_i, :]
self.all_tracks[track_ids[track_i]]['bb_norm'] = bbs_norm[detbb_i, :]
self.all_tracks[track_ids[track_i]]['frame'] = frame
self.all_tracks[track_ids[track_i]]['th'] = self.all_tracks[track_ids[track_i]]['th'] \
+ [matching_score[i]]
self.result.append((frame, detbb_i, 1.0, track_ids[track_i]))
track_ids_sel1.append(track_ids[track_i])
# update non matched tracks with dummy detections
track_ids_sel2 = np.setdiff1d(track_ids, track_ids_sel1)
if network == 'mot_lstm' and len(track_ids_sel2) > 0:
assert(np.array_equal(track_ids_sel2, missdet_tracks['track_ids']))
for i in range(0, len(track_ids_sel2)):
# skip dummy track
if track_ids_sel2[i] < 0:
continue
self.all_tracks[track_ids_sel2[i]]['miss_det_num'] += 1
self.all_tracks[track_ids_sel2[i]]['last_miss_det_num'] += 1
self.result.append((frame, None, None, track_ids_sel2[i]))
if network == 'mot_lstm' and len(track_ids_sel2) > 0:
self.all_tracks[track_ids_sel2[i]]['h_states'] = missdet_tracks['h_states'][i, :]
self.all_tracks[track_ids_sel2[i]]['c_states'] = missdet_tracks['c_states'][i, :]
assert(track_ids_sel2[i] == missdet_tracks['track_ids'][i])
def compute_iou(self, bb_p, bb_n):
bb_px_min = bb_p[0]
bb_py_min = bb_p[1]
bb_pw = bb_p[2]
bb_ph = bb_p[3]
bb_px_max = bb_px_min + bb_pw
bb_py_max = bb_py_min + bb_ph
bb_nx_min = bb_n[0]
bb_ny_min = bb_n[1]
bb_nw = bb_n[2]
bb_nh = bb_n[3]
bb_nx_max = bb_nx_min + bb_nw
bb_ny_max = bb_ny_min + bb_nh
bb_p_area = (bb_px_max - bb_px_min)*(bb_py_max - bb_py_min)
bb_n_area = (bb_nx_max - bb_nx_min)*(bb_ny_max - bb_ny_min)
x1 = np.maximum(bb_px_min, bb_nx_min)
y1 = np.maximum(bb_py_min, bb_ny_min)
x2 = np.minimum(bb_px_max, bb_nx_max)
y2 = np.minimum(bb_py_max, bb_ny_max)
w = np.maximum(0.0, x2 - x1)
h = np.maximum(0.0, y2 - y1)
intersection = np.multiply(w, h)
union = np.add(bb_p_area, bb_n_area) - intersection
IoU = np.divide(intersection, union)
return IoU
def solve_greedy_matching(self, softmax, m_states, track_num, detbb_num, track_ids, bbs, frame):
col1 = np.arange(track_num)
col2 = np.arange(detbb_num)
col1 = np.expand_dims(col1, axis=1)
col2 = np.expand_dims(col2, axis=0)
col1 = np.reshape(np.tile(col1, (1, detbb_num)), (-1, 1))
col2 = np.reshape(np.tile(col2, (track_num, 1)), (-1, 1))
track_detbb_pair_ind = np.concatenate((col1, col2), axis=1)
assert(np.shape(track_detbb_pair_ind)[0] == track_num * detbb_num)
motion_gating_mask = np.ones((track_num, detbb_num, 1))
if self.cfg_tracker.IS_NAIVE_GATING_ON == True:
for i in range(0, track_num):
bb_p = self.all_tracks[track_ids[i]]['bb']
bb_n = bbs
if track_ids[i] < 0:
motion_gating_mask[i, :, 0] = 0
else:
fr_diff = (frame - self.all_tracks[track_ids[i]]['frame'])
motion_gating_mask[i, :, 0] = self.naive_motion_gating(bb_p, bb_n, fr_diff)
motion_gating_mask = np.reshape(motion_gating_mask, (track_num * detbb_num, 1))
# (N1 * N2) x 1
softmax_pos = softmax[:, 1]
softmax_pos = np.reshape(softmax_pos, (-1, 1))
softmax_pos_org = softmax_pos
softmax_pos = np.multiply(softmax_pos, motion_gating_mask)
matching = []
matching_score = []
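# Greedy assignment: repeatedly take the highest remaining positive-class score; if it
# clears the gating threshold, commit that (track, detection) pair and drop every other
# pair sharing the same track or detection; stop when no score clears the threshold or
# no pairs remain.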
while True:
max_p = np.amax(softmax_pos, axis=0)
max_i = np.argmax(softmax_pos, axis=0)
assert(softmax_pos[max_i] == max_p)
assert(np.shape(softmax_pos)[0] == np.shape(track_detbb_pair_ind)[0])
if max_p > self.cls_thr:
matching.append(track_detbb_pair_ind[max_i, :])
matching_score.append(softmax_pos_org[max_i])
del_ind1 = track_detbb_pair_ind[:, 1] == track_detbb_pair_ind[max_i, 1]
del_ind2 = track_detbb_pair_ind[:, 0] == track_detbb_pair_ind[max_i, 0]
del_ind = np.where(np.logical_or(del_ind1, del_ind2))[0]
track_detbb_pair_ind_tmp = np.delete(track_detbb_pair_ind, del_ind, axis=0)
softmax_pos = np.delete(softmax_pos, del_ind, axis=0)
softmax_pos_org = np.delete(softmax_pos_org, del_ind, axis=0)
assert(len(np.where(track_detbb_pair_ind_tmp[:, 1] == track_detbb_pair_ind[max_i, 1])[0]) == 0)
assert(len(np.where(track_detbb_pair_ind_tmp[:, 0] == track_detbb_pair_ind[max_i, 0])[0]) == 0)
track_detbb_pair_ind = track_detbb_pair_ind_tmp
# out of the loop when there is no good match left
else:
break
# out of the loop when all detections are taken
if np.shape(track_detbb_pair_ind)[0] == 0:
break
return (matching, matching_score)
def pick_imgs(self, imgs, imgs_inds):
imgs_sel = np.zeros((len(imgs_inds), self.img_height, self.img_width, 3))
for i in range(0, len(imgs_inds)):
imgs_sel[i, :, :, :] = imgs[imgs_inds[i], :, :, :]
return imgs_sel
def pick_dets(self, dets, dets_inds):
dets_sel = np.zeros((len(dets_inds), self.model_info['mot_input_dim']))
for i in range(0, len(dets_inds)):
dets_sel[i, :] = dets[dets_inds[i], :]
return dets_sel
def get_gating_result(self, x_diff, y_diff, w_diff, h_diff, gating_factor):
# NOTE: These parameters are tuned for the MOT Challenge datasets.
x_diff_th = 3.5
y_diff_th = 2.0
w_diff_th = 1.8
h_diff_th = 1.8
return np.logical_and(np.logical_and(x_diff < x_diff_th, y_diff < y_diff_th),
np.logical_and(w_diff < w_diff_th, h_diff < h_diff_th))
def naive_motion_gating(self, bb_p, bb_n, gating_factor):
bb_px = bb_p[0]
bb_py = bb_p[1]
bb_pw = bb_p[2]
bb_ph = bb_p[3]
bb_nx = bb_n[:, 0]
bb_ny = bb_n[:, 1]
bb_nw = bb_n[:, 2]
bb_nh = bb_n[:, 3]
x_diff = np.divide(np.abs(bb_px - bb_nx), bb_pw)
y_diff = np.divide(np.abs(bb_py - bb_ny), bb_ph)
w_diff = np.maximum(np.divide(bb_pw, bb_nw), np.divide(bb_nw, bb_pw))
h_diff = np.maximum(np.divide(bb_ph, bb_nh), np.divide(bb_nh, bb_ph))
return self.get_gating_result(x_diff, y_diff, w_diff, h_diff, gating_factor)
def get_result(self):
return self.result
class Greedy_Tracker_APP_BLSTM(Greedy_Tracker):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
super(Greedy_Tracker_APP_BLSTM, self).__init__(cfg_tracker, cfg_train, tf_ops, tf_placeholders, session)
def run(self, bbs, bbs_norm, imgs, frame_num):
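# Per-frame flow: on the first frame every detection seeds a new track; on later frames the
# detections are (1) associated to existing tracks, (2) used to update matched/missed tracks,
# and (3) any detections left unmatched start new tracks.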
# first frame
if len(self.all_tracks.keys()) == 0 and imgs is not None:
mem_np = self.initialize_track_mems(imgs, bbs)
h_np, c_np, memory_np = mem_np
cur_detbb_num = np.shape(imgs)[0]
self.initialize_tracks(
h_np,
c_np,
memory_np,
bbs,
bbs_norm,
np.array(range(cur_detbb_num)),
frame_num,
self.model_info['app_hidden_dim'],
is_dummy=False,
network='app_blstm'
)
elif len(self.all_tracks.keys()) != 0:
bookkeeping = {}
self.data_association(imgs, bbs, bbs_norm, frame_num, bookkeeping)
self.update_existing_tracks(bbs, bbs_norm, frame_num, bookkeeping)
self.start_new_tracks(imgs, bbs, bbs_norm, frame_num, bookkeeping)
def initialize_track_mems(self, imgs, bbs):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])