"""
BezierGAN for capturing the airfoil manifold
Author(s): <NAME> (<EMAIL>)
"""
import numpy as np
import tensorflow as tf
#from shape_plot import plot_grid
from surrogate.surrogate_model import Model as SM
EPSILON = 1e-7
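# NOTE: this module is written against the TensorFlow 1.x API
# (tf.layers, tf.variable_scope, tf.placeholder, tf.Session); run it with
# TF 1.x or through tensorflow.compat.v1.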
class BezierGAN(object):
def __init__(self, latent_dim=5, noise_dim=100, n_points=64, bezier_degree=16, bounds=(0.0, 1.0),
lambda0=1., lambda1=0.01):
self.latent_dim = latent_dim
self.noise_dim = noise_dim
self.n_points = n_points
self.X_shape = (n_points, 2, 1)
self.bezier_degree = bezier_degree
self.bounds = bounds
self.lambda0 = lambda0
self.lambda1 = lambda1
def generator(self, c, z, reuse=tf.AUTO_REUSE, training=True):
depth_cpw = 32*8
dim_cpw = int((self.bezier_degree+1)/8)
kernel_size = (4,3)
# noise_std = 0.01
with tf.variable_scope('Generator', reuse=reuse):
if self.noise_dim == 0:
cz = c
else:
cz = tf.concat([c, z], axis=-1)
cpw = tf.layers.dense(cz, 1024)
cpw = tf.layers.batch_normalization(cpw, momentum=0.9)#, training=training)
cpw = tf.nn.leaky_relu(cpw, alpha=0.2)
cpw = tf.layers.dense(cpw, dim_cpw*3*depth_cpw)
cpw = tf.layers.batch_normalization(cpw, momentum=0.9)#, training=training)
cpw = tf.nn.leaky_relu(cpw, alpha=0.2)
cpw = tf.reshape(cpw, (-1, dim_cpw, 3, depth_cpw))
cpw = tf.layers.conv2d_transpose(cpw, int(depth_cpw/2), kernel_size, strides=(2,1), padding='same')
cpw = tf.layers.batch_normalization(cpw, momentum=0.9)#, training=training)
cpw = tf.nn.leaky_relu(cpw, alpha=0.2)
# cpw += tf.random_normal(shape=tf.shape(cpw), stddev=noise_std)
cpw = tf.layers.conv2d_transpose(cpw, int(depth_cpw/4), kernel_size, strides=(2,1), padding='same')
cpw = tf.layers.batch_normalization(cpw, momentum=0.9)#, training=training)
cpw = tf.nn.leaky_relu(cpw, alpha=0.2)
# cpw += tf.random_normal(shape=tf.shape(cpw), stddev=noise_std)
cpw = tf.layers.conv2d_transpose(cpw, int(depth_cpw/8), kernel_size, strides=(2,1), padding='same')
cpw = tf.layers.batch_normalization(cpw, momentum=0.9)#, training=training)
cpw = tf.nn.leaky_relu(cpw, alpha=0.2)
# cpw += tf.random_normal(shape=tf.shape(cpw), stddev=noise_std)
# Control points
cp = tf.layers.conv2d(cpw, 1, (1,2), padding='valid') # batch_size x (bezier_degree+1) x 2 x 1
cp = tf.nn.tanh(cp)
cp = tf.squeeze(cp, axis=-1, name='control_point') # batch_size x (bezier_degree+1) x 2
# Weights
w = tf.layers.conv2d(cpw, 1, (1,3), padding='valid')
w = tf.nn.sigmoid(w) # batch_size x (bezier_degree+1) x 1 x 1
w = tf.squeeze(w, axis=-1, name='weight') # batch_size x (bezier_degree+1) x 1
# Parameters at data points
db = tf.layers.dense(cz, 1024)
db = tf.layers.batch_normalization(db, momentum=0.9)#, training=training)
db = tf.nn.leaky_relu(db, alpha=0.2)
db = tf.layers.dense(db, 256)
db = tf.layers.batch_normalization(db, momentum=0.9)#, training=training)
db = tf.nn.leaky_relu(db, alpha=0.2)
db = tf.layers.dense(db, self.X_shape[0]-1)
db = tf.nn.softmax(db) # batch_size x (n_data_points-1)
# db = tf.random_gamma([tf.shape(cz)[0], self.X_shape[0]-1], alpha=100, beta=100)
# db = tf.nn.softmax(db) # batch_size x (n_data_points-1)
ub = tf.pad(db, [[0,0],[1,0]], constant_values=0) # batch_size x n_data_points
ub = tf.cumsum(ub, axis=1)
ub = tf.minimum(ub, 1)
            ub = tf.expand_dims(ub, axis=-1) # batch_size x n_data_points x 1
# Bezier layer
# Compute values of basis functions at data points
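            # Rational Bezier curve: C(u) = sum_i B_{i,n}(u) w_i P_i / sum_i B_{i,n}(u) w_i,
            # with Bernstein basis B_{i,n}(u) = C(n,i) u^i (1-u)^(n-i).
            # The basis is evaluated in log space (lgamma for the binomial
            # coefficients) for numerical stability and exponentiated below.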
num_control_points = self.bezier_degree + 1
lbs = tf.tile(ub, [1, 1, num_control_points]) # batch_size x n_data_points x n_control_points
pw1 = tf.range(0, num_control_points, dtype=tf.float32)
pw1 = tf.reshape(pw1, [1, 1, -1]) # 1 x 1 x n_control_points
pw2 = tf.reverse(pw1, axis=[-1])
lbs = tf.add(tf.multiply(pw1, tf.log(lbs+EPSILON)), tf.multiply(pw2, tf.log(1-lbs+EPSILON))) # batch_size x n_data_points x n_control_points
lc = tf.add(tf.lgamma(pw1+1), tf.lgamma(pw2+1))
lc = tf.subtract(tf.lgamma(tf.cast(num_control_points, dtype=tf.float32)), lc) # 1 x 1 x n_control_points
lbs = tf.add(lbs, lc) # batch_size x n_data_points x n_control_points
bs = tf.exp(lbs)
# Compute data points
cp_w = tf.multiply(cp, w)
dp = tf.matmul(bs, cp_w) # batch_size x n_data_points x 2
bs_w = tf.matmul(bs, w) # batch_size x n_data_points x 1
dp = tf.div(dp, bs_w) # batch_size x n_data_points x 2
dp = tf.expand_dims(dp, axis=-1, name='fake_image') # batch_size x n_data_points x 2 x 1
return dp, cp, w, ub, db
def discriminator(self, x, reuse=tf.AUTO_REUSE, training=True):
depth = 64
dropout = 0.4
kernel_size = (4,2)
with tf.variable_scope('Discriminator', reuse=reuse):
x = tf.layers.conv2d(x, depth*1, kernel_size, strides=(2,1), padding='same')
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
x = tf.layers.dropout(x, dropout, training=training)
x = tf.layers.conv2d(x, depth*2, kernel_size, strides=(2,1), padding='same')
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
x = tf.layers.dropout(x, dropout, training=training)
x = tf.layers.conv2d(x, depth*4, kernel_size, strides=(2,1), padding='same')
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
x = tf.layers.dropout(x, dropout, training=training)
x = tf.layers.conv2d(x, depth*8, kernel_size, strides=(2,1), padding='same')
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
x = tf.layers.dropout(x, dropout, training=training)
x = tf.layers.conv2d(x, depth*16, kernel_size, strides=(2,1), padding='same')
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
x = tf.layers.dropout(x, dropout, training=training)
x = tf.layers.conv2d(x, depth*32, kernel_size, strides=(2,1), padding='same')
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
x = tf.layers.dropout(x, dropout, training=training)
x = tf.layers.flatten(x)
x = tf.layers.dense(x, 1024)
x = tf.layers.batch_normalization(x, momentum=0.9)#, training=training)
x = tf.nn.leaky_relu(x, alpha=0.2)
d = tf.layers.dense(x, 1)
q = tf.layers.dense(x, 128)
# q = tf.layers.batch_normalization(q, momentum=0.9)#, training=training)
q = tf.nn.leaky_relu(q, alpha=0.2)
q_mean = tf.layers.dense(q, self.latent_dim)
q_logstd = tf.layers.dense(q, self.latent_dim)
q_logstd = tf.maximum(q_logstd, -16)
# Reshape to batch_size x 1 x latent_dim
q_mean = tf.reshape(q_mean, (-1, 1, self.latent_dim))
q_logstd = tf.reshape(q_logstd, (-1, 1, self.latent_dim))
q = tf.concat([q_mean, q_logstd], axis=1, name='predicted_latent') # batch_size x 2 x latent_dim
return d, q
def compute_diversity_loss(self, x, y):
x = tf.layers.flatten(x)
y = tf.squeeze(y)
r = tf.reduce_sum(tf.square(x), axis=1, keepdims=True)
D = r - 2*tf.matmul(x, tf.transpose(x)) + tf.transpose(r)
S = tf.exp(-0.5*tf.square(D)) # similarity matrix (rbf)
# S = 1/(1+D)
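        # D holds pairwise squared Euclidean distances (||x_i||^2 - 2 x_i.x_j + ||x_j||^2),
        # S is an RBF-style similarity matrix built from it, and Q weights similarities
        # by sample quality y. The loss is the negative mean log-eigenvalue of the
        # resulting DPP kernel L, which encourages diverse, high-quality samples.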
if self.lambda0 == 'naive':
eig_val, _ = tf.self_adjoint_eig(S)
loss = -tf.reduce_mean(tf.log(tf.maximum(eig_val, EPSILON)))-10*tf.reduce_mean(y)
Q = None
L = None
else:
Q = tf.tensordot(tf.expand_dims(y, 1), tf.expand_dims(y, 0), 1) # quality matrix
if self.lambda0 == 0.:
L = S
else:
L = S * tf.pow(Q, self.lambda0)
eig_val, _ = tf.self_adjoint_eig(L)
loss = -tf.reduce_mean(tf.log(tf.maximum(eig_val, EPSILON)))
return loss, D, S, Q, L
def train(self, X_train, train_steps=2000, batch_size=32, disc_lr=2e-4, gen_lr=2e-4, save_interval=0,
directory='.', surrogate_dir='.'):
X_train = np.expand_dims(X_train, axis=-1).astype(np.float32)
# Inputs
self.x = tf.placeholder(tf.float32, shape=(None,)+self.X_shape, name='real_image')
self.c = tf.placeholder(tf.float32, shape=[None, self.latent_dim], name='latent_code')
self.z = tf.placeholder(tf.float32, shape=[None, self.noise_dim], name='noise')
# Targets
q_target = tf.placeholder(tf.float32, shape=[None, self.latent_dim])
# Outputs
d_real, _ = self.discriminator(self.x)
x_fake_train, cp_train, w_train, ub_train, db_train = self.generator(self.c, self.z)
d_fake, q_fake_train = self.discriminator(x_fake_train)
self.x_fake_test, self.cp, self.w, ub, db = self.generator(self.c, self.z, training=False)
# Schedule for lambda1
p = tf.cast(5, tf.float32)
G_global_step = tf.Variable(0, name='G_global_step', trainable=False)
lambda1 = self.lambda1 * tf.cast(G_global_step/(train_steps-1), tf.float32)**p
disc_lr = tf.train.exponential_decay(disc_lr, G_global_step, 1000, 0.8, staircase=True)
gen_lr = tf.train.exponential_decay(gen_lr, G_global_step, 1000, 0.8, staircase=True)
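        # lambda1 ramps up polynomially (power p) from 0 to self.lambda1 over the
        # course of training, while both learning rates decay by a factor of 0.8
        # every 1000 generator steps.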
# Combine with the surrogate model graph
with tf.Session() as sess:
surrogate_model = SM(sess, self.n_points)
surrogate_graph = surrogate_model.restore(directory=surrogate_dir)
output_node_names = 'net/y'
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
surrogate_graph.as_graph_def(), # The graph_def is used to retrieve the nodes
                output_node_names.split(",") # The output node names are used to select the useful nodes
)
graph = tf.get_default_graph()
tf.graph_util.import_graph_def(frozen_graph,
input_map={'x:0': x_fake_train, 'training:0': False},
name='surrogate')
y = graph.get_tensor_by_name('surrogate/net/y:0')
mean_y = tf.reduce_mean(y)
# Losses
# Cross entropy losses for D
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=tf.ones_like(d_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.zeros_like(d_fake)))
# Cross entropy losses for G
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.ones_like(d_fake)))
dpp_loss, D, S, Q, L = self.compute_diversity_loss(x_fake_train, y*d_fake)
g_dpp_loss = g_loss + lambda1 * dpp_loss
# Regularization for w, cp, a, and b
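        # r_w_loss penalizes large rational weights at interior control points,
        # r_cp_loss penalizes the average (and r_cp_loss1 the maximum) gap between
        # consecutive control points, r_ends_loss pulls the first and last control
        # points together (closed trailing edge) and penalizes the last point lying
        # above the first, and r_db_loss is an entropy-based term on the parameter
        # increments db (r_cp_loss1 and r_db_loss are disabled below via 0 weights).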
r_w_loss = tf.reduce_mean(w_train[:,1:-1], axis=[1,2])
cp_dist = tf.norm(cp_train[:,1:]-cp_train[:,:-1], axis=-1)
r_cp_loss = tf.reduce_mean(cp_dist, axis=-1)
r_cp_loss1 = tf.reduce_max(cp_dist, axis=-1)
ends = cp_train[:,0] - cp_train[:,-1]
r_ends_loss = tf.norm(ends, axis=-1) + tf.maximum(0.0, -10*ends[:,1])
r_db_loss = tf.reduce_mean(db_train*tf.log(db_train), axis=-1)
r_loss = r_w_loss + r_cp_loss + 0*r_cp_loss1 + r_ends_loss + 0*r_db_loss
r_loss = tf.reduce_mean(r_loss)
# Gaussian loss for Q
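        # (InfoGAN-style mutual-information term: negative log-likelihood of the
        # sampled latent code under N(q_mean, exp(q_logstd)^2), up to a constant)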
q_mean = q_fake_train[:, 0, :]
q_logstd = q_fake_train[:, 1, :]
epsilon = (q_target - q_mean) / (tf.exp(q_logstd) + EPSILON)
q_loss = q_logstd + 0.5 * tf.square(epsilon)
q_loss = tf.reduce_mean(q_loss)
# Optimizers
d_optimizer = tf.train.AdamOptimizer(learning_rate=disc_lr, beta1=0.5)
g_optimizer = tf.train.AdamOptimizer(learning_rate=gen_lr, beta1=0.5)
# Generator variables
gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')
# Discriminator variables
dis_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
# Training operations
d_train_real = d_optimizer.minimize(d_loss_real, var_list=dis_vars)
d_train_fake = d_optimizer.minimize(d_loss_fake + q_loss, var_list=dis_vars)
g_train = g_optimizer.minimize(g_dpp_loss + 10*r_loss + q_loss, var_list=gen_vars, global_step=G_global_step)
# for v in tf.trainable_variables():
# print(v.name)
# for v in dis_vars:
# print(v.name)
# for v in gen_vars:
# print(v.name)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# # Create summaries to monitor losses
# tf.summary.scalar('D_loss_for_real', d_loss_real)
# tf.summary.scalar('D_loss_for_fake', d_loss_fake)
# tf.summary.scalar('G_loss', g_loss)
# tf.summary.scalar('DPP_loss', dpp_loss)
# tf.summary.scalar('R_loss', r_loss)
# tf.summary.scalar('Q_loss', q_loss)
# # Merge all summaries into a single op
# merged_summary_op = tf.summary.merge_all()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Start training
self.sess = tf.Session()
# Run the initializer
self.sess.run(init)
# # op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter('{}/logs'.format(directory), graph=self.sess.graph)
for t in range(train_steps):
ind = np.random.choice(X_train.shape[0], size=batch_size, replace=False)
X_real = X_train[ind]
# y_batch = self.sess.run(y, feed_dict={x_fake_train: X_real})
# print('************************************ y before update ************************************')
# print(y_batch)
_, dlr = self.sess.run([d_train_real, d_loss_real], feed_dict={self.x: X_real})
latent = np.random.uniform(low=self.bounds[0], high=self.bounds[1], size=(batch_size, self.latent_dim))
noise = np.random.normal(scale=0.5, size=(batch_size, self.noise_dim))
X_fake = self.sess.run(self.x_fake_test, feed_dict={self.c: latent, self.z: noise})
if np.any(np.isnan(X_fake)):
ind = np.any(np.isnan(X_fake), axis=(1,2,3))
print(self.sess.run(ub, feed_dict={self.c: latent, self.z: noise})[ind])
assert not np.any(np.isnan(X_fake))
_, dlf, qdl, lrd = self.sess.run([d_train_fake, d_loss_fake, q_loss, disc_lr],
feed_dict={x_fake_train: X_fake, q_target: latent})
latent = np.random.uniform(low=self.bounds[0], high=self.bounds[1], size=(batch_size, self.latent_dim))
noise = np.random.normal(scale=0.5, size=(batch_size, self.noise_dim))
_, gl, dppl, rl, qgl, my, lbd1, lrg = self.sess.run([g_train, g_loss, dpp_loss, r_loss, q_loss, mean_y, lambda1, gen_lr],
feed_dict={self.c: latent, self.z: noise, q_target: latent})
# y_batch = self.sess.run(y, feed_dict={x_fake_train: X_real})
# print('************************************ y after update ************************************')
# print(y_batch)
# D_batch, S_batch, Q_batch, L_batch = self.sess.run([D, S, Q, L], feed_dict={self.c: latent, self.z: noise})
# print('************************************ SQL ************************************')
# print('L-S:', L_batch-S_batch)
# print(Q_batch)
# summary_str = self.sess.run(merged_summary_op, feed_dict={self.x: X_real, x_fake_train: X_fake,
# self.c: latent, self.z: noise, q_target: latent})
#
# summary_writer.add_summary(summary_str, t+1)
# Show messages
log_mesg = "%d: [D] real %f fake %f q %f lr %f" % (t+1, dlr, dlf, qdl, lrd)
log_mesg = "%s [G] fake %f dpp %f reg %f q %f y %f lambda1 %f lr %f" % (log_mesg, gl, dppl, rl, qgl, my, lbd1, lrg)
print(log_mesg)
if save_interval>0 and (t+1)%save_interval==0 or t+1==train_steps:
# Save the variables to disk.
save_path = saver.save(self.sess, '{}/model'.format(directory))
print('Model saved in path: %s' % save_path)
# print('Plotting results ...')
# plot_grid(5, gen_func=self.synthesize, d=self.latent_dim, bounds=self.bounds,
# scale=.95, scatter=True, s=1, alpha=.7, fname='{}/synthesized'.format(directory))
summary_writer.close()
def restore(self, directory='.'):
self.sess = tf.Session()
# Load meta graph and restore weights
saver = tf.train.import_meta_graph('{}/model.meta'.format(directory))
saver.restore(self.sess, tf.train.latest_checkpoint('{}/'.format(directory)))
# Access and create placeholders variables
graph = tf.get_default_graph()
self.x = graph.get_tensor_by_name('real_image:0')
self.c = graph.get_tensor_by_name('latent_code:0')
self.z = graph.get_tensor_by_name('noise:0')
self.x_fake_test = graph.get_tensor_by_name('Generator_1/fake_image:0')
self.cp = graph.get_tensor_by_name('Generator_1/control_point:0')
self.w = graph.get_tensor_by_name('Generator_1/weight:0')
def synthesize(self, latent, noise=None):
if isinstance(latent, int):
N = latent
            latent = np.random.uniform(low=self.bounds[0], high=self.bounds[1], size=(N, self.latent_dim))
import nose.tools
import numpy as np
import subprocess as sp
import hexapawn
import logging
import os
tests_logger = logging.getLogger('root')
def setup():
tests_logger.debug('Setting Up')
def teardown():
tests_logger.debug('Tearing Down')
def test_basic():
tests_logger.debug('Running Tests')
def test_state_construct():
tests_logger.debug('Testing the board constructor')
with open('tests/test1-in.txt', 'r') as f:
b = hexapawn.state.State(f.read())
assert b.turn == 'B'
assert np.array_equal(b.board, np.array([['p', 'p', 'p'], ['.', 'P', '.'], ['P', '.', 'P']]))
b = hexapawn.state.State('W\n...\nPpP\npPp\n')
assert b.turn == 'W'
assert np.array_equal(b.board, np.array([['.', '.', '.'], ['P', 'p', 'P'], ['p', 'P', 'p']]))
print('END')
def test_get_move_masks():
tests_logger.debug('Testing getting move masks for white')
b = hexapawn.state.State('W\np.p\n.Pp\nP..\n')
w = hexapawn.piece.Pawn('W', b.board.shape)
attack, move = w.get_moves(b)
    valid_attack = [(np.array([1, 1]), np.array([0, 0])), (np.array([1, 1]), np.array([0, 2]))]
from __future__ import print_function
import h5py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
try:
import moxing as mox
import npu_bridge
mox.file.shift('os', 'mox')
h5py_File_class = h5py.File
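    # Subclass h5py.File so that files addressed by obs:// or s3:// URLs are first
    # copied to a local cache, opened from there, and copied back to object storage
    # on close; h5py.File is then monkey-patched to this class via setattr below.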
class OBSFile(h5py_File_class):
def __init__(self, name, *args, **kwargs):
self._tmp_name = None
self._target_name = name
if name.startswith('obs://') or name.startswith('s3://'):
self._tmp_name = os.path.join('cache', 'h5py_tmp',
name.replace('/', '_'))
if mox.file.exists(name):
mox.file.copy(name, self._tmp_name)
name = self._tmp_name
super(OBSFile, self).__init__(name, *args, **kwargs)
def close(self):
if self._tmp_name:
mox.file.copy(self._tmp_name, self._target_name)
super(OBSFile, self).close()
setattr(h5py, 'File', OBSFile)
except:
pass
import argparse
import glob
import time
import numpy as np
import scipy.io
import tensorflow as tf
from PIL import Image
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
from network import network
_errstr = "Mode is unknown or incompatible with input array shape."
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == np.uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(np.uint8)
def toimage(arr,
high=255,
low=0,
cmin=None,
cmax=None,
pal=None,
mode=None,
channel_axis=None):
"""Takes a numpy array and returns a PIL image.
This function is only available if Python Imaging Library (PIL) is installed.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
    data = np.asarray(arr)
#!/usr/bin/env python
u"""
spatial.py
Written by <NAME> (10/2021)
Data class for reading, writing and processing spatial data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format.
https://www.h5py.org/
PROGRAM DEPENDENCIES:
time.py: utilities for calculating time operations
UPDATE HISTORY:
Written 10/2021
"""
import os
import re
import io
import copy
import gzip
import h5py
import time
import uuid
import logging
import zipfile
import netCDF4
import numpy as np
class spatial(object):
"""
Data class for reading, writing and processing spatial data
"""
np.seterr(invalid='ignore')
def __init__(self, **kwargs):
#-- set default keyword arguments
kwargs.setdefault('spacing',[None,None])
kwargs.setdefault('nlat',None)
kwargs.setdefault('nlon',None)
kwargs.setdefault('extent',[None]*4)
kwargs.setdefault('fill_value',None)
#-- set default class attributes
self.data=None
self.mask=None
self.lon=None
self.lat=None
self.time=None
self.fill_value=kwargs['fill_value']
self.attributes=dict()
self.extent=kwargs['extent']
self.spacing=kwargs['spacing']
self.shape=[kwargs['nlat'],kwargs['nlon'],None]
self.ndim=None
self.filename=None
def case_insensitive_filename(self,filename):
"""
Searches a directory for a filename without case dependence
"""
#-- check if filename is open file object
if isinstance(filename, io.IOBase):
self.filename = copy.copy(filename)
else:
#-- tilde-expand input filename
self.filename = os.path.expanduser(filename)
#-- check if file presently exists with input case
if not os.access(self.filename,os.F_OK):
#-- search for filename without case dependence
basename = os.path.basename(filename)
directory = os.path.dirname(os.path.expanduser(filename))
f = [f for f in os.listdir(directory) if re.match(basename,f,re.I)]
if not f:
errmsg = '{0} not found in file system'.format(filename)
raise FileNotFoundError(errmsg)
self.filename = os.path.join(directory,f.pop())
return self
def from_ascii(self, filename, date=True, **kwargs):
"""
Read a spatial object from an ascii file
Inputs: full path of input ascii file
Options:
ascii file contains date information
keyword arguments for ascii input
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- set default parameters
kwargs.setdefault('verbose',False)
kwargs.setdefault('compression',None)
kwargs.setdefault('columns',['lon','lat','data','time'])
kwargs.setdefault('header',0)
#-- open the ascii file and extract contents
logging.info(self.filename)
if (kwargs['compression'] == 'gzip'):
#-- read input ascii data from gzip compressed file and split lines
with gzip.open(self.filename,'r') as f:
file_contents = f.read().decode('ISO-8859-1').splitlines()
elif (kwargs['compression'] == 'zip'):
#-- read input ascii data from zipped file and split lines
base,_ = os.path.splitext(self.filename)
with zipfile.ZipFile(self.filename) as z:
file_contents = z.read(base).decode('ISO-8859-1').splitlines()
elif (kwargs['compression'] == 'bytes'):
#-- read input file object and split lines
file_contents = self.filename.read().splitlines()
else:
#-- read input ascii file (.txt, .asc) and split lines
with open(self.filename,'r') as f:
file_contents = f.read().splitlines()
#-- compile regular expression operator for extracting numerical values
#-- from input ascii files of spatial data
regex_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[EeD][+-]?\d+)?'
rx = re.compile(regex_pattern, re.VERBOSE)
#-- output spatial dimensions
if (None not in self.extent):
self.lat = np.linspace(self.extent[3],self.extent[2],self.shape[0])
self.lon = np.linspace(self.extent[0],self.extent[1],self.shape[1])
else:
self.lat = np.zeros((self.shape[0]))
self.lon = np.zeros((self.shape[1]))
#-- output spatial data
self.data = np.zeros((self.shape[0],self.shape[1]))
self.mask = np.zeros((self.shape[0],self.shape[1]),dtype=bool)
#-- remove time from list of column names if not date
columns = [c for c in kwargs['columns'] if (c != 'time')]
#-- extract spatial data array and convert to matrix
#-- for each line in the file
header = kwargs['header']
for line in file_contents[header:]:
#-- extract columns of interest and assign to dict
#-- convert fortran exponentials if applicable
d = {c:r.replace('D','E') for c,r in zip(columns,rx.findall(line))}
#-- convert line coordinates to integers
ilon = np.int64(np.float64(d['lon'])/self.spacing[0])
ilat = np.int64((90.0-np.float64(d['lat']))//self.spacing[1])
self.data[ilat,ilon] = np.float64(d['data'])
self.mask[ilat,ilon] = False
self.lon[ilon] = np.float64(d['lon'])
self.lat[ilat] = np.float64(d['lat'])
#-- if the ascii file contains date variables
if date:
self.time = np.array(d['time'],dtype='f')
#-- get spacing and dimensions
self.update_spacing()
self.update_extents()
self.update_dimensions()
self.update_mask()
return self
def from_netCDF4(self, filename, **kwargs):
"""
Read a spatial object from a netCDF4 file
Inputs: full path of input netCDF4 file
Options:
netCDF4 file contains date information
keyword arguments for netCDF4 reader
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- set default parameters
kwargs.setdefault('date',True)
kwargs.setdefault('verbose',False)
kwargs.setdefault('compression',None)
kwargs.setdefault('varname','z')
kwargs.setdefault('lonname','lon')
kwargs.setdefault('latname','lat')
kwargs.setdefault('timename','time')
        #-- read data from netCDF4 file
data = ncdf_read(self.filename, **kwargs)
#-- copy variables to spatial object
self.data = data['data'].copy()
if '_FillValue' in data['attributes']['data'].keys():
self.fill_value = data['attributes']['data']['_FillValue']
self.mask = np.zeros(self.data.shape, dtype=bool)
self.lon = data['lon'].copy()
self.lat = data['lat'].copy()
#-- if the netCDF4 file contains date variables
if kwargs['date']:
self.time = data['time'].copy()
#-- update attributes
self.attributes.update(data['attributes'])
#-- get spacing and dimensions
self.update_spacing()
self.update_extents()
self.update_dimensions()
self.update_mask()
return self
def from_HDF5(self, filename, **kwargs):
"""
Read a spatial object from a HDF5 file
Inputs: full path of input HDF5 file
Options:
keyword arguments for HDF5 reader
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- set default parameters
kwargs.setdefault('date',True)
kwargs.setdefault('verbose',False)
kwargs.setdefault('compression',None)
kwargs.setdefault('varname','z')
kwargs.setdefault('lonname','lon')
kwargs.setdefault('latname','lat')
kwargs.setdefault('timename','time')
#-- read data from HDF5 file
data = hdf5_read(self.filename, **kwargs)
#-- copy variables to spatial object
self.data = data['data'].copy()
if '_FillValue' in data['attributes']['data'].keys():
            self.fill_value = data['attributes']['data']['_FillValue']
self.mask = np.zeros(self.data.shape, dtype=bool)
self.lon = data['lon'].copy()
self.lat = data['lat'].copy()
#-- if the HDF5 file contains date variables
if kwargs['date']:
self.time = data['time'].copy()
#-- update attributes
self.attributes.update(data['attributes'])
#-- get spacing and dimensions
self.update_spacing()
self.update_extents()
self.update_dimensions()
self.update_mask()
return self
def from_file(self, filename, format=None, **kwargs):
"""
Read a spatial object from a specified format
Inputs: full path of input file
Options:
file format (ascii, netCDF4, HDF5)
file contains date information
**kwargs: keyword arguments for input readers
"""
#-- set filename
self.case_insensitive_filename(filename)
#-- set default verbosity
kwargs.setdefault('verbose',False)
#-- read from file
if (format == 'ascii'):
#-- ascii (.txt)
return spatial().from_ascii(filename, **kwargs)
elif (format == 'netCDF4'):
#-- netcdf (.nc)
return spatial().from_netCDF4(filename, **kwargs)
elif (format == 'HDF5'):
#-- HDF5 (.H5)
return spatial().from_HDF5(filename, **kwargs)
def from_list(self, object_list, **kwargs):
"""
Build a sorted spatial object from a list of other spatial objects
Inputs: list of spatial object to be merged
Options:
spatial objects contain date information
sort spatial objects by date information
clear the spatial list from memory
"""
#-- set default keyword arguments
kwargs.setdefault('date',True)
kwargs.setdefault('sort',True)
kwargs.setdefault('clear',False)
#-- number of spatial objects in list
n = len(object_list)
#-- indices to sort data objects if spatial list contain dates
if kwargs['date'] and kwargs['sort']:
list_sort = np.argsort([d.time for d in object_list],axis=None)
else:
list_sort = np.arange(n)
#-- extract dimensions and grid spacing
self.spacing = object_list[0].spacing
self.extent = object_list[0].extent
self.shape = object_list[0].shape
#-- create output spatial grid and mask
self.data = np.zeros((self.shape[0],self.shape[1],n))
self.mask = np.zeros((self.shape[0],self.shape[1],n),dtype=bool)
self.fill_value = object_list[0].fill_value
self.lon = object_list[0].lon.copy()
self.lat = object_list[0].lat.copy()
#-- create list of files and attributes
self.filename = []
self.attributes = []
#-- output dates
if kwargs['date']:
self.time = np.zeros((n))
#-- for each indice
for t,i in enumerate(list_sort):
self.data[:,:,t] = object_list[i].data[:,:].copy()
self.mask[:,:,t] |= object_list[i].mask[:,:]
if kwargs['date']:
self.time[t] = np.atleast_1d(object_list[i].time)
#-- append filename to list
if getattr(object_list[i], 'filename'):
self.filename.append(object_list[i].filename)
#-- append attributes to list
if getattr(object_list[i], 'attributes'):
self.attributes.append(object_list[i].attributes)
#-- update the dimensions
self.update_dimensions()
self.update_mask()
#-- clear the input list to free memory
if kwargs['clear']:
object_list = None
#-- return the single spatial object
return self
def from_dict(self, d, **kwargs):
"""
Convert a dict object to a spatial object
Inputs: dictionary object to be converted
"""
#-- assign variables to self
for key in ['lon','lat','data','error','time']:
try:
setattr(self, key, d[key].copy())
except (AttributeError, KeyError):
pass
#-- create output mask for data
self.mask = np.zeros_like(self.data,dtype=bool)
#-- get spacing and dimensions
self.update_spacing()
self.update_extents()
self.update_dimensions()
self.update_mask()
return self
def to_ascii(self, filename, date=True, **kwargs):
"""
Write a spatial object to ascii file
Inputs: full path of output ascii file
Options:
spatial objects contain date information
keyword arguments for ascii output
"""
self.filename = os.path.expanduser(filename)
#-- set default verbosity
kwargs.setdefault('verbose',False)
logging.info(self.filename)
#-- open the output file
fid = open(self.filename, 'w')
if date:
file_format = '{0:10.4f} {1:10.4f} {2:12.4f} {3:10.4f}'
else:
file_format = '{0:10.4f} {1:10.4f} {2:12.4f}'
#-- write to file for each valid latitude and longitude
ii,jj = np.nonzero((self.data != self.fill_value) & (~self.mask))
for ln,lt,dt in zip(self.lon[jj],self.lat[ii],self.data[ii,jj]):
print(file_format.format(ln,lt,dt,self.time), file=fid)
#-- close the output file
fid.close()
def to_netCDF4(self, filename, **kwargs):
"""
Write a spatial object to netCDF4 file
Inputs: full path of output netCDF4 file
Options: spatial objects contain date information
**kwargs: keyword arguments for netCDF4 writer
"""
self.filename = os.path.expanduser(filename)
#-- set default verbosity and parameters
kwargs.setdefault('date',True)
kwargs.setdefault('verbose',False)
kwargs.setdefault('varname','z')
kwargs.setdefault('lonname','lon')
kwargs.setdefault('latname','lat')
kwargs.setdefault('timename','time')
kwargs.setdefault('time_units','years')
kwargs.setdefault('time_longname','Date_in_Decimal_Years')
#-- write to netCDF4
ncdf_write(self.data, self.lon, self.lat, self.time,
**kwargs)
def to_HDF5(self, filename, **kwargs):
"""
Write a spatial object to HDF5 file
Inputs: full path of output HDF5 file
Options: spatial objects contain date information
**kwargs: keyword arguments for HDF5 writer
"""
self.filename = os.path.expanduser(filename)
#-- set default verbosity and parameters
kwargs.setdefault('date',True)
kwargs.setdefault('verbose',False)
kwargs.setdefault('varname','z')
kwargs.setdefault('lonname','lon')
kwargs.setdefault('latname','lat')
kwargs.setdefault('timename','time')
kwargs.setdefault('time_units','years')
kwargs.setdefault('time_longname','Date_in_Decimal_Years')
#-- write to HDF5
hdf5_write(self.data, self.lon, self.lat, self.time,
**kwargs)
def to_file(self, filename, format=None, date=True, **kwargs):
"""
Write a spatial object to a specified format
Inputs: full path of output file
Options:
file format (ascii, netCDF4 or HDF5)
spatial object contains date information
keyword arguments for output writers
"""
#-- set default verbosity
kwargs.setdefault('verbose',False)
#-- write to file
if (format == 'ascii'):
#-- ascii (.txt)
self.to_ascii(filename, date=date, **kwargs)
elif (format == 'netCDF4'):
#-- netcdf (.nc)
self.to_netCDF4(filename, date=date, **kwargs)
elif (format == 'HDF5'):
#-- HDF5 (.H5)
self.to_HDF5(filename, date=date, **kwargs)
def to_masked_array(self):
"""
Convert a spatial object to a masked numpy array
"""
return np.ma.array(self.data, mask=self.mask,
fill_value=self.fill_value)
def update_spacing(self):
"""
Calculate the step size of spatial object
"""
#-- calculate degree spacing
dlat = np.abs(self.lat[1] - self.lat[0])
dlon = np.abs(self.lon[1] - self.lon[0])
self.spacing = (dlon,dlat)
return self
def update_extents(self):
"""
Calculate the bounds of spatial object
"""
self.extent[0] = np.min(self.lon)
self.extent[1] = np.max(self.lon)
self.extent[2] = np.min(self.lat)
self.extent[3] = np.max(self.lat)
def update_dimensions(self):
"""
Update the dimensions of the spatial object
"""
        self.shape = np.shape(self.data)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # for removing unnecessary warnings
from absl import logging
logging._warn_preinit_stderr = 0
logging.warning('...')
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy import interpolate
from tensorflow import keras
import evidential_deep_learning as edl
import time
import random
import matplotlib.pyplot as plt
import joblib
import pathlib
# from PythonCode.KF.ENUtransform import ENUtoWGS84
import src.clustering.COStransforms as ct
from src.regression.process import scale_lon_lat_cog_sog, scale_lon_lat_cog_sog_cluster,inverse_transform_lon, inverse_transform_lat, inverse_transform_cog, inverse_transform_sog
from src.regression.load_data_regress import load_test_data, load_test_interpolated_data, load_test_data_ENU, load_test_data_gedsar, load_test_data_vk2mmsi
########### check a whole track ##############
from src.evidential_deep_learning.layers.dense import DenseNormalGamma
from src.evidential_deep_learning.losses.continuous import EvidentialRegression
# edl.losses.EvidentialRegression only works if the custom loss function's get_config
# method is changed to accept **kwargs in its arguments.
# DenseNormalGamma is also imported locally, otherwise the saved model does not recognise this layer.
vb_dir = os.path.dirname(__file__)
data_dir = os.path.join(vb_dir, "resources/")
results_dir = os.path.join(vb_dir, "results/")
ANO_DATA = False
cluster = True
dataset = "1" # 1-> jan 2016, 2-> july 2019, 3=> Jan 2020, 4=> rostock_degsar
if cluster:
features = ['x', 'y', 'cog', 'sog', 'cluster'] # by default we convert to ENU
model_name = "edl_graph_True_ds"+dataset+".h5"
else:
features = ['x', 'y', 'cog', 'sog']
model_name = "edl_graph_False_ds"+dataset+".h5"
dim = len(features)
INPUT_LEN = 10 # same as timesteps
TARGET_LEN = 1
track_to_check = 0  # 167 # 43, 167, 202
# path = "/home/sing_sd/Desktop/evidential/src/regression/resources/"
# model = joblib.load(model_name)
# custom_objects = {'DenseNormalGamma':edl.layers.DenseNormalGamma}
# with keras.utils.custom_object_scope(custom_objects):
# model=keras.models.load_model(path+model_name)
def EvidentialRegressionLoss(true, pred):
return EvidentialRegression(true, pred, coeff=1e-2)
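# Wrapping the loss with a fixed coeff gives a named function that can be passed
# through custom_objects when reloading the saved model below.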
# layer_config = layer.get_config()
# new_layer = keras.layers.Dense.from_config(layer_config)
model = keras.models.load_model(data_dir+model_name, custom_objects={'DenseNormalGamma': DenseNormalGamma, "EvidentialRegressionLoss": EvidentialRegressionLoss})
plt.rcParams.update({'font.size': 12})
fig, ax = plt.subplots(figsize=(6, 8))
def main():
fig1, ax1 = plt.subplots(4, figsize=(6, 8))
# plot_uncertainties_rg_left_right_center()
# original_data, test_data, test_data_interpolated = load_test_data(INPUT_LEN, TARGET_LEN, features, dim, track_to_check)
original_data, test_data, test_data_interpolated = load_test_data_gedsar(INPUT_LEN, TARGET_LEN, features, dim, ANO_DATA, cluster)
# original_data, test_data, test_data_interpolated = load_test_data_vk2mmsi(INPUT_LEN, TARGET_LEN, features, dim, ANO_DATA)
ax.plot(original_data[:INPUT_LEN, 0], original_data[:INPUT_LEN, 1], '.k', label='Original trajectory')
plt.pause(0.001)
test_predict = 0
test_predict_all = np.zeros(shape=(len(test_data)-INPUT_LEN, dim))
uncertainty_all = np.zeros(shape=(len(test_data)-INPUT_LEN, dim))
total_distance_error = []
predicted_track = pd.DataFrame(columns=['x', 'y'])
for data_index in range(len(test_data)-INPUT_LEN):
if np.isnan(test_data[data_index, (INPUT_LEN-1)*dim]):
for i in range(INPUT_LEN):
if data_index+i < len(test_data):
test_data[data_index + i, (INPUT_LEN-1-i)*dim : (INPUT_LEN-i)*dim] = test_predict[0, 0: dim]
X_test = test_data[data_index, :].reshape(1, INPUT_LEN, dim)
if not cluster:
X_test[0] = scale_lon_lat_cog_sog(X_test[0])
else:
X_test[0, :, 0: dim] = scale_lon_lat_cog_sog_cluster(X_test[0, :, 0:dim])
# start_time = time.time()
test_predict = model.predict(X_test)
# print(time.time()-start_time)
# invert predictions
y_pred = tf.reshape(test_predict, shape=(test_predict.__len__(), TARGET_LEN * dim * 4)) # n_stds=4
uncertianty = get_uncertainty(y_pred) # sigma, alpha, v
mu, v, alpha, beta = tf.split(y_pred, 4, axis=-1)
plot_v_alpha_beta(ax1,data_index,v,alpha, beta)
        test_predict = np.array(y_pred[:, 0:dim])
from collections import defaultdict
import numpy as np
from sklearn import svm
def makefeatx(fx, fy, bx, by, img):
    tmpx = np.hstack((fx, bx))
__author__ = '<NAME> <<EMAIL>>'
__contributors__ = "<NAME>, <NAME>, <NAME>"
__version__ = "0.91"
# Copyright (c) 2016, Hong Kong University of Science and Technology (HKUST)
# All rights reserved.
# ===============================================================================
# GLOBAL IMPORTS:
import random
import operator
import time
import numpy as np
from multiprocessing import Pool
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.utils.validation import check_is_fitted
from metrics.pairwise import pairwise_distances
from functools import reduce
import faiss  # required by FaissNearestNeighbors below
# ===============================================================================
# LOCAL IMPORTS:
#import knn as knnn
# ===============================================================================
def FaissNearestNeighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True, GPU=False):
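    # Build a FAISS (IVF)Flat L2 index on CPU or GPU and return, for every point in X,
    # the distances (and optionally indices) of its k nearest neighbours, where k is
    # grown adaptively so that it covers the eps-neighbourhood used by the
    # density-based clustering.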
dimension = X.shape[1]
if GPU is True:
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
res = faiss.StandardGpuResources() # use a single GPU
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
# make it an IVF GPU index
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
assert not index_gpu.is_trained
index_gpu.train(X)
assert index_gpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
index_gpu.add(X)
n_samples = 10
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_gpu.search(X[samples], k) # sanity check
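        # double k until the k-th neighbour of the sampled points lies beyond eps,
        # so the actual search below returns every neighbour within the eps radius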
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_gpu.search(X[samples], k)
# print(np.max(D[:, k - 1]), k, eps)
index_gpu.nprobe = nprobe
D, I = index_gpu.search(X, k) # actual search
else:
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
assert not index_cpu.is_trained
index_cpu.train(X)
assert index_cpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
index_cpu.add(X)
n_samples = 10
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_cpu.search(X[samples], k) # sanity check
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_cpu.search(X[samples], k)
# print(np.max(D[:, k - 1]), k, eps)
index_cpu.nprobe = nprobe
D, I = index_cpu.search(X, k) # actual search
if return_distance is True:
return D, I
else:
return D
def run_knn(X, n_neighbors=100, n_samples=1000, metric='rmsd', algorithm='vp_tree'):
# X = check_array(X, accept_sparse='csr')
#print "Calculating pairwise ", metric, " distances of ", n_samples, " samples..."
t0 = time.time()
    if metric == "rmsd":
samples = random.sample(list(X), n_samples)
whole_samples= reduce(operator.add, (samples[i] for i in range(len(samples))))
else:
whole_samples = random.sample(list(X), n_samples)
sample_dist_metric = pairwise_distances( whole_samples, whole_samples, metric=metric )
t1 = time.time()
#print "time:", t1-t0,
#print "Done."
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later
#print "Calculating knn..."
t0 = time.time()
    if metric == 'rmsd':
        shape_x = np.shape(X.xyz)
import os
import re
import numpy as np
import scipy.io as sio
from scipy.fftpack import fft
import pandas as pd
from .movie import Movie, FullFieldFlashMovie
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', 100)
#################################################
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
##################################################
def compute_FFT_OneCycle(FR, TF, downsample):
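    # Average the firing rate over single cycles of the temporal frequency TF and
    # return the DC component F0 (half the zero-frequency FFT amplitude, i.e. the
    # cycle mean) and the first-harmonic amplitude F1.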
    one_cyc = int((1000. / downsample) / TF)
FR_cyc = list(chunks(FR, one_cyc))
if (TF == 15. or TF == 8.):
FR_cyc = FR_cyc[:-1]
FR_cyc_avg = np.mean(FR_cyc, axis=0)
y = FR_cyc_avg
AMP = 2 * np.abs(fft(y) / len(y))
F0 = 0.5 * AMP[0]
assert (F0 - np.mean(y) < 1.e-4)
F1 = AMP[1]
return F0, F1
##################################################
def create_ff_mov(frame_rate, tst, tend, xrng, yrng):
    ff_mov_on = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend)  # remaining arguments assumed from FullFieldFlashMovie(row_range, col_range, t_on, t_off, ...)
import cv2
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
binary_warped = mpimg.imread('warped_example.jpg')
left_fit = np.array([ 2.13935315e-04, -3.77507980e-01, 4.76902175e+02])
right_fit = np.array([4.17622148e-04, -4.93848953e-01, 1.11806170e+03])
def fit_poly(img_shape, leftx, lefty, rightx, righty):
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
return left_fitx, right_fitx, ploty
def search_around_poly(binary_warped):
margin = 100
nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
# newly added libraries
import copy
import wandb
import time
import math
import csv
import shutil
from tqdm import tqdm
import torch
import numpy as np
import pandas as pd
from client import Client
from config import *
import scheduler as sch
class FedAvgTrainer(object):
def __init__(self, dataset, model, device, args):
self.device = device
self.args = args
[client_num, _, _, train_data_global, _, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] = dataset
# record the client number of the dataset
self.client_num = client_num
self.class_num = class_num
# setup dataset
self.data_shape = list(train_data_global[0][0].size())
self.train_data_local_num_dict = train_data_local_num_dict
self.test_data_local_dict = test_data_local_dict
self.train_data_local_dict = train_data_local_dict
if args.partition_method == "noniid":
logger.info("-----------non-i.i.d transform----------")
# generate the non i.i.d dataset
self.gene_non_iid_dataset(train_data_global, "tmp")
# read the non i.i.d dataset
self.read_non_iid_dataset("tmp")
# rm the tmp directory
shutil.rmtree(os.path.join('.', 'tmp'))
self.client_list = []
self.setup_clients(train_data_local_num_dict, train_data_local_dict, test_data_local_dict)
# initialize the recorder of invalid dataset
self.invalid_datasets = dict()
# time counter starts from the first line
self.time_counter = channel_data['Time'][0]
# initialize the cycle_num here
self.cycle_num = 0
# initialize the scheduler function
if self.args.method == "sch_pn_method_1" or self.args.method == "sch_pn_method_1_empty":
for _ in range(100):
self.scheduler = sch.Scheduler_PN_method_1()
client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
if len(client_indexes) > 5:
break
elif self.args.method == "sch_pn_method_2" or self.args.method == "sch_pn_method_2_empty":
for _ in range(100):
self.scheduler = sch.Scheduler_PN_method_2()
client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
if len(client_indexes) > 5:
break
elif self.args.method == "sch_pn_method_3" or self.args.method == "sch_pn_method_3_empty":
for _ in range(100):
self.scheduler = sch.Scheduler_PN_method_3()
client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
if len(client_indexes) > 5:
break
elif self.args.method == "sch_random":
self.scheduler = sch.sch_random
elif self.args.method == "sch_channel":
self.scheduler = sch.sch_channel
elif self.args.method == "sch_rrobin":
self.scheduler = sch.sch_rrobin
elif self.args.method == "sch_loss":
self.scheduler = sch.sch_loss
else:
self.scheduler = sch.sch_random
self.model = model
self.model_global = model(self.args, model_name=self.args.model, output_dim=self.class_num)
self.model_global.train()
def setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict):
logger.debug("############setup_clients (START)#############")
for client_idx in range(client_num_per_round):
c = Client(client_idx, train_data_local_dict[client_idx], test_data_local_dict[client_idx],
train_data_local_num_dict[client_idx], self.args, self.device)
self.client_list.append(c)
logger.debug("############setup_clients (END)#############")
def train(self):
"""
Global initialized values
"""
# maintain a lst for local losses
local_loss_lst = np.zeros((1, client_num_in_total))
# maintain a lst for local acc
_, dataset_acc_lst = self.local_test_on_all_clients(self.model_global, 0, True, False)
local_acc_lst = dataset_acc_lst[np.arange(client_num_in_total) % self.client_num]
# counting days
counting_days, reward = 0, 0
# initialize values for calculating iteration num
delta, rho, beta, rho_flag, beta_flag = np.random.rand(1)[0], np.random.rand(1)[0], np.random.rand(1)[0], True, True
# Initialize values for calculating FPF2 index
local_itr_lst = torch.zeros(self.args.comm_round, int(client_num_in_total)).to(self.device) # historical local iterations.
G_mat = torch.zeros(int(client_num_in_total)).to(self.device) # initial the value of G with zero
# if weight size is larger than THRESHOLD_WEIGHT_SIZE we will use a simpler method to calculate FPF
weight_size = sum([self.model_global.cpu().state_dict()[para].numpy().ravel().shape[0] for para in self.model_global.state_dict().keys()])
if weight_size < THRESHOLD_WEIGHT_SIZE:
A_mat = torch.ones(weight_size).to(self.device) # initial the value of A with ones.
local_w_diffs = torch.zeros((int(client_num_in_total), weight_size)).to(self.device)
else:
logger.warning("The weight size of the model {} is too large. Thus, we turn to use a more simple method to calculate FPF.".format(self.args.model))
LRU_itr_lst = torch.zeros(int(client_num_in_total)).to(self.device) # store the iteration gap for each client.
# show weight size for the model.
logger.debug("weight size: {}".format(weight_size))
"""
starts training, entering the loop of command round.
"""
Inform = {}
traffic = 0
for round_idx in range(self.args.comm_round):
logger.info("################Communication round : {}".format(round_idx))
# set the time_counter
self.time_counter = np.array(channel_data['Time'][channel_data['Time'] >= self.time_counter])[0]
logger.info("time_counter: {}".format(self.time_counter))
self.model_global.train()
# get client_indexes from scheduler
reward, loss_a, loss_c = 0, 0, 0
if (self.args.method)[:6] == "sch_pn":
if self.args.method[-5:] == "empty" or round_idx == 0:
client_indexes, local_itr = self.scheduler.sch_pn_empty(round_idx, self.time_counter)
else:
client_indexes, local_itr, (reward, loss_a, loss_c) = self.scheduler.sch_pn(round_idx, self.time_counter, loss_locals, FPF2_idx_lst, local_loss_lst, )
else:
if self.args.method == "sch_loss":
if round_idx == 0:
loss_locals = []
client_indexes, local_itr = self.scheduler(round_idx, self.time_counter, loss_locals)
else:
client_indexes, local_itr = self.scheduler(round_idx, self.time_counter)
# write to the scheduler csv
with open(scheduler_csv, mode = "a+", encoding='utf-8', newline='') as file:
csv_writer = csv.writer(file)
if round_idx == 0:
csv_writer.writerow(['time counter', 'client index', 'iteration'])
csv_writer.writerow([self.time_counter, str(client_indexes), local_itr])
file.flush()
logger.info("client_indexes = " + str(client_indexes))
traffic += len(client_indexes)
# write one line to trainer_csv
trainer_csv_line = [round_idx, self.time_counter, str(client_indexes), traffic]
# contribute to time counter
self.tx_time(list(client_indexes)) # transmit time
# store the last model's training parameters.
last_w = copy.deepcopy(self.model_global.cpu().state_dict())
# local Initialization
w_locals, loss_locals, beta_locals, rho_locals, cycle_locals = [], [], [], [], []
"""
for scalability: following the original FedAvg algorithm, we uniformly sample a fraction of clients in each round.
Instead of changing the 'Client' instances, our implementation keeps the 'Client' instances and then updates their local dataset
"""
for idx in range(len(client_indexes)):
# update dataset
client = self.client_list[idx]
client_idx = client_indexes[idx]
dataset_idx = client_idx % self.client_num
if dataset_idx in self.invalid_datasets.keys():
current_idx = self.invalid_datasets[dataset_idx]
else:
current_idx = dataset_idx
while True:
client.update_local_dataset(current_idx, self.train_data_local_dict[current_idx],
self.test_data_local_dict[current_idx],
self.train_data_local_num_dict[current_idx])
# train on new dataset
                    # add a new parameter "local_itr" to the function "client.train()"
# add a new return value "time_interval" which is the time consumed for training model in client.
w, loss, local_beta, local_rho, local_acc, local_cycle = client.train(net=copy.deepcopy(self.model_global).to(self.device), local_iteration = local_itr)
if loss != None and local_beta != None and local_rho != None and local_acc != None:
if dataset_idx != current_idx:
self.invalid_datasets[dataset_idx] = current_idx
break
current_idx = np.random.randint(self.class_num)
logger.warning("changing dataset for {} to {}".format(client_idx, current_idx))
# record current cycle
cycle_locals.append([client.get_sample_number(), local_cycle])
# record current w into w_locals
w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
# record current loss into loss_locals
loss_locals.append(loss)
# record local beta into beta_locals
beta_locals.append(local_beta)
# record local beta into rho_locals
rho_locals.append(local_rho)
# update the local_loss_lst
local_loss_lst[0, client_idx] = loss
# update local_w_diffs
if weight_size < THRESHOLD_WEIGHT_SIZE:
local_w_diffs[client_idx, :] = torch.cat([w[para].reshape((-1, )) - last_w[para].reshape((-1, )) for para in self.model_global.state_dict().keys()]).to(self.device)
# update local_acc_lst
local_acc_lst[client_idx] = local_acc
# loss
logger.info('Client {:3d}, loss {:.3f}'.format(client_idx, loss))
# update global weights
w_glob = self.aggregate(w_locals)
# copy weight to net_glob
self.model_global.load_state_dict(w_glob)
# update the time counter
if list(client_indexes):
self.time_counter += math.ceil(LOCAL_TRAINING_TIME)
logger.debug("time_counter after training: {}".format(self.time_counter))
trainer_csv_line += [self.time_counter-trainer_csv_line[1], np.var(local_loss_lst), str(loss_locals), np.var(loss_locals), np.var(local_acc_lst)]
# print loss
if not loss_locals:
logger.info('Round {:3d}, Average loss None'.format(round_idx))
trainer_csv_line.append('None')
else:
loss_avg = sum(loss_locals) / len(loss_locals)
logger.info('Round {:3d}, Average loss {:.3f}'.format(round_idx, loss_avg))
trainer_csv_line.append(loss_avg)
if cycle_locals:
cycle_locals = np.asarray(cycle_locals)
logger.info('Elapsed cycles {:.3f}'.format(np.sum(cycle_locals[:, 0] * cycle_locals[:, 1]) / np.sum(cycle_locals[:, 0])))
# local test on all client.
if round_idx % self.args.frequency_of_the_test == 0 or round_idx == self.args.comm_round - 1:
test_acc, _ = self.local_test_on_all_clients(self.model_global, round_idx, EVAL_ON_TRAIN, True)
trainer_csv_line.append(test_acc)
# write headers for csv
with open(trainer_csv, mode = "a+", encoding='utf-8', newline='') as file:
csv_writer = csv.writer(file)
if round_idx == 0:
csv_writer.writerow(['round index', 'time counter', 'client index', 'traffic', 'train time', 'fairness',
'local loss', "local loss var", "local acc var", 'global loss', 'test accuracy'])
csv_writer.writerow(trainer_csv_line)
file.flush()
# log on wandb
Inform["reward"] = reward
wandb.log(Inform)
Inform = {
"reward": reward, "loss_a": loss_a,
"loss_c": loss_c, "round": round_idx,
"traffic": traffic,
"beta": beta, "rho": rho, "delta": delta,
"cum_time": trainer_csv_line[1]+self.cycle_num*59361,
"local_itr": local_itr,
"client_num": len(client_indexes),
"C3": (rho*delta)/beta,
"local_loss_var": np.var(loss_locals),
"local_acc_var": np.var(local_acc_lst)
}
# update FPF index list
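            # FPF2 is a per-client fairness index: the norm of its accumulated local
            # update (or, for large models, its iterations since last selection)
            # divided by the moving average G of its recent local iterations.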
if weight_size < THRESHOLD_WEIGHT_SIZE:
FPF2_idx_lst = torch.norm(local_w_diffs * A_mat, dim = 1) / G_mat
else:
FPF2_idx_lst = LRU_itr_lst / G_mat
FPF2_idx_lst = FPF2_idx_lst.cpu().numpy()
FPF2_idx_lst[np.bitwise_or(np.isnan(FPF2_idx_lst), np.isinf(FPF2_idx_lst))] = 0
# FPF2_idx_lst = FPF2_idx_lst / max(FPF2_idx_lst)
FPF2_idx_lst[np.bitwise_or(np.isnan(FPF2_idx_lst), np.isinf(FPF2_idx_lst))] = 0
# write FPF index list to csv
with open(FPF_csv, mode = "a+", encoding='utf-8', newline='') as file:
csv_writer = csv.writer(file)
if round_idx == 0:
csv_writer.writerow(['time counter'] + ["car_"+str(i) for i in range(client_num_in_total)])
csv_writer.writerow([trainer_csv_line[1]]+FPF2_idx_lst.tolist())
file.flush()
# update beta & delta & rho
if w_locals and loss_locals:
sample_nums = np.array([sample_num for sample_num, _ in w_locals])
local_w_diff_norms = np.array([torch.norm(torch.cat([w[para].reshape((-1, )) - w_glob[para].reshape((-1, )) for para in self.model_global.state_dict().keys()])).item() for _, w in w_locals])
# calculate delta
delta_tmp = np.sum(sample_nums * local_w_diff_norms) / np.sum(sample_nums) / self.args.lr
if (not np.isnan(delta_tmp) and not np.isinf(delta_tmp)):
delta = delta_tmp
# update rho
rho_tmp = np.sum(sample_nums * np.array(rho_locals)) / np.sum(sample_nums)
if rho_tmp > rho or rho_flag:
if (not np.isnan(rho_tmp) and not np.isinf(rho_tmp)) and rho_tmp < THRESHOLD_RHO:
rho, rho_flag = rho_tmp, False
# update beta
beta_tmp = np.sum(sample_nums * np.array(beta_locals)) / np.sum(sample_nums)
if beta_tmp > beta or beta_flag:
if (not np.isnan(beta_tmp) and not np.isinf(beta_tmp)) and beta_tmp < THRESHOLD_BETA:
beta, beta_flag = beta_tmp, False
if self.args.method == "sch_pn_method_1" or self.args.method == "sch_pn_method_1_empty":
self.scheduler.calculate_itr_method_1(delta)
elif self.args.method == "sch_pn_method_2" or self.args.method == "sch_pn_method_2_empty":
self.scheduler.calculate_itr_method_2(rho, beta, delta)
elif self.args.method == "sch_pn_method_3" or self.args.method == "sch_pn_method_3_empty":
self.scheduler.calculate_itr_method_3(rho, beta, delta)
if weight_size < THRESHOLD_WEIGHT_SIZE:
# update local_w_diffs
global_w_diff = torch.cat([w_glob[para].reshape((-1, )) - last_w[para].reshape((-1, )) for para in self.model_global.state_dict().keys()]).to(self.device)
local_w_diffs[list(set(list(range(client_num_in_total))) - set(list(client_indexes))), :] -= global_w_diff
# update A_mat
A_mat = A_mat * (1 - 1/G2) + (global_w_diff) / G2 / global_w_diff.mean()
# Update local_itr_lst
if list(client_indexes) and local_itr > 0: # update the following values only when client_indexes is non-empty and local_itr > 0
local_itr_lst[round_idx, list(client_indexes)] = float(local_itr)
if weight_size >= THRESHOLD_WEIGHT_SIZE:
LRU_itr_lst += float(local_itr)
LRU_itr_lst[list(client_indexes)] = 0
# update G_mat
G_mat = G_mat * (1 - 1 / G1) + local_itr_lst[round_idx, :] / G1
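# Hedged illustration of the updates above: A_mat and G_mat are exponential
# moving averages of the form new = old * (1 - 1/G) + x / G, with smoothing
# factors 1/G2 and 1/G1. With a hypothetical G1 = 10 and a constant per-round
# local_itr of 5, G_mat drifts toward 5:
#   g = 0.0
#   for _ in range(50):
#       g = g * (1 - 1 / 10) + 5 / 10  # g -> 5.0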
# if the current time_counter has exceeded the channel table, simply stop early
if self.time_counter >= time_cnt_max[counting_days]:
counting_days += 1
if counting_days % RESTART_DAYS == 0:
if self.args.method == "find_constant" and loss_locals:
w_optimal, loss_optimal = self.central_train()
w = torch.cat([param.view(-1) for param in self.model_global.parameters()])
w_diff_optimal = torch.norm(w.cpu() - w_optimal.cpu())
logger.info("The norm of difference between w_optmal & w: {}".format(w_diff_optimal.item()))
logger.info("The norm of difference between loss & loss_optimal: {}".format(loss_avg - loss_optimal))
break
logger.info("################reinitialize model")
self.model_global = self.model(self.args, model_name=self.args.model, output_dim=self.class_num)
delta, rho, beta, rho_flag, beta_flag = np.random.rand(1)[0], np.random.rand(1)
from unittest import TestCase
import numpy as np
from numpy.testing import assert_allclose
from skfem import BilinearForm, asm, solve, condense, project
from skfem.mesh import MeshTri, MeshTet, MeshHex, MeshQuad, MeshLine
from skfem.assembly import InteriorBasis, FacetBasis, Dofs
from skfem.element import (ElementVectorH1, ElementTriP2, ElementTriP1,
ElementTetP2, ElementHexS2, ElementHex2,
ElementQuad2, ElementLineP2)
class TestCompositeSplitting(TestCase):
def runTest(self):
"""Solve Stokes problem, try splitting and other small things."""
m = MeshTri()
m.refine()
m.define_boundary('centreline', lambda x: x[0] == .5,
boundaries_only=False)
m.refine(3)
e = ElementVectorH1(ElementTriP2()) * ElementTriP1()
m.define_boundary('up', lambda x: x[1] == 1.)
m.define_boundary('rest', lambda x: x[1] != 1.)
basis = InteriorBasis(m, e)
self.assertEqual(
basis.get_dofs(m.boundaries['centreline']).all().size,
(2 + 1) * (2**(1 + 3) + 1) + 2 * 2**(1 + 3))
self.assertEqual(basis.find_dofs()['centreline'].all().size,
(2 + 1) * (2**(1 + 3) + 1) + 2 * 2**(1 + 3))
@BilinearForm
def bilinf(u, p, v, q, w):
from skfem.helpers import grad, ddot, div
return (ddot(grad(u), grad(v)) - div(u) * q - div(v) * p
- 1e-2 * p * q)
S = asm(bilinf, basis)
D = basis.find_dofs(skip=['u^2'])
x = basis.zeros()
x[D['up'].all('u^1^1')] = .1
x = solve(*condense(S, basis.zeros(), x=x, D=D))
(u, u_basis), (p, p_basis) = basis.split(x)
self.assertEqual(len(u), m.p.shape[1] * 2 + m.facets.shape[1] * 2)
self.assertEqual(len(p), m.p.shape[1])
self.assertTrue(np.sum(p - x[basis.nodal_dofs[2]]) < 1e-8)
U, P = basis.interpolate(x)
self.assertTrue(isinstance(U.value, np.ndarray))
self.assertTrue(isinstance(P.value, np.ndarray))
self.assertTrue((basis.doflocs[:, D['up'].all()][1] == 1.).all())
class TestCompositeFacetAssembly(TestCase):
def runTest(self):
m = MeshTri()
fbasis1 = FacetBasis(m, ElementTriP1() * ElementTriP1(),
facets=m.facets_satisfying(lambda x: x[0] == 0))
fbasis2 = FacetBasis(m, ElementTriP1(),
facets=m.facets_satisfying(lambda x: x[0] == 0))
@BilinearForm
def uv1(u, p, v, q, w):
return u * v + p * q
@BilinearForm
def uv2(u, v, w):
return u * v
A = asm(uv1, fbasis1)
B = asm(uv2, fbasis2)
assert_allclose(A[0].todense()[0, ::2],
B[0].todense()[0])
class TestFacetExpansion(TestCase):
mesh_type = MeshTet
elem_type = ElementTetP2
def runTest(self):
m = self.mesh_type()
m.refine(2)
basis = InteriorBasis(m, self.elem_type())
for fun in [lambda x: x[0] == 0,
lambda x: x[0] == 1,
lambda x: x[1] == 0,
lambda x: x[1] == 1,
lambda x: x[2] == 0,
lambda x: x[2] == 1]:
arr1 = basis.find_dofs({
'kek': m.facets_satisfying(fun)
})['kek'].edge['u']
arr2 = basis.edge_dofs[:, m.edges_satisfying(fun)]
assert_allclose(arr1, arr2.flatten())
class TestFacetExpansionHexS2(TestFacetExpansion):
mesh_type = MeshHex
elem_type = ElementHexS2
class TestFacetExpansionHex2(TestFacetExpansionHexS2):
elem_type = ElementHex2
class TestInterpolatorTet(TestCase):
mesh_type = MeshTet
element_type = ElementTetP2
nrefs = 1
def runTest(self):
m = self.mesh_type()
m.refine(self.nrefs)
basis = InteriorBasis(m, self.element_type())
x = project(lambda x: x[0] ** 2, basis_to=basis)
fun = basis.interpolator(x)
X = np.linspace(0, 1, 10)
import numpy as np
import torch
from nmtg.data.dataset import Dataset
from nmtg.data.text_lookup_dataset import TextLookupDataset
class ParallelDataset(Dataset):
def __init__(self, src_data: TextLookupDataset, tgt_data: TextLookupDataset=None, src_lang=None, tgt_lang=None):
self.src_data = src_data # technically a duplicate, but it's fine
self.tgt_data = tgt_data
self.src_lang = src_lang
self.tgt_lang = tgt_lang
def __getitem__(self, index):
source = self.src_data[index]
res = {'id': index, 'src_indices': source, 'src_size': len(source)}
if self.src_lang is not None:
res['src_lang'] = self.src_lang
if self.tgt_data is not None:
target = self.tgt_data[index]
target_input = target[:-1]
target_output = target[1:]
res['tgt_input'] = target_input
res['tgt_output'] = target_output
res['tgt_size'] = len(target_output)
if self.tgt_lang is not None:
res['tgt_lang'] = self.tgt_lang
return res
def __len__(self):
return len(self.src_data)
def collate_samples(self, samples):
src_batch = self.src_data.collate_samples([x['src_indices'] for x in samples])
res = {'src_indices': src_batch['indices'],
'src_size': src_batch['size'],
'src_lengths': src_batch['lengths'],
'id': torch.tensor([x['id'] for x in samples])}
if 'src_lang' in samples[0]:
res['src_lang'] = [x['src_lang'] for x in samples]
if self.tgt_data is not None:
target_input = self.tgt_data.collate_samples([x['tgt_input'] for x in samples])
target_output = self.tgt_data.collate_samples([x['tgt_output'] for x in samples])
res['tgt_input'] = target_input['indices']
res['tgt_output'] = target_output['indices']
res['tgt_size'] = target_output['size']
res['tgt_lengths'] = target_output['lengths']
if 'tgt_lang' in samples[0]:
res['tgt_lang'] = [x['tgt_lang'] for x in samples]
return res
class MultiParallelDataset(Dataset):
"""
A dataset containing a parallel text in two or more languages.
When indexing, the indexing order is target language -> source language -> sentence index.
"""
def __init__(self, exclude_pairs=None, src_bos=False, tgt_lang_bos=False, **datasets):
assert len(datasets) > 1
self.datasets = datasets
self.src_bos = src_bos
self.tgt_lang_bos = tgt_lang_bos
self.languages = sorted(self.datasets.keys())
self.num_sentences = len(self.datasets[self.languages[0]])
pad_index = self.datasets[self.languages[0]].pad()
align_right = self.datasets[self.languages[0]].align_right
self.collate_fn = self.datasets[self.languages[0]].collate_samples
assert all(len(dataset) == self.num_sentences for dataset in self.datasets)
assert all(dataset.dictionary.pad() == pad_index for dataset in self.datasets)
assert all(dataset.align_right == align_right for dataset in self.datasets)
if exclude_pairs is not None:
assert all(len(pair) == 2 for pair in exclude_pairs)
assert all(lang in self.datasets for pair in exclude_pairs for lang in pair)
self.exclude_pairs = exclude_pairs if exclude_pairs is not None else []
self.pairs_per_sentence = len(self.datasets) * (len(self.datasets) - 1) - len(self.exclude_pairs)
exclude_pairs = [(self.languages.index(s), self.languages.index(t)) for s, t in self.exclude_pairs]
self._excluded_indices = sorted([s * (len(self.datasets) - 1) + (t - 1 if t >= s else t)
for s, t in exclude_pairs])
def __len__(self):
return self.num_sentences * self.pairs_per_sentence
def _get_src(self, src_lang, tgt_lang, index):
src_dataset = self.datasets[src_lang]
src_dataset.lang = tgt_lang
src_dataset.bos = self.src_bos
source = src_dataset[index]
return source
def _get_tgt(self, tgt_lang, index):
tgt_dataset = self.datasets[tgt_lang]
tgt_dataset.lang = tgt_lang if self.tgt_lang_bos else None
tgt_dataset.bos, tgt_dataset.eos = True, True
target = tgt_dataset[index]
return target
def __getitem__(self, index):
pair_index, sentence_index = divmod(index, self.num_sentences)
for excluded in self._excluded_indices:
if pair_index >= excluded:
pair_index += 1
else:
break
tgt_index, src_index = divmod(pair_index, len(self.languages) - 1)
if src_index >= tgt_index:
src_index += 1
source = self._get_src(self.languages[src_index], self.languages[tgt_index], sentence_index)
target = self._get_tgt(self.languages[tgt_index], sentence_index)
res = {'sentence_id': sentence_index,
'src_lang': self.languages[src_index], 'tgt_lang': self.languages[tgt_index],
'src_indices': source, 'src_size': len(source),
'tgt_input': target[:-1], 'tgt_output': target[1:], 'tgt_size': len(target) - 1}
return res
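# Hedged worked example of the index decomposition above (hypothetical sizes):
# with languages = ['de', 'en', 'fr'], num_sentences = 100 and no excluded pairs,
# index 250 gives pair_index, sentence_index = divmod(250, 100) = (2, 50); then
# tgt_index, src_index = divmod(2, 2) = (1, 0), so the sample is sentence 50
# translated from source 'de' into target 'en'.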
def collate_samples(self, samples):
src_batch = self.collate_fn([x['src_indices'] for x in samples])
tgt_input = self.collate_fn([x['tgt_input'] for x in samples])
tgt_output = self.collate_fn([x['tgt_output'] for x in samples])
res = {'src_lang': [x['src_lang'] for x in samples],
'tgt_lang': [x['tgt_lang'] for x in samples],
'src_indices': src_batch['indices'], 'src_size': src_batch['size'], 'src_lengths': src_batch['lengths'],
'tgt_input': tgt_input['indices'], 'tgt_output': tgt_output['indices'],
'tgt_size': tgt_output['size'], 'tgt_lengths': tgt_output['lengths']}
return res
def concat_lengths(self, **lengths):
assert all(lang in lengths for lang in self.languages)
src_langs, tgt_langs = zip(*self.get_pairs())
src_lengths = np.concatenate([lengths[lang] for lang in src_langs])
tgt_lengths = np.concatenate([lengths[lang] for lang in tgt_langs])
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test file for GraphRunner."""
import unittest
from core.data_types import Float32, Uint32, Int32, QUANTIZED_NOT_PACKED
from core.graph import Graph, GraphRunner
from core.optimizer import Optimizer
from core.operators import Add, AveragePool, BatchNormalization, Constant, Conv, Identity, Input, \
MaxPool, Operator, Output, Transpose, QTZ_binary_mean_scaling, QTZ_linear_mid_tread_half, Reshape, Softmax
import numpy as np
from typing import Any, Dict, List, Tuple
class TestOptimizer(unittest.TestCase):
"""Test class for GraphRunner."""
def test_precompute1(self) -> None:
"""Test code for precompute optimizer."""
data1 = np.random.rand(3, 2, 2, 3)
data2 = np.random.rand(3, 2, 2, 3)
data3 = np.random.rand(3, 2, 2, 3)
graph1 = self.create_sample_graph(data1, data2, data3)
graph2 = self.create_precompute_graph(data1, data2, data3)
optim = Optimizer()
optim.precompute(graph1)
# for debug
# from frontend import TensorFlowIO
# from core.model import Model
# import os
# io = TensorFlowIO()
# tmp_dir = os.path.join('tmp')
# if not os.path.exists(tmp_dir):
# os.mkdir(tmp_dir)
# path = os.path.join('tmp', 'test_precompute.pb')
# model = Model()
# model.graph = graph1
# io.write(model, path)
self.assertEqual(graph1, graph2, 'precompute failed.')
print("Precompute test #1 passed!")
def test_precompute2(self) -> None:
"""Test code for precompute optimizer."""
data1 = np.random.rand(3, 2, 2, 3)
data2 = np.random.rand(3, 2, 2, 3)
data3 = np.random.rand(3, 2, 2, 3)
graph1 = self.create_sample_graph(data1, data2, data3)
graph2, scaling1, scaling2 = self.create_quantized_graph(data1, data2, data3)
optim = Optimizer()
optim.precompute(graph1, hard_quantized=True)
self.assertEqual(graph1, graph2, 'precompute failed.')
self.assertAlmostEqual(graph1.get_op('conv2').quantizer.scaling_factor, scaling2) # type: ignore
print("Precompute test #2 passed!")
def test_precompute3(self) -> None:
"""Test code for precompute optimizer."""
data1 = np.random.rand(3, 2, 2, 3)
data2 = np.random.rand(3, 2, 2, 3)
data3 = np.random.rand(3, 2, 2, 3)
graph1 = self.create_sample_graph3(data1, data2, data3)
graph2, scaling2, scaling3 = self.create_quantized_graph2(data1, data2, data3)
optim = Optimizer()
optim.precompute(graph1, hard_quantized=True)
self.assertEqual(graph1, graph2, 'precompute failed.')
self.assertAlmostEqual(graph1.get_op('conv2').quantizer.scaling_factor, scaling2) # type: ignore
self.assertAlmostEqual(graph1.get_op('conv3').quantizer.scaling_factor, scaling3) # type: ignore
print("Precompute test #3 passed!")
def test_transpose_NHWC(self) -> None:
"""Test code for transpose_NHWC optimizer."""
data = np.random.rand(3, 2, 2, 1)
graph1 = self.create_sample_graph2(data)
graph2 = self.create_transposed_graph(data)
optim = Optimizer()
optim.transpose_NHWC(graph1)
self.assertEqual(graph1, graph2, 'transpose to NHWC failed.')
print("Transpose_NHWC test #1 passed!")
def create_sample_graph(self, data1: np.ndarray, data2: np.ndarray, data3: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input(
'placeholder',
[1, 5, 5, 3],
Float32(),
)
# constant and internal nodes
w = Constant(
'weight',
Float32(),
data1
)
i = Identity(
'identity1',
[3, 2, 2, 3],
Float32(),
{'input': w}
)
t = Transpose(
'transpose1',
[3, 2, 2, 3],
Float32(),
{'data': i},
perm=[3, 2, 1, 0]
)
q = QTZ_binary_mean_scaling(
'qtz1',
[3, 2, 2, 3],
Float32(),
{'input': t}
)
# Conv
conv1 = Conv(
'conv1',
[1, 4, 4, 3],
Float32(),
{'X': x, 'W': q},
kernel_shape=[2, 2]
)
i2 = Identity(
'identity2',
[1, 4, 4, 3],
Float32(),
{'input': conv1}
)
s1 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s2 = Constant(
'aq_const2',
Float32(),
np.array(2)
)
aq = QTZ_linear_mid_tread_half(
'aqtz1',
[1, 4, 4, 3],
Float32(),
{'X': i2, 'Y': s1, 'Z': s2}
)
dummy = Transpose(
'dummy',
[1, 4, 4, 3],
Float32(),
{'data': aq},
perm=[0, 1, 2, 3]
)
w2 = Constant(
'weight2',
Float32(),
data2
)
q2 = QTZ_binary_mean_scaling(
'qtz2',
[3, 2, 2, 3],
Float32(),
{'input': w2}
)
conv2 = Conv(
'conv2',
[1, 3, 3, 3],
Float32(),
{'X': dummy, 'W': q2},
kernel_shape=[2, 2]
)
s3 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s4 = Constant(
'aq_const2',
Float32(),
np.array(2)
)
aq2 = QTZ_linear_mid_tread_half(
'aqtz2',
[1, 3, 3, 3],
Float32(),
{'X': conv2, 'Y': s3, 'Z': s4}
)
w3 = Constant(
'weight3',
Float32(),
data3
)
i3 = Identity(
'identity3',
[1, 3, 3, 3],
Float32(),
{'input': aq2}
)
conv3 = Conv(
'conv3',
[1, 2, 2, 3],
Float32(),
{'X': i3, 'W': w3},
kernel_shape=[2, 2]
)
# One output
y = Output(
'output',
[1, 2, 2, 3],
Float32(),
{'input': conv3}
)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
def binary_mean_scaling(self, data: np.ndarray) -> Tuple[np.float32, np.ndarray]:
return np.mean(np.abs(data)), np.sign(data).astype(np.float32)
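# Hedged illustration of binary_mean_scaling (made-up weights): for
# data = np.array([[0.5, -1.0], [2.0, -0.5]]) the scaling factor is
# np.mean(np.abs(data)) = 1.0 and the quantized weights are the signs
# [[1., -1.], [1., -1.]], so data is approximated by scaling * signs.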
def create_precompute_graph(self, data1: np.ndarray, data2: np.ndarray, data3: np.ndarray) -> Graph:
graph = Graph()
# two inputs
x = Input(
'placeholder',
[1, 5, 5, 3],
Float32(),
)
scaling1, qdata = self.binary_mean_scaling(data1.transpose([3, 2, 1, 0]))
w = Constant(
'weight',
Float32(),
qdata * scaling1
)
# Conv
conv1 = Conv(
'conv1',
[1, 4, 4, 3],
Float32(),
{'X': x, 'W': w},
kernel_shape=[2, 2]
)
s1 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s2 = Constant(
'aq_const2',
Float32(),
np.array(2)
)
aq = QTZ_linear_mid_tread_half(
'aqtz1',
[1, 4, 4, 3],
Float32(),
{'X': conv1, 'Y': s1, 'Z': s2}
)
dummy = Transpose(
'dummy',
[1, 4, 4, 3],
Float32(),
{'data': aq},
perm=[0, 1, 2, 3]
)
scaling2, qdata2 = self.binary_mean_scaling(data2)
w2 = Constant(
'weight2',
Float32(),
qdata2 * scaling2
)
conv2 = Conv(
'conv2',
[1, 3, 3, 3],
Float32(),
{'X': dummy, 'W': w2},
kernel_shape=[2, 2]
)
s3 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s4 = Constant(
'aq_const2',
Float32(),
np.array(2)
)
aq2 = QTZ_linear_mid_tread_half(
'aqtz2',
[1, 3, 3, 3],
Float32(),
{'X': conv2, 'Y': s3, 'Z': s4}
)
w3 = Constant(
'weight3',
Float32(),
data3
)
conv3 = Conv(
'conv3',
[1, 2, 2, 3],
Float32(),
{'X': aq2, 'W': w3},
kernel_shape=[2, 2]
)
# One output
y = Output(
'output',
[1, 2, 2, 3],
Float32(),
{'input': conv3}
)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
def create_quantized_graph(self, data: np.ndarray, data2: np.ndarray, data3: np.ndarray) \
-> Tuple[Graph, np.float32, np.float32]:
graph = Graph()
# two inputs
x = Input(
'placeholder',
[1, 5, 5, 3],
Float32(),
)
from modules.packer import Packer
packer = Packer(1, 32)
data = data.transpose([3, 2, 1, 0])
scaling, qdata = self.binary_mean_scaling(data)
shape = list(data.shape)
w = Constant(
'weight',
Float32(),
qdata * scaling,
)
q = QTZ_binary_mean_scaling(
'qtz1',
shape,
Float32(),
{'input': w}
)
q.scaling_factor = scaling
# Conv
conv1 = Conv(
'conv1',
[1, 4, 4, 3],
Float32(),
{'X': x, 'W': w},
kernel_shape=[2, 2],
)
s1 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s2 = Constant(
'aq_const2',
Float32(),
np.array(2)
)
aq = QTZ_linear_mid_tread_half(
'aqtz1',
[1, 4, 4, 3],
QUANTIZED_NOT_PACKED(),
{'X': conv1, 'Y': s1, 'Z': s2}
)
dummy = Transpose(
'dummy',
[1, 4, 4, 3],
QUANTIZED_NOT_PACKED(),
{'data': aq},
perm=[0, 1, 2, 3]
)
scaling2, qdata2 = self.binary_mean_scaling(data2)
w2 = Constant(
'weight2',
Uint32(),
packer.run(qdata2),
packed=True,
actual_shape=[3, 2, 2, 3]
)
# quantizer connected to conv2 as 'conv2.quantizer'
q2 = QTZ_binary_mean_scaling(
'qtz2',
[3, 2, 2, 3],
Uint32(),
{'input': w2}
)
q2.scaling_factor = scaling2
conv2 = Conv(
'conv2',
[1, 3, 3, 3],
Float32(),
{'X': dummy, 'W': w2},
kernel_shape=[2, 2],
quantized=True
)
conv2.quantizer = q2
s3 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s4 = Constant(
'aq_const2',
Float32(),
np.array(2)
)
aq2 = QTZ_linear_mid_tread_half(
'aqtz2',
[1, 3, 3, 3],
Float32(),
{'X': conv2, 'Y': s3, 'Z': s4}
)
w3 = Constant(
'weight3',
Float32(),
data3
)
conv3 = Conv(
'conv3',
[1, 2, 2, 3],
Float32(),
{'X': aq2, 'W': w3},
kernel_shape=[2, 2]
)
# One output
y = Output(
'output',
[1, 2, 2, 3],
Float32(),
{'input': conv3}
)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph, scaling, scaling2
def create_sample_graph2(self, data: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input(
'placeholder',
[3, 5, 5, 1],
Float32(),
dimension_format='CWHN'
)
# constant and internal nodes
w = Constant(
'weight',
Float32(),
data,
dimension_format='CWHN'
)
i = Identity(
'identity1',
[3, 2, 2, 1],
Float32(),
{'input': w},
dimension_format='CWHN'
)
q = QTZ_binary_mean_scaling(
'qtz1',
[3, 2, 2, 1],
Float32(),
{'input': i},
dimension_format='CWHN'
)
# Conv
conv = Conv(
'conv',
[3, 4, 4, 1],
Float32(),
{'X': x, 'W': q},
kernel_shape=[2, 2],
dimension_format='CWHN'
)
rs = Reshape(
'reshape',
[1, 48],
Float32(),
{'data': conv}
)
# One output
y = Output(
'output',
[1, 48],
Float32(),
{'input': rs},
)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
def create_transposed_graph(self, data: np.ndarray) -> Graph:
graph = Graph()
data = data.transpose([3, 2, 1, 0])
# input
x = Input(
'placeholder',
[1, 5, 5, 3],
Float32(),
dimension_format='NHWC'
)
# constant and internal nodes
w = Constant(
'weight',
Float32(),
data,
dimension_format='NHWC'
)
i = Identity(
'identity1',
[1, 2, 2, 3],
Float32(),
{'input': w},
dimension_format='NHWC'
)
q = QTZ_binary_mean_scaling(
'qtz1',
[1, 2, 2, 3],
Float32(),
{'input': i},
dimension_format='NHWC'
)
# Conv
conv = Conv(
'conv',
[1, 4, 4, 3],
Float32(),
{'X': x, 'W': q},
kernel_shape=[2, 2],
dimension_format='NHWC'
)
rs = Reshape(
'reshape',
[1, 48],
Float32(),
{'data': conv}
)
# One output
y = Output(
'output',
[1, 48],
Float32(),
{'input': rs},
)
# add ops to the graph
graph.add_op_and_inputs(y)
return graph
def create_sample_graph3(self, data1: np.ndarray, data2: np.ndarray, data3: np.ndarray) -> Graph:
graph = Graph()
# input
x = Input(
'placeholder',
[1, 5, 5, 3],
Float32(),
)
# constant and internal nodes
w = Constant(
'weight',
Float32(),
data1
)
q = QTZ_binary_mean_scaling(
'qtz1',
[3, 2, 2, 3],
Float32(),
{'input': w}
)
# Conv
conv1 = Conv(
'conv1',
[1, 4, 4, 3],
Float32(),
{'X': x, 'W': q},
kernel_shape=[2, 2]
)
i2 = Identity(
'identity2',
[1, 4, 4, 3],
Float32(),
{'input': conv1}
)
s1 = Constant(
'aq_const1',
Float32(),
np.array(1)
)
s2 = Constant(
'aq_const2',
Float32(),
np.array(2)
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cunumeric as num
def test():
x = num.array([1, 2, 3])
y = num.array([4, 5, 6])
z = num.vstack((x, y))
xnp = np.array([1, 2, 3])
ynp = np.array([4, 5, 6])
znp = np.vstack((xnp, ynp))
assert np.array_equal(z, znp)
from xgboost import XGBClassifier
from sklearn.metrics import classification_report, matthews_corrcoef
from sklearn import svm
from feature import Feature
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import VotingClassifier
import numpy as np
def init_data(file_name):
seqs_blosum62, label, work2vec, seqs_sr, seqs_dde, seqs_z, seqs_aac, seqs_dpc, seqs_ctdd, seqs_ctrial, seqs_ksctriad, seqs_gtpc, seqs_cksaagp, seqs_gaac, seqs_gdpc, seqs_ctdt, seqs_geary, seqs_cksaap, seqs_aaindex, seqs_paac = Feature(
file_name)
seqs_sr = np.array(seqs_sr, dtype=np.float32)
seqs_ksctriad = np.array(seqs_ksctriad, dtype=np.float32)
work2vec = np.array(work2vec, dtype=np.float32)
seqs_blosum62 = np.array(seqs_blosum62, dtype=np.float32)
seqs_ctrial = np.array(seqs_ctrial, dtype=np.float32)
seqs_gtpc = np.array(seqs_gtpc, dtype=np.float32)
seqs_cksaagp = np.array(seqs_cksaagp, dtype=np.float32)
seqs_gaac = np.array(seqs_gaac, dtype=np.float32)
seqs_cksaap = np.array(seqs_cksaap, dtype=np.float32)
seqs_aaindex = np.array(seqs_aaindex, dtype=np.float32)
seqs_paac = np.array(seqs_paac, dtype=np.float32)
seqs_gdpc = np.array(seqs_gdpc, dtype=np.float32)
# print(seqs_gtpc.shape)
seqs_ctdt = np.array(seqs_ctdt, dtype=np.float32)
seqs_ctdt = seqs_ctdt.reshape(seqs_ctdt.shape[0], -1)
seqs_dpc = np.array(seqs_dpc, dtype=np.float32)
seqs_dpc = seqs_dpc.reshape(seqs_dpc.shape[0], -1)
seqs_aac = np.array(seqs_aac, dtype=np.float32)
seqs_aac = seqs_aac.reshape(seqs_aac.shape[0], -1)
seqs_z = np.array(seqs_z, dtype=np.float32)
seqs_z = seqs_z.reshape(seqs_z.shape[0], -1)
seqs_geary = np.array(seqs_geary, dtype=np.float32)
seqs_geary = seqs_geary.reshape(seqs_geary.shape[0], -1)
seqs_dde = np.array(seqs_dde, dtype=np.float32)
seqs_dde = seqs_dde.reshape(seqs_dde.shape[0], -1)
work2vec = work2vec.reshape(work2vec.shape[0], -1)
seqs_ctrial = seqs_ctrial.reshape(seqs_ctrial.shape[0], -1)
seqs_ksctriad = seqs_ksctriad.reshape(seqs_ksctriad.shape[0], -1)
seqs_blosum62 = seqs_blosum62.reshape(seqs_blosum62.shape[0], -1)
seqs_gtpc = seqs_gtpc.reshape(seqs_gtpc.shape[0], -1)
seqs_cksaagp = seqs_cksaagp.reshape(seqs_cksaagp.shape[0], -1)
seqs_gaac = seqs_gaac.reshape(seqs_gaac.shape[0], -1)
seqs_cksaap = seqs_cksaap.reshape(seqs_cksaap.shape[0], -1)
seqs_aaindex = seqs_aaindex.reshape(seqs_aaindex.shape[0], -1)
seqs_dpc = seqs_dpc.reshape(seqs_dpc.shape[0], -1)
seqs_paac = seqs_paac.reshape(seqs_paac.shape[0], -1)
seqs_gdpc = seqs_gdpc.reshape(seqs_gdpc.shape[0], -1)
data_features = np.concatenate((seqs_blosum62, seqs_ksctriad, seqs_cksaap, seqs_aaindex), 1)
data_features1 = np.concatenate((seqs_blosum62, seqs_aac, seqs_ctdt, seqs_aaindex), 1)
# Import necessary packages
import torch
import argparse
import json
import utility_fun, model_fun
import matplotlib.pyplot as plt
import numpy as np
parser = argparse.ArgumentParser(description='Using Neural Networks for Image Classifier')
parser.add_argument('--image_path', action='store',
default = 'C:/Users/mohamedelbeah/home/Image_Classifier/flowers/test/76/image_02550.jpg',
help='Enter path to image')
parser.add_argument('--save_dir', action='store',
dest='save_directory', default = 'C:/Users/mohamedelbeah/home/Image_Classifier/checkpoint.pth',
help='Enter location to save checkpoint')
parser.add_argument('--top_k', action='store',
dest='topk', type=int, default = 5,
help='Enter number of top most likely classes to view, default is 5')
parser.add_argument('--cat_to_name', action='store',
dest='cat_name_dir', default = 'C:/Users/mohamedelbeah/home/Image_Classifier/cat_to_name.json',
help='Enter path to image.')
results = parser.parse_args()
save_dir = results.save_directory
image = results.image_path
top_k = results.topk
cat_names = results.cat_name_dir
with open(cat_names, 'r') as f:
cat_to_name = json.load(f)
# Loading the model
ResNet18 = model_fun.load_checkpoint(save_dir)
# Use GPU if it's available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Performing Prediction
ResNet18.to(device)
image_tensor = utility_fun.process_image(image)
image_tensor = image_tensor.unsqueeze_(0)
# Turn off gradients
with torch.no_grad():
# set model to evaluation mode
ResNet18.eval()
logps = ResNet18.forward(image_tensor.to(device))
# Top k probabilities and classes
ps = torch.exp(logps)
probs, classes = ps.topk(top_k, dim=1)
# Convert probs and classes to arrays
probs = probs.cpu().data.numpy().squeeze()
classes = classes.cpu().data.numpy().squeeze()
# Converting topk indices into actual flower names
idx_to_class = {value: key for key, value in ResNet18.class_to_idx.items()}
labels = [idx_to_class[key] for key in classes]
flower_names = [cat_to_name[key].title() for key in labels]
# TODO: Display an image along with the top 5 classes
fig, (ax1, ax2) = plt.subplots(figsize=(10, 8), ncols=2)
utility_fun.imshow(utility_fun.process_image(image), ax=ax1)
ax1.set_title(flower_names[0])
ax1.axis('off')
ax2.barh(np.arange(5)
# -*- coding: utf-8 -*-
"""Tests for calculating the spherical projection."""
import numpy as np
from numpy.testing import assert_allclose
from pylode.lib.spherical_harmonics import evaluate_spherical_harmonics
class TestSphericalHarmonics:
"""Test correct behavior of spherical harmonics code"""
def test_spherical_harmonics(self):
"""Start by evaluating spherical harmonics at some special points"""
vectors_zdir = np.array([[0, 0, 1], [0, 0, 2]])
lmax = 8
coeffs = evaluate_spherical_harmonics(vectors_zdir, lmax)
# spherical harmonics should be independent of length
assert np.linalg.norm(coeffs[0] - coeffs[1]) < 1e-14
# Compare to exact values of Y_lm for vectors in +z-direction
nonzero_indices = np.array([l**2 + l for l in range(lmax + 1)])
coeffs_nonzero = coeffs[0, nonzero_indices]
exact_vals = np.sqrt((2 * np.arange(lmax + 1) + 1) / 4 / np.pi)
assert np.linalg.norm(coeffs_nonzero - exact_vals) < 1e-14
# Make sure that all other values are (essentially) zero
assert abs(np.sum(coeffs[0]**2) - np.sum(exact_vals**2)) < 1e-14
def test_spherical_harmonics_x_y(self):
"""use vectors confined on x-y plane"""
rng = np.random.default_rng(3218932)
N = 10
lmax = 8
vectors_xy = np.zeros((N, 3))
from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
# from raft import RAFT
from core.ours import RAFT
import evaluate
import datasets
import flow_vis
import random
from torch.utils.tensorboard import SummaryWriter
from utils.scheduler import CosineAnnealingWarmupRestarts
try:
from torch.cuda.amp import GradScaler
except:
# dummy GradScaler for PyTorch < 1.6
class GradScaler:
def __init__(self):
pass
def scale(self, loss):
return loss
def unscale_(self, optimizer):
pass
def step(self, optimizer):
optimizer.step()
def update(self):
pass
# exclude extremely large displacements
MAX_FLOW = 400
SUM_FREQ = 100
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, sparse_lambda=1.0, gamma=0.8, max_flow=MAX_FLOW):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds[0])
flow_loss = 0.0
sparse_loss = 0.0
# exclude invalid pixels and extremely large displacements
mag = torch.sum(flow_gt ** 2, dim=1).sqrt()
dense_valid = (valid >= 0.5) & (mag < max_flow)
bs, _, I_H, I_W = flow_gt.shape
for i in range(n_predictions):
# i_weight = gamma ** (n_predictions - i - 1)
i_weight = 1.0
i_loss = (flow_preds[0][i] - flow_gt).abs()
# i_loss = (flow_preds[0][i] - flow_gt).square()
flow_loss += i_weight * (dense_valid[:, None] * i_loss).mean()
if sparse_lambda > 0.0:
ref, sparse_flow, _, _ = flow_preds[1][i]
scale = torch.tensor((I_W - 1, I_H - 1), dtype=torch.float32).view(1, 1, 2).to(sparse_flow.device)
flatten_gt = flow_gt.flatten(2).permute(0, 2, 1)
flatten_valid = valid.flatten(1)
coords = torch.round(ref * scale).long()
coords = torch.clamp_max(coords[..., 1] * coords[..., 0], I_H * I_W - 1)
sparse_gt = torch.gather(flatten_gt, 1, coords.unsqueeze(-1).repeat(1, 1, 2))
sparse_valid = torch.gather(flatten_valid, 1, coords)
sparse_valid = (sparse_valid >= 0.5) & (torch.sum(sparse_gt ** 2, dim=-1).sqrt() < max_flow)
sparse_i_loss = (sparse_flow * scale - sparse_gt).abs()
# sparse_i_loss = (sparse_flow * scale - sparse_gt).square()
sparse_loss += i_weight * (sparse_valid[..., None] * sparse_i_loss).mean()
loss = flow_loss + sparse_loss * sparse_lambda
epe = torch.sum((flow_preds[0][-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[dense_valid.view(-1)]
metrics = {
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
'loss': loss,
'flow_loss': flow_loss,
'sparse_loss': sparse_loss
}
return loss, metrics
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.wdecay)
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, round(args.num_steps * 0.8))
# scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
# pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
# scheduler = torch.optim.lr_scheduler.OneCycleLR(
# optimizer, args.lr,
# args.num_steps + 10,
# pct_start=0.05,
# cycle_momentum=False,
# anneal_strategy='cos')
# scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
# optimizer, 1000, T_mult=1, eta_min=0, last_epoch=- 1, verbose=False)
return optimizer, scheduler
class Logger:
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = 0
self.running_loss = {}
self.writer = None
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
# print the training status
print(training_str + metrics_str)
if self.writer is None:
self.writer = SummaryWriter()
for k in self.running_loss:
self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
self.running_loss[k] = 0.0
def push(self, metrics):
self.total_steps += 1
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter()
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def write_image(self, image1, image2, target, pred, phase="T", idx=0):
if self.writer is None:
self.writer = SummaryWriter()
_, I_H, I_W = image1.shape
scale = torch.tensor((I_W, I_H), dtype=torch.float32).view(1, 2).to(image1.device)
image1 = image1.detach().cpu().numpy()
image1 = np.transpose(image1, (1, 2, 0))
image2 = image2.detach().cpu().numpy()
image2 = np.transpose(image2, (1, 2, 0))
target = target.detach().cpu().numpy()
target = np.transpose(target, (1, 2, 0))
target_img = flow_vis.flow_to_color(target, convert_to_bgr=False)
pred_img = list()
for p_i in range(len(pred[0])):
ref, sparse_flow, masks, scores = pred[1][p_i]
coords = torch.round(ref.squeeze(0) * scale).long()
coords = coords.cpu().numpy()
confidence = np.squeeze(scores.squeeze(0).cpu().numpy())
ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
for k_i in range(len(coords)):
coord = coords[k_i]
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[k_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
pred_img.append(ref_img)
this_pred = pred[0][p_i].squeeze(0).detach().cpu().numpy()
this_pred = np.transpose(this_pred, (1, 2, 0))
this_pred = flow_vis.flow_to_color(this_pred, convert_to_bgr=False)
pred_img.append(this_pred)
mask_img = list()
top_k = len(pred[0])
# top_k_indices = np.argsort(-confidence)[:top_k]
masks = masks.squeeze(0).cpu()
# masks = masks.reshape(self.num_keypoints, 1, H, W)
masks = F.interpolate(masks, size=(I_H, I_W), mode="bilinear", align_corners=False).numpy()
masks = masks.squeeze(1)
top_k_indices = np.argsort(-np.sum(masks, axis=(1, 2)))[:top_k]
for m_i in top_k_indices:
coord = coords[m_i]
# ref_img = cv2.circle(ref_img, coord, 10, (255, 0, 0), 10)
ref_img = cv2.cvtColor(np.array(image1, dtype=np.uint8), cv2.COLOR_RGB2BGR)
ref_img = cv2.circle(ref_img, coord, 10, (round(255 * confidence[m_i]), 0, 0), 10)
ref_img = cv2.cvtColor(np.array(ref_img, dtype=np.uint8), cv2.COLOR_BGR2RGB)
mask_img.append(ref_img)
masked_flow = np.expand_dims(masks[m_i], axis=-1) * this_pred
mask_img.append(masked_flow)
pred_img = np.concatenate(pred_img, axis=1)
"""
Created on Wed Apr 10 10:04:29 2019
@author: <NAME> (<EMAIL>)
"""
import numpy as np
from pylab import *
import matplotlib.pyplot as plt
from copy import copy
import pandas as pd
from matplotlib import colors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from numpy.matlib import repmat
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist, pdist
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold, train_test_split, StratifiedShuffleSplit
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
import seaborn as sns
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import shutil
import os
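# funcLinear is used by plot_gth_pre below but is not defined in this excerpt;
# it may be defined elsewhere in the original file. A minimal sketch consistent
# with its usage (curve_fit returning a slope and an interception) would be:
def funcLinear(x, a, b):
return a * x + b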
def cluster_quality(X, centroids):
D_k = cdist(X, centroids, 'euclidean')
cIdx = np.argmin(D_k,axis=1)
dist = np.min(D_k,axis=1)
tot_withinss = sum(dist**2) # Total within-cluster sum of squares
totss = sum(pdist(X)**2)/X.shape[0] # The total sum of squares
betweenss = totss - tot_withinss # The between-cluster sum of squares
variance_retain = betweenss/totss*100
return variance_retain
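# Hedged usage sketch (assumes a data matrix X and a fitted sklearn KMeans):
#   from sklearn.cluster import KMeans
#   km = KMeans(n_clusters=5, random_state=0).fit(X)
#   print(cluster_quality(X, km.cluster_centers_))  # % of variance explained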
def get_centroids(arr_clusters, arr_X):
unqvals = np.unique(arr_clusters)
centroids = []
for val in unqvals:
if val > -1:
centroids.append(arr_X[np.where(arr_clusters == val)].mean(axis = 0))
return np.array(centroids)
def kmean_elbow_percentange(X, lst_kms, figname):
from scipy.spatial.distance import cdist
from scipy.spatial.distance import pdist
##### cluster data into K=1..20 clusters #####
K_MAX = len(lst_kms)
KK = range(1,K_MAX+1)
centroids = [km.cluster_centers_ for km in lst_kms]
D_k = [cdist(X, cent, 'euclidean') for cent in centroids]
cIdx = [np.argmin(D,axis=1) for D in D_k]
dist = [np.min(D,axis=1) for D in D_k]
tot_withinss = [sum(d**2) for d in dist] # Total within-cluster sum of squares
totss = sum(pdist(X)**2)/X.shape[0] # The total sum of squares
betweenss = totss - tot_withinss # The between-cluster sum of squares
##### plots #####
kIdx = 4 # Elbow
clr = cm.nipy_spectral( np.linspace(0,1,20) ).tolist()
mrk = 'os^p<dvh8>+x.'
variance_retain = betweenss/totss*100
# elbow curve
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(KK, betweenss/totss*100, marker='o', color='#212F3D')
# ax.plot(KK[kIdx], betweenss[kIdx]/totss*100, marker='o', markersize=12,
# markeredgewidth=5, markeredgecolor='r', markerfacecolor='None')
# ax.set_ylim((0,100))
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Percentage of variance explained (%)')
# plt.title('Elbow for KMeans clustering')
plt.savefig(figname, dpi=300, bbox_inch = 'tight')
plt.show()
def kmean_elbow(X, K_MAX = 20):
distortions = []
kmeans = []
for i in range(1, K_MAX + 1):
km = KMeans(n_clusters=i,
init='k-means++',
n_init=10,
max_iter=300,
random_state=0)
km.fit(X)
distortions.append(km.inertia_)
kmeans.append(km)
distortions_norm = [dist / distortions[0] for dist in distortions]
plt.plot(range(1,K_MAX + 1), distortions_norm, marker='o', color='black')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.tight_layout()
plt.savefig('KMeans_Elbow_all_ex_99.png', dpi = 300)
plt.show()
return kmeans
def kmean_elbow_distortion(lst_kms, figname):
distortions = []
K_MAX = len(lst_kms)
for km in lst_kms:
distortions.append(km.inertia_)
distortions_norm = [dist / distortions[0] for dist in distortions]
plt.plot(range(1,K_MAX + 1), distortions_norm, marker='o', color='#212F3D')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.tight_layout()
plt.savefig(figname, dpi=300)
plt.show()
def extract_closes_match(X_ref, X_data):
"""
X_ref: The reference point from which the closest point is going to be
extracted. (1D-Array)
X_data: The dataset. (2D-Array) (n_points x n_dim)
"""
n_dim = len(X_ref)
r = cdist(X_data, X_ref.reshape(1, n_dim))
i_min = np.argmin(r)
return i_min, r[i_min], X_data[i_min, :]
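# Hedged usage sketch (made-up data):
#   X_data = np.random.rand(100, 2)
#   i_min, dist, x_closest = extract_closes_match(np.array([0.5, 0.5]), X_data)
#   # i_min indexes the row of X_data closest (Euclidean) to X_ref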
def ecdf(X, x):
"""Emperical Cumulative Distribution Function
X:
1-D array. Vector of data points per each feature (dimension), defining
the distribution of data along that specific dimension.
x:
Value. It is the value of the corresponding dimension.
P(X <= x):
The cumulative distribution of data points with respect to the archetype
(the probablity or how much of data in a specific dimension is covered
by the archetype).
"""
return float(len(X[X < x]) / len(X))
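# Hedged illustration (made-up values): ecdf(np.array([1., 2., 3., 4.]), 2.5)
# returns 0.5, since two of the four samples lie strictly below 2.5.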
def calc_SSE(X_act, X_appr):
"""
This function returns the Sum of Square Errors.
"""
return ((X_act - X_appr) ** 2).sum()
def calc_SST(X_act):
"""
This function returns the Sum of Square of actual values.
"""
return (X_act ** 2).sum()
def explained_variance(X_act, X_appr, method = 'sklearn'):
if (method.lower() == 'sklearn'):
from sklearn.metrics import explained_variance_score
return explained_variance_score(X_act.T, X_appr.T)
else:
SSE = calc_SSE(X_act, X_appr)
SST = calc_SST(X_act)
return (SST - SSE) / SST
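# Hedged illustration of the manual branch above: with X_act = np.array([1., 2., 3.])
# and X_appr = np.array([1., 2., 2.]), SSE = 1, SST = 14, so the explained
# variance is (14 - 1) / 14 ≈ 0.93.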
def show_EV_PCA(pca):
var_exp = pca.explained_variance_ratio_ * 100
cum_var_exp = np.cumsum(var_exp)
num_comp = len(pca.explained_variance_ratio_)
fig = plt.figure()
plt.bar(range(1,num_comp + 1), var_exp, alpha=0.5, align='center',
label='individual explained variance')
plt.step(range(1,num_comp + 1), cum_var_exp, where='mid',
label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
# plt.show()
return fig
def show_PCA_in_Feat(lst_Feat, res, shape = [3,3]):
n = shape[0]
m = shape[1]
tot_plot = m * n
x = np.arange(1, len(lst_Feat) + 1)
count = 1
lst_figs = []
for i in range(res.shape[1]):
# if (i % tot_plot == 0):
# count = 1
# plt.figure(figsize=(20,12))
fig = plt.figure(figsize=(20,12))
# plt.subplot(n,m,count)
plt.bar(x, res[:,i])
plt.xticks(x, lst_Feat, rotation='vertical')
# plt.show()
count += 1
lst_figs.append(fig)
return lst_figs
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
fig = plt.figure(figsize=(6,4))
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return fig
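# Hedged usage sketch (hypothetical estimator and data):
#   cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
#   fig = plot_learning_curve(LinearRegression(), "Learning curve", X, y,
#                             cv=cv, n_jobs=-1)
#   fig.savefig('learning_curve.png', dpi=300)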
def plot_gth_pre(Y_label, Y_pre, range_set = True, tag='Train'):
from scipy.optimize import curve_fit
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
min_glob = min(Y_label.min(), Y_pre.min())
max_glob = max(Y_label.max(), Y_pre.max())
Y_label = (Y_label - min_glob) / (max_glob - min_glob).flatten()
Y_pre = (Y_pre - min_glob) / (max_glob - min_glob).flatten()
# Y_label = sc.fit_transform(Y_label.reshape(-1,1)).flatten()
# Y_pre = sc.fit_transform(Y_pre.reshape(-1,1)).flatten()
fig = plt.figure(figsize=(6,6))
# Fitting best line:
# ==================
pre_Y = Y_pre
parameter, covariance_matrix = curve_fit(funcLinear,
Y_label.astype(float),
pre_Y.flatten().astype(float))
xx = np.linspace(-0.1, 1.1, 30)
plt.plot(xx, funcLinear(xx, *parameter), color='#52BE80', linewidth = 2, label='fit')
axes = plt.gca()
if range_set:
axes.set_xlim([-0.1,1.1])
axes.set_ylim([-0.1,1.1])
else:
# min_true = np.min(Y_label) - 0.1 * np.min(Y_label)
# max_true = np.max(Y_label) + 0.1 * np.max(Y_label)
# min_pre = np.min(Y_pre) - 0.1 * np.min(Y_pre)
# max_pre = np.max(Y_pre) + 0.1 * np.max(Y_pre)
axes.set_xlim([min_glob,max_glob])
axes.set_ylim([min_glob,max_glob])
lims = [
np.min([axes.get_xlim(), axes.get_ylim()]), # min of both axes
np.max([axes.get_xlim(), axes.get_ylim()]), # max of both axes
]
# 45 degree line:
# ===============
plt.plot(lims, lims, '--', color='#A569BD', linewidth=5, alpha=0.75, zorder=0)
# Scattered Data:
# ===============
plt.scatter(Y_label,pre_Y,
marker='o',
s=20,
facecolors='#273746',
edgecolors='#273746')
plt.legend(['Best Fit','Perfect Fit', 'Data'], loc='lower right')
plt.text(0, 0.9, r'slope: %.2f'%parameter[0])
plt.text(0, 0.85, r'interception: %.2f'%parameter[1])
plt.xlabel('Normalized '+ tag +' Value')
plt.ylabel('Normalized Prediction Value')
from sklearn.metrics import r2_score
plt.title("$R^2 = %.5f$"%r2_score(Y_label,pre_Y.flatten()))
return fig
def plot_it(y_true, y_pred, filename=None):
fig = plt.figure(figsize=(6,6))
y_min = min(y_true.min(), y_pred.min())
y_max = min(y_true.max(), y_pred.max())
xx = np.linspace(y_min, y_max, 30)
lr = LinearRegression()
lr.fit(y_pred.reshape(-1,1), y_true)
y_line = lr.predict(y_pred.reshape(-1,1))
# 45 degree line:
# ===============
plt.plot(xx,xx, '--', color='#A569BD', linewidth=5, label = 'Perfect fit')
# Best straigth line:
# ===================
# plt.plot(np.unique(y_pred), np.poly1d(np.polyfit(y_pred, y_true, 1))(np.unique(y_pred)), color='#52BE80', linewidth = 2, label= 'Best fit')
# plt.plot(y_pred.flatten(), y_line, color='#52BE80', linewidth = 2, label= 'Best fit')
# plt.plot(y_line, y_pred.flatten(), color='#52BE80', linewidth = 2, label= 'Best fit')
# scattered y-true and y-pred:
# ============================
# plt.scatter(y_pred, y_true, color = '#273746', label = 'Data')
plt.scatter(y_true, y_pred, color = '#273746', label = 'Data', s = 20)
plt.xlabel('Truth')
plt.ylabel('Predicted')
plt.legend(loc='upper left')
R2 = np.round(r2_score( y_true, y_pred ), 3)
RMSE = np.round(mean_squared_error( y_true, y_pred )**0.5, 3)
MAE = np.round(mean_absolute_error( y_true, y_pred ), 3)
plt.title(f"$R^2$ = {R2}, RMSE = {RMSE}, MAE = {MAE}")
if filename:
plt.savefig(filename, dpi=300, bbox_inches='tight')
return fig
def skf_cv_scores(model, metrics, x_train, y_train, n_splits = 5, bins = 12, print_scores = False):
skf = StratifiedKFold(n_splits=n_splits, shuffle=False, random_state=42)
bins_ = np.linspace(y_train.min(), y_train.max(), bins)
y_train_binned_cv = np.digitize(y_train, bins_)
CV_scores = []
for cv_training_index, cv_testing_index in skf.split(x_train, y_train_binned_cv):
X_training_cv = x_train[cv_training_index, :]
X_testing_cv = x_train[cv_testing_index, :]
Y_training_cv = y_train[cv_training_index]
Y_testing_cv = y_train[cv_testing_index]
# fit model to training dataset
model.fit(X_training_cv, Y_training_cv)
# Test
CV_scores.append(metrics(model.predict(X_testing_cv), Y_testing_cv))
accuracy = np.mean(CV_scores)
uncertainty = np.std(CV_scores)*2
if print_scores:
print('CV Scores:', np.round(CV_scores, 3))
print('Accuracy:',np.round(accuracy, 3),'+/-',np.round(uncertainty, 3))
return CV_scores
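# Hedged usage sketch (hypothetical model and metric; note the metric is called
# as metrics(prediction, truth)):
#   scores = skf_cv_scores(LinearRegression(),
#                          lambda pred, true: r2_score(true, pred),
#                          x_train, y_train, n_splits=5, print_scores=True)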
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Reds, **kwargs):
filename = kwargs.get('filename', None)
"""
reference:
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
fig = plt.figure()
import itertools
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalised confusion matrix")
# else:
# print('Confusion matrix, without normalization')
# print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, round(cm[i,j],2), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
if (i==1) and (j==1):
plt.text(j, i, round(cm[i,j],2), horizontalalignment="center", color="white")
elif (i==0) and (j==0):
plt.text(j, i, round(cm[i,j],2), horizontalalignment="center", color="black")
else:
plt.text(j, i, round(cm[i,j],2), horizontalalignment="center", color="black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
if filename:
plt.savefig(filename, dpi=300, bbox_inches='tight')
return fig
def plot_feature_importance(ft_set, feature_importance, show_cols = 30):
fig = plt.figure(figsize=(8,6))
w_lr_sort, ft_sorted, _ = return_feature_importance(ft_set, feature_importance, show_cols = show_cols)
x_val = list(range(len(w_lr_sort)))
plt.bar(x_val, w_lr_sort, color = '#212F3D')
plt.xticks(x_val, ft_sorted, rotation='vertical')
return fig
def return_feature_importance(ft_set, feature_importance, show_cols = 30):
w_lr = copy(np.abs(feature_importance))
__all__ = ["Scalar", "Vector", "Matrix"]
class Scalar(object):
"""
Scalar variable type.
It holds a 64-bit floating point value, stored via a zero-dimensional
``ndl``; it can listen to changes and fix or unfix its value.
Parameters
----------
value : float
Initial value.
"""
__slots__ = [
"raw",
"_fixed",
"value",
"__array_interface__",
"__array_struct__",
"_bounds",
]
def __init__(self, value):
from ndarray_listener import ndl
from numpy import float64, inf
self._bounds = (-inf, +inf)
self._fixed = False
value = ndl(float64(value))
self.raw = value
self.__array_interface__ = value.__array_interface__
self.__array_struct__ = value.__array_struct__
@property
def bounds(self):
return self._bounds
@bounds.setter
def bounds(self, v):
self._bounds = v
def copy(self):
"""Return a copy."""
return Scalar(self.raw)
@property
def shape(self):
"""
Shape according to :mod:`numpy`.
"""
return self.raw.shape
@property
def ndim(_):
"""
Number of dimensions.
"""
return 0
@property
def size(self):
"""
Size according to :mod:`numpy`.
"""
return self.raw.size
def asarray(self):
"""
Return a :class:`numpy.ndarray` representation.
"""
from numpy import array
return array(self.raw)
@property
def isfixed(self):
"""
Return whether it is fixed or not.
"""
return self._fixed
def fix(self):
"""
Set it fixed.
"""
self._fixed = True
def unfix(self):
"""
Set it unfixed.
"""
self._fixed = False
def listen(self, you):
"""
Request a callback for value modification.
Parameters
----------
you : object
An instance having ``__call__`` attribute.
"""
self.raw.talk_to(you)
def __setattr__(self, name, value):
from numpy import float64
if name == "value":
try:
value = float64(value)
except TypeError:
value = value[0]
self.raw.itemset(value)
else:
Scalar.__dict__[name].__set__(self, value)
def __getattr__(self, name):
if name == "value":
name = "raw"
return Scalar.__dict__[name].__get__(self)
def __str__(self):
return "Scalar(" + str(self.raw) + ")"
def __repr__(self):
return repr(self.raw)
def __ge__(self, that):
return self.raw >= that.raw
def __gt__(self, that):
return self.raw > that.raw
def __le__(self, that):
return self.raw <= that.raw
def __lt__(self, that):
return self.raw < that.raw
def __eq__(self, that):
return self.raw == that.raw
def __ne__(self, that):
return self.raw != that.raw
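# Hedged usage sketch of Scalar:
#   s = Scalar(1.5)
#   s.value = 2.0   # writes into the underlying zero-dimensional ndl in place
#   s.fix()         # s.isfixed is now True
#   s.asarray()     # plain numpy representation, array(2.0)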
class Vector(object):
"""
Vector variable type.
It holds an array of 64-bit floating point values, via a one-dimensional
``ndl``; it can listen to changes and fix or unfix its values.
Parameters
----------
value : float
Initial value.
"""
__slots__ = [
"raw",
"_fixed",
"__array_interface__",
"__array_struct__",
"value",
"_bounds",
]
def __init__(self, value):
from numpy import asarray, atleast_1d, inf
from ndarray_listener import ndl
self._bounds = [(-inf, +inf)] * len(value)
self._fixed = False
value = asarray(value, float)
value = ndl(atleast_1d(value).ravel())
self.raw = value
self.__array_interface__ = value.__array_interface__
self.__array_struct__ = value.__array_struct__
@property
def bounds(self):
return self._bounds
@bounds.setter
def bounds(self, v):
self._bounds = v
def copy(self):
"""
Return a copy.
"""
return Vector(self.raw)
@property
def shape(self):
"""
Shape according to :mod:`numpy`.
"""
return self.raw.shape
@property
def ndim(self):
"""
Number of dimensions.
"""
return len(self.shape)
@property
def size(self):
"""
Size according to :mod:`numpy`.
"""
return self.raw.size
def asarray(self):
"""
Return a :class:`numpy.ndarray` representation.
"""
from numpy import array
return array(self.raw)
@property
def isfixed(self):
"""
Return whether it is fixed or not.
"""
return self._fixed
def fix(self):
"""
Set it fixed.
"""
self._fixed = True
def unfix(self):
"""
Set it unfixed.
"""
self._fixed = False
def listen(self, you):
"""
Request a callback for value modification.
Parameters
----------
you : object
An instance having ``__call__`` attribute.
"""
self.raw.talk_to(you)
def __setattr__(self, name, value):
from numpy import asarray, atleast_1d
if name == "value":
value = asarray(value)
value = atleast_1d(value).ravel()
self.raw[:] = value
else:
Vector.__dict__[name].__set__(self, value)
def __getattr__(self, name):
if name == "value":
name = "raw"
return Vector.__dict__[name].__get__(self)
def __str__(self):
return "Vector(" + str(self.raw) + ")"
def __repr__(self):
return repr(self.raw)
def __ge__(self, that):
return self.raw >= that.raw
def __gt__(self, that):
return self.raw > that.raw
def __le__(self, that):
return self.raw <= that.raw
def __lt__(self, that):
return self.raw < that.raw
def __eq__(self, that):
return self.raw == that.raw
def __ne__(self, that):
return self.raw != that.raw
class Matrix(object):
__slots__ = ["raw", "_fixed"]
def __init__(self, value):
self._fixed = False
self.raw = value
@property
def size(self):
return self.raw.size
def asarray(self):
from numpy import asarray
return asarray([self.raw])
import sys,os
import numpy as np
import matplotlib.pyplot as plt
from desitarget import cuts
import fitsio
import astropy.io.fits as fits
import healpy as hp
from scipy.special import erf
from astropy.table import Table
colorcuts_function = cuts.isELG_colors
#deep DECaLS imaging, with photozs from HSC
truthf = '/project/projectdirs/desi/users/ajross/MCdata/desi_mcsyst_truth.dr7.34ra38.-7dec-3.fits'
truth = fitsio.read(truthf,1)
gmag = truth["g"]
w = gmag < 24.5
#truth = truth[w]
gmag = truth["g"]
rmag = truth["r"]
zmag = truth["z"]
photz = truth['hsc_mizuki_photoz_best']
#pixfn = '/project/projectdirs/desi/target/catalogs/dr8/0.31.1/pixweight/pixweight-dr8-0.31.1.fits' #update this to be more recent
pixfn = '/global/cfs/cdirs/desi/target/catalogs/dr9m/0.42.0/pixweight/main/resolve/dark/pixweight-dark.fits' #dr9m version
def mag2flux(mag) :
return 10**(-0.4*(mag-22.5))
def flux2mag(flux) :
mag = -2.5*np.log10(flux*(flux>0)+0.001*(flux<=0)) + 22.5
mag[(flux<=0)] = 0.
return mag
gflux = mag2flux(truth["g"])
rflux = mag2flux(truth["r"])
zflux = mag2flux(truth["z"])
w1flux = np.zeros(gflux.shape)#WISE not used in ELG selection, but still needed for code
w2flux = np.zeros(gflux.shape)
true_selection = colorcuts_function(gflux=gflux, rflux=rflux, zflux=zflux, w1flux=w1flux, w2flux=w2flux,south=True)
true_mean=np.mean(true_selection.astype(float))
print(true_mean)
grand = np.random.normal(size=gflux.shape)
rrand = np.random.normal(size=rflux.shape)
zrand = np.random.normal(size=zflux.shape)
R_G=3.214 # http://legacysurvey.org/dr8/catalogs/#galactic-extinction-coefficients
R_R=2.165
R_Z=1.211
#set up correlation matrix for fluxes
ml = np.zeros(3)
cv = np.ones((3,3))*.5 #just given them all correlation of 0.5 for now
cv[0][0] = 1.
cv[1][1] = 1.
cv[2][2] = 1.
cg = np.random.default_rng().multivariate_normal(ml,cv,len(gflux))
cg = cg.transpose()
def perturb_flux(ina,outf='test.fits'):
'''
    ina should be the input array containing the necessary columns
    the idea here is that, given the input photometry plus flux errors with the covariance cv, an output distribution consistent with Obiwan could be produced
'''
    vv = np.zeros(3)
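# Illustrative sketch (an assumption, not the original implementation of
# perturb_flux): one way the correlated draws in cg could perturb the g/r/z
# fluxes, given hypothetical per-band 1-sigma flux errors sig_g, sig_r, sig_z.
def _perturb_fluxes_sketch(gflux, rflux, zflux, cg, sig_g=0.05, sig_r=0.07, sig_z=0.1):
    # cg has shape (3, N): correlated standard-normal draws, one row per band
    gp = gflux + sig_g * cg[0]
    rp = rflux + sig_r * cg[1]
    zp = zflux + sig_z * cg[2]
    return gp, rp, zp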
import numpy as np
from sklearn.metrics import confusion_matrix, r2_score
import sys
def class_acc(out, labels):
outputs = np.argmax(out, axis=1)
sys.stdout.flush()
print('\n')
print('confusion_matrix:\n', confusion_matrix(labels, outputs))
print('\n')
sys.stdout.flush()
    return np.sum(outputs == labels)
import snipar.preprocess as preprocess
import numpy as np
from snipar.gtarray import gtarray
from bgen_reader import open_bgen
from snipar.utilities import *
def match_observed_and_imputed_snps(gts_f, par_gts_f, snp_ids=None, start=0, end=None):
"""
Used in get_gts_matrix_given_ped to match observed and imputed SNPs and return SNP information on shared SNPs.
Removes SNPs that have duplicated SNP ids.
in_obs_sid contains the SNPs in the imputed genotypes that are present in the observed SNPs
obs_sid_index contains the index in the observed SNPs of the common SNPs
"""
# Match SNPs from imputed and observed and restrict to those in list
if snp_ids is None:
snp_ids = gts_f.ids
if np.unique(snp_ids).shape[0] == 1:
snp_ids = gts_f.rsids
if end is None:
end = snp_ids.shape[0]
snp_ids = snp_ids[start:end]
# Get bim info
alleles = np.array([x.split(',') for x in gts_f.allele_ids])
pos = np.array(gts_f.positions)
chromosome = np.array(gts_f.chromosomes)
# Remove duplicate ids
unique_snps, snp_indices, snp_counts = np.unique(snp_ids, return_index=True, return_counts=True)
snp_set = set(snp_ids[snp_indices[snp_counts == 1]])
if len(snp_set) < snp_ids.shape[0]:
print(str(snp_ids.shape[0]-len(snp_set))+' SNPs with duplicate IDs removed')
## Read and match SNP ids
imp_bim = convert_str_array(np.array(par_gts_f['bim_values']))
imp_bim_cols = convert_str_array(np.array(par_gts_f['bim_columns']))
# Find relevant column for SNP ids in imputed data
found_snp_ids = False
if 'rsid' in imp_bim_cols:
imp_sid = imp_bim[:,np.where(imp_bim_cols=='rsid')[0][0]]
        if np.unique(imp_sid).shape[0] == 1:
found_snp_ids = False
else:
found_snp_ids = True
if not found_snp_ids:
if 'id' in imp_bim_cols:
imp_sid = imp_bim[:,np.where(imp_bim_cols=='id')[0][0]]
else:
raise(ValueError('Cannot find imputed SNP ids'))
# Get imputed allele ids
if 'allele_ids' in imp_bim_cols:
imp_alleles = np.array([x.split(',') for x in imp_bim[:,np.where(imp_bim_cols=='allele_ids')[0][0]]])
elif 'allele1' in imp_bim_cols and 'allele2' in imp_bim_cols:
imp_alleles = imp_bim[:,[np.where(imp_bim_cols=='allele1')[0][0],np.where(imp_bim_cols=='allele2')[0][0]]]
obs_sid = gts_f.ids
if np.unique(obs_sid).shape[0] == 1:
obs_sid = gts_f.rsids
obs_sid_dict = make_id_dict(obs_sid)
in_obs_sid = np.zeros((imp_sid.shape[0]), dtype=bool)
obs_sid_index = np.zeros((imp_sid.shape[0]), dtype=int)
for i in range(0, imp_sid.shape[0]):
if imp_sid[i] in obs_sid_dict and imp_sid[i] in snp_set:
in_obs_sid[i] = True
obs_sid_index[i] = obs_sid_dict[imp_sid[i]]
if np.sum(in_obs_sid) == 0:
raise ValueError('No SNPs in common between imputed and observed data')
obs_sid_index = obs_sid_index[in_obs_sid]
sid = imp_sid[in_obs_sid]
alleles = alleles[obs_sid_index, :]
imp_alleles = imp_alleles[in_obs_sid,:]
if 'Chr' in imp_bim_cols:
chr_col = np.where('Chr' == imp_bim_cols)[0][0]
else:
chr_col = 0
chromosome = imp_bim[in_obs_sid,chr_col]
pos = pos[obs_sid_index]
    allele_match = np.logical_and(alleles[:,0]==imp_alleles[:,0],alleles[:,1]==imp_alleles[:,1])
import csv
from typing import Tuple, List
import numpy as np
from pylab import *
from scipy import linalg
from scipy.stats import norm
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn import linear_model
import time
import logging
from model import Model
from MatrixOperation import multiply_matrices, multiply_transpose
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=None,
level=logging.INFO)
logger = logging.getLogger('Baseline')
class Classification(object):
def __init__(self):
super(Classification, self).__init__()
@staticmethod
# todo
def compmedDist(X: np.ndarray) -> np.float64: # X:transpose of feature matrix
size1 = X.shape[0] # feature size
Xmed = X
# every value squared, and sum across row
G = sum((Xmed * Xmed), 1)
# expand (size1,) to (size1,size1), each row contains size1 sum
Q = tile(G[:, newaxis], (1, size1))
# R=Q.T
R = tile(G, (size1, 1))
dists = Q + R - 2 * dot(Xmed, Xmed.T)
dists = dists - tril(dists) # diagonal and right-top part set to 0
dists = dists.reshape(size1 ** 2, 1, order='F').copy()
        return sqrt(0.5 * median(dists[dists > 0]))
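    # Illustrative cross-check (not part of the original class): the same
    # median-distance bandwidth heuristic written with scipy's pdist, useful
    # for sanity-checking compmedDist on small inputs.
    @staticmethod
    def _compmedDist_check(X: np.ndarray) -> np.float64:
        from scipy.spatial.distance import pdist
        # squared Euclidean distances between all pairs of rows of X
        d = pdist(X, metric='sqeuclidean')
        # same bandwidth rule as compmedDist: sqrt(0.5 * median of positive distances)
        return np.sqrt(0.5 * np.median(d[d > 0]))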
@staticmethod
def kernel_Gaussian(x: ndarray, c: ndarray, sigma: np.float64) -> ndarray:
(d, nx) = x.shape
(d, nc) = c.shape
x2 = sum(x ** 2, 0)
c2 = sum(c ** 2, 0)
distance2 = tile(c2, (nx, 1)) + tile(x2[:, newaxis], (1, nc)) - 2 * dot(x.T, c)
return exp(-distance2 / (2 * (sigma ** 2)))
@staticmethod
def R_ULSIF(x_nu: ndarray, x_de: ndarray, alpha, sigma_list, lambda_list, b, fold):
# x_nu: samples from numerator
# x_de: samples from denominator
# alpha: alpha defined in relative density ratio
# sigma_list, lambda_list: parameters for model selection
#
# b: number of kernel basis
# fold: number of fold for cross validation
(d, n_nu) = x_nu.shape
(d, n_de) = x_de.shape
b = min(b, n_nu)
x_ce = x_nu[:, np.r_[0:b]]
score_cv = np.zeros((len(sigma_list), len(lambda_list)))
cv_index_nu = np.random.permutation(n_nu)
cv_split_nu = np.floor(np.r_[0:n_nu] * fold / n_nu)
cv_index_de = np.random.permutation(n_de)
cv_split_de = np.floor(np.r_[0:n_de] * fold / n_de)
        # maximum number of iterations (the loop exits early once the loss converges)
iter = 1000
count = 0
mu = 0.1
k1 = 0.1
loss_old = float('inf')
thetat = None
for i in range(0, iter):
count += 1
print("current iteration:", count)
# choose sigma
for sigma_index in r_[0:size(sigma_list)]: # try different sigma
sigma = sigma_list[sigma_index]
K_de = Classification.kernel_Gaussian(x_de, x_ce, sigma).T # ndarray
K_nu = Classification.kernel_Gaussian(x_nu, x_ce, sigma).T # ndarray
score_tmp = zeros((fold, size(lambda_list)))
for k in np.r_[0:fold]: # select k-fold
Ktmp1 = K_de[:, cv_index_de[cv_split_de != k]]
Ktmp2 = K_nu[:, cv_index_nu[cv_split_nu != k]]
# Ktmp:990,990
# Ktmp = alpha / Ktmp2.shape[1] * np.dot(Ktmp2, Ktmp2.T) \
# + (1 - alpha) / Ktmp1.shape[1] * np.dot(Ktmp1, Ktmp1.T)
Ktmp = alpha / Ktmp2.shape[1] * multiply_transpose(Ktmp2) \
+ (1 - alpha) / Ktmp1.shape[1] * multiply_transpose(Ktmp1)
# mKtmp:990,
mKtmp = np.mean(K_nu[:, cv_index_nu[cv_split_nu != k]], 1)
for lambda_index in np.r_[0:size(lambda_list)]:
lbd = lambda_list[lambda_index]
# thetat_cv:990,
thetat_cv = linalg.solve(Ktmp + (lbd * eye(b)), mKtmp)
thetah_cv = thetat_cv
score_tmp[k, lambda_index] = \
alpha * np.mean(
np.dot(K_nu[:, cv_index_nu[cv_split_nu == k]].T, thetah_cv) ** 2) / 2. \
+ (1 - alpha) * np.mean(
np.dot(K_de[:, cv_index_de[cv_split_de == k]].T, thetah_cv) ** 2) / 2. \
- np.mean(dot(K_nu[:, cv_index_nu[cv_split_nu == k]].T, thetah_cv))
score_cv[sigma_index, :] = mean(score_tmp, 0)
score_cv_tmp = score_cv.min(1)
lambda_chosen_index = score_cv.argmin(1)
score = score_cv_tmp.min()
sigma_chosen_index = score_cv_tmp.argmin()
lambda_chosen = lambda_list[lambda_chosen_index[sigma_chosen_index]]
sigma_chosen = sigma_list[sigma_chosen_index]
# compute coe
K_de = Classification.kernel_Gaussian(x_de, x_ce, sigma_chosen).T
K_nu = Classification.kernel_Gaussian(x_nu, x_ce, sigma_chosen).T
coe = alpha * np.dot(K_nu, K_nu.T) / n_nu \
+ (1 - alpha) * np.dot(K_de, K_de.T) / n_de \
+ lambda_chosen * np.eye(b)
var = np.mean(K_nu, 1)
# solve theta
thetat = linalg.solve(coe, var)
thetatTranspose = thetat.transpose()
# compute loss
loss1 = dot(thetat.T, (alpha * dot(K_nu, K_nu.T) / n_nu - (1 - alpha) * dot(K_de, K_de.T) / n_de))
loss_bias = 0.5 * dot(loss1, thetat) - dot(var.T, thetat) + 0.5 * lambda_chosen * dot(thetat.T, thetat)
print("part 2 loss:", loss_bias)
if alpha < 0:
loss_alpha = -1
elif alpha > 1:
loss_alpha = 1
else:
loss_alpha = 0
print("alpha loss:", loss_alpha)
loss_new = loss_alpha + 1.0 * loss_bias
# gradient
result = dot(thetatTranspose, (dot(K_nu, K_nu.T) / n_nu - dot(K_de, K_de.T) / n_de))
loss2change = 0.5 * dot(result, thetat)
print("loss bias change:", loss2change)
# update alpha
alpha_old = alpha
print("alpha old:", alpha_old)
while True:
mu = mu * exp(-k1 * i)
if alpha < 0:
alpha = alpha - mu * (-1 + loss2change)
if alpha > 1:
alpha = alpha - mu * (1 + loss2change)
if 0 <= alpha <= 1:
alpha = alpha - mu * loss2change
if alpha < 0 or alpha >= 1:
k1 = 2 * k1
alpha = alpha_old
else:
k1 = 0.1
break
print("mu:", mu)
if abs(loss_old - loss_new) < 1e-7:
break
print("Old loss:", loss_old)
print("new loss:", loss_new)
print("alpha updated:", alpha)
print("alpha change:", alpha_old - alpha)
print("count", count)
print()
loss_old = loss_new
# return result
thetah = thetat
wh_x_de = np.dot(K_de.T, thetah).T
wh_x_nu = np.dot(K_nu.T, thetah).T
print("wh_x_de:", wh_x_de)
print("wh_x_de len:", len(wh_x_de))
print("wh_x_nu:", wh_x_nu)
print("wh_x_nu len:", len(wh_x_nu))
wh_x_de[wh_x_de < 0] = 0
return (thetah, wh_x_de, x_ce, sigma_chosen)
@staticmethod
def compute_target_weight(thetah, x_ce, sigma, x_nu):
K_nu = Classification.kernel_Gaussian(x_nu, x_ce, sigma).T
wh_x_nu = dot(K_nu.T, thetah).T
wh_x_nu[wh_x_nu < 0.00000001] = 0.00000001
return wh_x_nu
@staticmethod
def sigma_list(x_nu: np.array, x_de: np.array) -> ndarray:
        # concatenate two ndarrays along columns
x: np.array = c_[x_nu, x_de]
med = Classification.compmedDist(x.T)
return med * array([0.6, 0.8, 1, 1.2, 1.4])
@staticmethod
def lambda_list() -> ndarray:
return array([10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** (0), 10 ** (1)])
@staticmethod
def read_csv(path: str, size=None, delimiter=",", header=None) -> Tuple[matrix, ndarray]:
data: ndarray = None
data_label: ndarray = None
with open(path) as csvfile:
count = 0
if not header:
reader = csv.reader(csvfile, delimiter=delimiter)
else:
reader = csv.DictReader(csvfile, fieldnames=header, delimiter=delimiter)
for row in reader:
tmp = [float(x) for x in row[:-1]]
label = row[-1]
if data is None:
data = np.array(tmp)
else:
data = np.vstack((data, tmp))
if data_label is None:
data_label = np.array(label)
else:
data_label = np.vstack((data_label, label))
count += 1
if size and count > size:
break
        data: matrix = np.matrix(data, dtype=np.float64)
# -*- coding: utf-8 -*-
"""
==============================================================================
@author: <NAME>
@date: Thu May 13 09:50:26 2021
@reference: Ojala, A Comparative Study of Texture Measures with Classification on Feature Distributions
            Ojala, Gray Scale and Rotation Invariant Texture Classification with Local Binary Patterns
==============================================================================
"""
import numpy as np
from skimage import feature
def _energy(x):
return np.multiply(x,x).sum()
def _entropy(x):
return -np.multiply(x, np.log(x+1e-16)).sum()
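# Quick illustration (not part of the original module): energy and entropy of a
# normalized histogram, computed with the helpers above. The 4-bin uniform
# histogram is a made-up example; it gives energy sum(p^2) = 0.25 and entropy
# -sum(p*log(p)) = log(4) ~= 1.386.
def _lbp_helpers_demo():
    hist = np.ones(4) / 4.0
    return _energy(hist), _entropy(hist)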
def lbp_features(f, mask, P=[8,16,24], R=[1,2,3]):
'''
Parameters
----------
f : numpy ndarray
Image of dimensions N1 x N2.
mask : numpy ndarray
        Mask image N1 x N2 with 1 if the pixel belongs to the ROI, 0 otherwise.
        Give None to consider the whole image as the ROI.
P : list, optional
Number of points in neighborhood. The default is [8,16,24].
R : list, optional
Radius/Radii. The default is [1,2,3].
Returns
-------
features : numpy ndarray
Energy and entropy of LBP image (2 x 1).
labels : list
Labels of features.
'''
if mask is None:
mask = np.ones(f.shape)
    P = np.array(P)
import numpy as np
import datetime
import time
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S').replace(' ', '-').replace(':', '-')
import os
os.environ["OMP_NUM_THREADS"] = "4" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "4" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "6" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "4" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "6" # export NUMEXPR_NUM_THREADS=6
from auxiliaryFunctions import project_onto_simplex, performUpdate, exitCriterion, stepSize
"""## Away FW or Pairwise FW"""
#Maintains active list of weights and vertices.
def runFWSimplex(x0, function, feasibleReg, tolerance, maxTime, FWVariant = "AFW", typeStep = "SS", criterion = "PG", criterionRef = 0.0):
#Quantities we want to output.
grad = function.fEvalGrad(x0)
FWGap = [np.dot(grad, x0 - feasibleReg.LPOracle(grad))]
fVal = [function.fEval(x0)]
timing = [time.time()]
x = x0.copy()
itCount = 1
while(True):
if(FWVariant == "AFW"):
x, vertvar, gap = awayStepFWSimplex(function, feasibleReg, x, typeStep)
else:
x, vertvar, gap = pairwiseStepFWSimplex(function, feasibleReg, x, typeStep)
performUpdate(function, x, FWGap, fVal, timing, gap)
if(exitCriterion(itCount, fVal[-1], FWGap[-1], criterion = criterion, numCriterion = tolerance, critRef = criterionRef) or timing[-1] - timing[0] > maxTime):
timing[:] = [t - timing[0] for t in timing]
return x, FWGap, fVal, timing
itCount += 1
def awayStepFWSimplex(function, feasibleReg, x, typeStep):
grad = function.fEvalGrad(x)
v = feasibleReg.LPOracle(grad)
a, indexMax = feasibleReg.AwayOracle(grad, x)
vertvar = 0
#Choose FW direction, can overwrite index.
if(np.dot(grad, x - v) > np.dot(grad, a - x)):
d = v - x
alphaMax = 1.0
optStep = stepSize(function, d, grad, typeStep)
alpha = min(optStep, alphaMax)
#Less than maxStep
if(alpha != alphaMax):
#newVertex returns true if vertex is new.
if(np.dot(v, x) == 0.0):
vertvar = 1
#Max step length away step, only one vertex now.
else:
vertvar = -1
else:
d = x - a
alphaMax = x[indexMax]/(1.0 - x[indexMax])
optStep = stepSize(function, d, grad, typeStep)
alpha = min(optStep, alphaMax)
#Max step, need to delete a vertex.
if(alpha == alphaMax):
vertvar = -1
return x + alpha*d, vertvar, np.dot(grad, x - v)
#Perform one step of the Pairwise FW algorithm
#Also specifies if the number of vertices has decreased var = -1 or
#if it has increased var = +1. Otherwise 0.
def pairwiseStepFWSimplex(function, feasibleReg, x, typeStep):
grad = function.fEvalGrad(x)
v = feasibleReg.LPOracle(grad)
a, index = feasibleReg.AwayOracle(grad, x)
vertVar = 0
#Find the weight of the extreme point a in the decomposition.
alphaMax = x[index]
#Update weight of away vertex.
d = v - a
optStep = stepSize(function, d, grad, typeStep)
alpha = min(optStep, alphaMax)
if(alpha == alphaMax):
vertVar = -1
#Update the FW vertex
if(np.dot(v, x) == 0.0):
vertVar = 1
    return x + alpha*d, vertVar, np.dot(grad, x - v)
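# Illustrative sketch (an assumption, not the project's feasibleReg class): on
# the probability simplex, the linear minimization oracle used above returns
# the vertex with the most negative gradient entry, and the away oracle returns
# the active vertex (nonzero weight) with the largest gradient entry.
class _SimplexRegionSketch:
    def __init__(self, dim):
        self.dim = dim
    def LPOracle(self, grad):
        v = np.zeros(self.dim)
        v[np.argmin(grad)] = 1.0
        return v
    def AwayOracle(self, grad, x):
        active = np.where(x > 0.0)[0]
        index = active[np.argmax(grad[active])]
        a = np.zeros(self.dim)
        a[index] = 1.0
        return a, index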
#!/usr/bin/env python
"""
Does some simple calculations to test trace gas PACE calculations
Adapted from benchmark4Amir.py
<NAME>, April 2020
"""
import os
import sys
from netCDF4 import Dataset
from netCDF4 import Dataset as ncread
import numpy as np
from MAPL.constants import *
from py_leo_vlidort import VLIDORT_POLAR_
from scipy.interpolate import interp1d
import scipy.integrate as integrate
from pyhdf.SD import SD, SDC
from multiprocessing import Pool
from hyperTest import get_channels,get_geom,get_ROT,get_TOA_unpack,get_TOA
from hyperTest import get_PTWV_profile, writenc
format = 'NETCDF4_CLASSIC'
MISSING = -1.e+20
def get_kg(inFile):
"""
Read c-k distribution coefficients from Amir's calculations
"""
nc = Dataset(inFile)
# wavelength [nm]
wav_abs = np.array(nc.variables['wavelengths'][:])
# c-k coefficients [not sure about units]
kg_o2 = nc.variables['kg_o2'][:].T
kg_h2o = nc.variables['kg_h2o'][:].T
kg_co = nc.variables['kg_co'][:].T
kg_co2 = nc.variables['kg_co2'][:].T
kg_ch4 = nc.variables['kg_ch4'][:].T
kg_n2o = nc.variables['kg_n2o'][:].T
g_bins = nc.variables['g_bins'][:]
nc.close()
return kg_o2, kg_h2o, kg_co, kg_co2, kg_ch4, kg_n2o, g_bins, wav_abs
def get_alpha(A,VMR,rhoe,ze):
"""
Calculate Absorption optical depth profile
A - absorption coefficient [m2/molecule]
VMR - trace gas mixing ratio [vol/vol, dimensionless]
rhoe - air number density [molecules/m3]
ze - profile altitude [m]
"""
# convert vmr to molecules/m3
nxe = VMR*rhoe
# integrate to get the optical depth subcolumns
km, nbin, nch = A.shape
alpha = np.zeros([km,nbin,nch])
for i in range(km):
for b in range(nbin):
c1 = A[i,b,:]*nxe[i]
c2 = A[i,b,:]*nxe[i+1]
c1.shape = (1, nch)
c2.shape = (1, nch)
c = np.append(c1,c2,axis=0)
alpha[i,b,:] = np.trapz(c,ze[i:i+2],axis=0)
#alpha = np.trapz(A*nxe,ze)
return alpha
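# Small self-contained check (illustrative, with made-up numbers): for a single
# layer the trapezoid rule reduces to 0.5*(A*n_bottom + A*n_top)*dz, which is
# exactly what get_alpha integrates for each sub-column.
def _check_get_alpha_single_layer():
    A = np.ones((1, 1, 2)) * 1e-26      # [m2/molecule], 1 layer, 1 g-bin, 2 channels
    vmr = 1e-6                          # trace-gas volume mixing ratio
    rhoe = np.array([2.5e25, 2.0e25])   # air number density at layer edges [molecules/m3]
    ze = np.array([0.0, 1000.0])        # edge altitudes [m]
    alpha = get_alpha(A, vmr, rhoe, ze)
    expected = 0.5 * (A[0, 0, 0]*vmr*rhoe[0] + A[0, 0, 0]*vmr*rhoe[1]) * (ze[1] - ze[0])
    return alpha[0, 0, 0], expected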
#------------------------------------ M A I N ------------------------------------
if __name__ == "__main__":
outRoot = 'hyperTest/'
# Pressure [Pa], temperature [K], height [m], water vapor [ppm] profile - standard atmosphere
# used to make OCI look up tables
inFile = '{}/atrem_tpvmr.nc'.format(outRoot)
km, pe, te, ze, rhoe, h2oe, DP = get_PTWV_profile(inFile)
# Read in Amir's c-k distribution coefficients wavelengths
# kg has dimensions km,ngbins,nwav. ngbins = 10
inFile = 'correlated_k/kg_gas_refined_v2_with_RSR_v0.nc'
kg_o2, kg_h2o, kg_co, kg_co2, kg_ch4, kg_n2o, g_bins, wav_abs = get_kg(inFile)
ngbin = len(g_bins)
# convert kg from pressure space to z space
# ---------------------------------------------------
Q = 2.15199993E+25
C = (Q*28.966) / 6.0225e23 / 1e-6
# integrate air density in each layer
rhoint = np.zeros(km)
for i in range(km):
rhoint[i] = np.trapz(rhoe[i:i+2],ze[i:i+2])
DP.shape = (km,1,1)
rhoint.shape = (km,1,1)
for ibin in range(ngbin-1):
DP = np.append(DP,DP[:,0:1,:],axis=1)
rhoint = np.append(rhoint,rhoint[:,0:1,:],axis=1)
kg_o2_z = kg_o2*C*DP/rhoint
kg_co_z = kg_co*C*DP/rhoint
kg_co2_z = kg_co2*C*DP/rhoint
kg_ch4_z = kg_ch4*C*DP/rhoint
kg_n2o_z = kg_n2o*C*DP/rhoint
kg_h2o_z = kg_h2o*C*DP/rhoint
    # get absorption optical depth with the new absorption coefficient
co_vmr = 0.1*1e-6
alpha_co = get_alpha(kg_co_z,co_vmr,rhoe,ze)
o2_vmr = 0.21
alpha_o2 = get_alpha(kg_o2_z,o2_vmr,rhoe,ze)
co2_vmr = 400.*1.0E-06
alpha_co2 = get_alpha(kg_co2_z,co2_vmr,rhoe,ze)
ch4_vmr = 1.8*1.0E-06
alpha_ch4 = get_alpha(kg_ch4_z,ch4_vmr,rhoe,ze)
n2o_vmr = 0.3*1.0E-06
alpha_n2o = get_alpha(kg_n2o_z,n2o_vmr,rhoe,ze)
sys.exit('stop')
# scale water vapor so total precipitable water
# is equal to 1 cm
# ----------------
# first calculate water vapor column [molecules/m2]
h2ocol = np.trapz(h2oe*1e-6*rhoe,ze)
# normalize profile so water vapor column expressed as total precipitable water is equal to 1 cm
# use 1 mm of rainfall = 1 kg/m2
# or 1 cm = 10 kg/m2
# 10 kg/m2 is equal to 3.34e22 water molecules/cm2
h2ocolnew = 3.34e22
    # convert from molecules/cm2 to molecules/m2
h2ocolnew = h2ocolnew*1e4
h2oenew = h2oe*(h2ocolnew/h2ocol)
# get in vmr units
h2oe_vmr = 1e-6*h2oenew
alpha_h2o = get_alpha(kg_h2o_z,h2oe_vmr,rhoe,ze)
# add up all the alphas
alpha = alpha_h2o + alpha_n2o + alpha_ch4 + alpha_co2 + alpha_o2 + alpha_co
# append zeros down to wavelength equal to 299.91 nm
# this is because ozone is not being considered right now
    # eventually will add O3 cross sections from <NAME>
# new_wl = np.arange(wav_abs.min()-0.1,299.91,-0.1)
# all_wl = np.append(wav_abs,new_wl)
all_wl = wav_abs
# nnew = len(new_wl)
# alpha = np.append(alpha,np.zeros([km,nnew]),axis=1)
# flip everything vertically so going from top of atmosphere to surface
pe = pe[-1::-1]
te = te[-1::-1]
ze = ze[-1::-1]
alpha = alpha[-1::-1,:,:]
# add dimension to be in km+1,nobs
pe.shape = (km+1,1)
te.shape = (km+1,1)
ze.shape = (km+1,1)
# read in granule geometry
Iscan = 600
Icross = 1000
SZA,VZA,RAA,SAA,VAA = get_geom(Iscan,Icross)
# Read in solar irradiance spectrum
# second dim = wavelength, irradiance
# units=nm, uW/cm^2/nm
inFile = '{}/Thuillier_F0.npy'.format(outRoot)
    F0 = np.load(inFile)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 08:33:16 2021
@author: athulsun
"""
from mat4py import loadmat
from scipy.signal import filtfilt
import numpy as np
from scipy.interpolate import interp1d,PchipInterpolator
import matplotlib.pyplot as plt
import os
import sys
from dtqpy.src.classes.DTQPy_CLASS_OPTS import *
from dtqpy.src.classes.DTQPy_CLASS_SETUP import *
from dtqpy.src.DTQPy_solve import DTQPy_solve
def f_dtqp_fowt(LinearModels,disturbance):
# load linear models
Chan = LinearModels['Chan']
#breakpoint()
# obtain the size of the arrays
nl = len(Chan)
nx,nx = np.shape(Chan[0]['A'])
nx,nu = np.shape(Chan[0]['B'])
ny = len(LinearModels['OutName'])
OutputName = LinearModels['OutName']
# initialize
Aw = np.zeros((nl,nx,nx))
Bw = np.zeros((nl,nx,nu))
Cw = np.zeros((nl,ny,nx))
Dw = np.zeros((nl,ny,nu))
xw = np.zeros((nx,nl))
uw = np.zeros((nu,nl))
yw = np.zeros((nl,ny))
ws = np.zeros((nl))
# collect
for i in range(nl):
Aw[i,:,:] = np.array(Chan[i]['A'])
Bw[i,:,:] = np.array(Chan[i]['B'])
Cw[i,:,:] = np.array(Chan[i]['C'])
Dw[i,:,:] = np.array(Chan[i]['D'])
xw[:,i] = np.squeeze(np.array(Chan[i]['xop']))
uw[:,i] = np.squeeze(np.array(Chan[i]['uop']))
yw[i,:] = np.squeeze(np.array(Chan[i]['yop']))
ws[i] = Chan[i]['WindSpeed']
# construct LPV models
# A matrix
A_op_pp = PchipInterpolator(ws, Aw, axis = 0)
A_op = lambda w: A_op_pp(w)
# Bmatrix
B_op_pp = PchipInterpolator(ws, Bw, axis = 0)
B_op = lambda w: B_op_pp(w)
# Cmatrix
C_op_pp = PchipInterpolator(ws,Cw,axis = 0)
C_op = lambda w: C_op_pp(w)
# Dmatrix
D_op_pp = PchipInterpolator(ws,Dw,axis = 0)
D_op = lambda w: D_op_pp(w)
# control operating points
Uo_pp = PchipInterpolator(ws,uw,axis = 1)
Uo_fun = lambda w: Uo_pp(w)
# state operating points
Xo_pp = PchipInterpolator(ws, xw, axis = 1)
Xo_fun = lambda w: Xo_pp(w)
# outputs
Yo_pp = PchipInterpolator(ws, yw, axis = 0)
Yo_fun = lambda w: Yo_pp(w)
# first time derivative of state operating points
DXo_pp = Xo_pp.derivative
DXo_pp = DXo_pp(nu=1)
DXo_fun = lambda w: DXo_pp(w)
Wind_o = disturbance['Chan']
Wind_speed = np.array(Wind_o['RtVAvgxh'])
tt = np.array(Wind_o['tt'])
filterflag = 1
if filterflag:
t_f = 1
dt = tt[2,0]-tt[1,0]
nb = int(np.floor(t_f/dt))
b = np.ones((nb,))/nb
a = b*nb
Wind_speed = filtfilt(b,1,Wind_speed,axis = 0)
opts = options()
opts.dt.nt = 1000
opts.solver.tolerence = 1e-16
opts.solver.maxiters = 1000000
opts.solver.function = 'pyoptsparse'
time = np.linspace(tt[0],tt[-1],opts.dt.nt)
W_pp = PchipInterpolator(np.squeeze(tt),np.squeeze(Wind_speed))
dW_pp = W_pp.derivative
dW_pp = dW_pp(nu = 1)
DW_fun = lambda t: dW_pp(t)
W_fun = lambda t: W_pp(t)
DXoDt_fun = lambda t: (-DXo_fun(W_fun(t)).T*DW_fun(t)).T
def BuildLambda(Ax):
return lambda t: Ax(t)
def TVmat2cell(f,time):
"""
        function to convert nt*nx*nz matrix to nx*nz cell
"""
# evaluate function
At = f(time)
s = np.shape(At)
if len(s) ==4:
At = np.squeeze(At)
elif len(s) == 3:
At = np.squeeze(At)
At= At.T
# get size
try:
null,m,n = np.shape(At)
except:
null,m = np.shape(At)
n = 1
# initialize storage
A = np.empty((m,n),dtype = 'O')
Aval = np.empty((8,8))
#breakpoint()
for i in range(m):
for j in range(n):
try:
Ax = PchipInterpolator(np.squeeze(time),At[:,i,j],axis = 0)
except:
Ax = PchipInterpolator(np.squeeze(time),At[:,i],axis = 0)
# work around, as defining lambda functions in a loop in python is tricky
A[i,j] = BuildLambda(Ax)
return A
## Disc2 cont
def BuildFunction(w_ops,X):
Xpp = PchipInterpolator(w_ops,X)
return lambda w: Xpp(w)
# Generator speed function
GS_fun = BuildFunction(ws,xw[4,:])
# -1*GS function
GSn_fun = BuildFunction(ws,-xw[4,:])
# Generator torque
GT_fun = BuildFunction(ws,uw[1,:])
# -Generator torque
GTn_fun = BuildFunction(ws,-uw[1,:])
# Blade pitch
BP_fun = BuildFunction(ws,uw[2,:])
# Generator power
GP_fun = BuildFunction(ws,-uw[1,:]*xw[4,:])
# State operating point values
r = Xo_fun(ws)
# lambda function to find the values of lambda function at specific indices
indexat = lambda expr,index: expr[index,:]
# get shape
nws,nx,nu = np.shape(Bw)
# initialize
ub = np.ones((nx,1))*np.inf
lb = -np.ones((nx,1))*np.inf
# set ub values for PtfmPitch and Genspeed
ub[0] = np.deg2rad(6)
ub[4] = 0.7913+0.0001
# initialize
UBx = np.empty((nx,1),dtype = 'O')
LBx = np.empty((nx,1),dtype = 'O')
    # need this function to define anonymous functions in a loop in python
def BuildLambdaUB(ub,indexat,Xo_fun,W_fun,i):
return lambda t: ub - indexat(Xo_fun(W_fun(t)),i)
# build ub and lb functions
for i in range(nx):
UBx[i,0] = BuildLambdaUB(ub[i],indexat,Xo_fun,W_fun,i)
LBx[i,0] = BuildLambdaUB(lb[i],indexat,Xo_fun,W_fun,i)
# control bounds
UBc = np.array([[lambda t: W_fun(t)-W_fun(t)],
[lambda t: max(uw[1,:])-GT_fun(W_fun(t))],
[lambda t: max(uw[2,:])-BP_fun(W_fun(t))]])
LBc = np.array([[lambda t: W_fun(t)-W_fun(t)],
[lambda t: min(uw[1,:])-GT_fun(W_fun(t))],
[lambda t: min(uw[2,:])-BP_fun(W_fun(t))]])
# initial state
X0_n = np.array( [[0.0493],
[0.1957],
[0.0000],
[0.0001],
[0.7913],
[0],
[0],
[0]])
UBs = X0_n - Xo_fun(W_fun(0))[None].T
LBs = X0_n - Xo_fun(W_fun(0))[None].T
# UB,LB
UB = [Simple_Bounds() for n in range(3)]
LB = [Simple_Bounds() for n in range(3)]
# states
UB[0].right = 2
UB[0].matrix = UBx
LB[0].right = 2
LB[0].matrix = LBx
# control bounds
UB[1].right = 1
UB[1].matrix = UBc
LB[1].right = 1
LB[1].matrix = LBc
# initial state
UB[2].right = 4
UB[2].matrix = UBs
LB[2].right = 4
LB[2].matrix = LBs
# lagrange terms
R1 = 1e-0; R2 = 1e+8
lx = 0
L = [LQ_objective() for n in range(5)]
# uRu
L[lx].left = 1
L[lx].right = 1
    L[lx].matrix = np.diag([0,R1,R2])
"""
Homemade implementation of inference using 'model.tflite'; compares its accuracy against TensorFlow's implementation
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import offline
import integer_inference
from PIL import Image
IMG_SIZE = (14, 14)
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_imgs_resize = []
test_imgs_resize = []
for img in train_images:
res = np.array(Image.fromarray(img).resize(size=IMG_SIZE))
train_imgs_resize.append(res)
train_imgs_resize = np.asarray(train_imgs_resize)
for img in test_images:
res = np.array(Image.fromarray(img).resize(size=IMG_SIZE))
test_imgs_resize.append(res)
test_imgs_resize = np.asarray(test_imgs_resize)
train_imgs_resize = train_imgs_resize / 255.0
test_imgs_resize = test_imgs_resize / 255.0
flat_train = []
flat_test = []
for i, img in enumerate(train_imgs_resize):
flat_train.append(img.flatten())
flat_train = np.asarray(flat_train)
for i, img in enumerate(test_imgs_resize):
flat_test.append(img.flatten())
flat_test = np.asarray(flat_test)
flat_train = flat_train[..., np.newaxis]
flat_test = flat_test[..., np.newaxis]
# load TFLite file
interpreter = tf.lite.Interpreter(model_path=f'model.tflite')
# Allocate memory.
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
inter_layer = interpreter.get_tensor_details()
tensorflow_no_softmax_acc = 0
tensorflow_softmax_acc = 0
homemade_acc = 0
num_test_imgs = 10000
# Conv1D offline parameters
# Hardcoded values for specific weights/biases etc. The application Netron was very helpful in understanding
# inputs and outputs to different layers. Netron gives a good overview of what a model looks like
weight_index = 4
bias_index = 6
output_index = 1
input_index = 7
quantized_weight_conv = interpreter.get_tensor(inter_layer[weight_index]['index'])
quantized_bias_conv = interpreter.get_tensor(inter_layer[bias_index]['index'])
weight_scale_conv, weight_offset_conv = inter_layer[weight_index]['quantization']
input_scale_conv, input_offset_conv = inter_layer[input_index]['quantization']
output_scale_conv, output_offset_conv = inter_layer[output_index]['quantization']
M_conv = (input_scale_conv * weight_scale_conv) / output_scale_conv
right_shift_conv, M_0_conv = offline.quantize_mult_smaller_one(M_conv)
# hidden dense layer offline parameters
weight_index = 10
bias_index = 8
output_index = 9
input_index = 0
quantized_weight_dense = interpreter.get_tensor(inter_layer[weight_index]['index'])
quantized_bias_dense = interpreter.get_tensor(inter_layer[bias_index]['index'])
weight_scale_dense, weight_offset_dense = inter_layer[weight_index]['quantization']
input_scale_dense, input_offset_dense = inter_layer[input_index]['quantization']
output_scale_dense, output_offset_dense = inter_layer[output_index]['quantization']
M_dense = (input_scale_dense * weight_scale_dense) / output_scale_dense
right_shift_dense, M_0_dense = offline.quantize_mult_smaller_one(M_dense)
# prediction layer offline parameters
weight_index = 14
bias_index = 12
output_index = 11
input_index = 9
quantized_weight_pred = interpreter.get_tensor(inter_layer[weight_index]['index'])
quantized_bias_pred = interpreter.get_tensor(inter_layer[bias_index]['index'])
weight_scale_pred, weight_offset_pred = inter_layer[weight_index]['quantization']
input_scale_pred, input_offset_pred = inter_layer[input_index]['quantization']
output_scale_pred, output_offset_pred = inter_layer[output_index]['quantization']
M_pred = (input_scale_pred * weight_scale_pred) / output_scale_pred
right_shift_pred, M_0_pred = offline.quantize_mult_smaller_one(M_pred)
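# Hedged sketch of the fixed-point decomposition that a helper such as
# offline.quantize_mult_smaller_one is assumed to perform (gemmlowp-style):
# a real multiplier 0 < M < 1 is written as M ~= M_0 * 2**(-right_shift) with
# M_0 in [0.5, 1), so the float rescale becomes an integer multiply plus a shift.
def _decompose_multiplier_sketch(M):
    right_shift = 0
    M_0 = float(M)
    while M_0 < 0.5:
        M_0 *= 2.0
        right_shift += 1
    return right_shift, M_0
# e.g. right_shift, M_0 = _decompose_multiplier_sketch(M_conv)
#      then M_conv ~= M_0 * 2.0**(-right_shift)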
avg_num_skip_dyn = 0
avg_num_skip_static = 0
total_exc = 0
for i in range(num_test_imgs):
# set up img to be infered on...
quantized_input = offline.quantize(input_details[0], flat_test[i:i+1])
interpreter.set_tensor(input_details[0]['index'], quantized_input)
# let tensorflow do the math
interpreter.invoke()
# save layer before softmax because softmax too hard for now
quantized_output_no_softmax = interpreter.get_tensor(inter_layer[output_index]['index'])
# Output with softmax
quantized_output_softmax = interpreter.get_tensor(output_details[0]['index'])
num_skip_dyn = 0
num_skip_static = 0
total_exc = 0
# Homemade inference time!
output_conv_arr, num_skip_dyn, num_skip_static, total_exc = (integer_inference.Conv(quantized_input, input_offset_conv, quantized_weight_conv,
weight_offset_conv, quantized_bias_conv, output_offset_conv, M_0_conv,
right_shift_conv, (IMG_SIZE[0] * IMG_SIZE[1], 8), num_skip_dyn, num_skip_static, total_exc))
# to do move to Conv function
output_conv_arr = output_conv_arr.flatten()
output_conv_arr = output_conv_arr[np.newaxis, ...]
output_full_conn_arr, num_skip_dyn, num_skip_static, total_exc = (integer_inference.FullyConnected(output_conv_arr, input_offset_dense,
quantized_weight_dense, weight_offset_dense,
quantized_bias_dense, output_offset_dense, M_0_dense,
right_shift_dense, (1, 16), num_skip_dyn, num_skip_static, total_exc))
output_full_conn_arr_2, num_skip_dyn, num_skip_static, total_exc = (integer_inference.FullyConnected(output_full_conn_arr, input_offset_pred,
quantized_weight_pred, weight_offset_pred,
quantized_bias_pred, output_offset_pred, M_0_pred,
right_shift_pred, (1, 10), num_skip_dyn, num_skip_static, total_exc))
avg_num_skip_dyn += num_skip_dyn
avg_num_skip_static += num_skip_static
if test_labels[i] == np.argmax(quantized_output_softmax):
tensorflow_softmax_acc += 1
    if test_labels[i] == np.argmax(quantized_output_no_softmax):
        tensorflow_no_softmax_acc += 1
"""
PRCAUCEvaluator uses `sklearn.metrics.precision_recall_curve` and
`sklearn.metrics.auc` internally.
Refer: http://scikit-learn.org/stable/modules/generated/sklearn.metrics.\
prc_auc_score.html
"""
import numpy
import pytest
import chainer
from chainer.iterators import SerialIterator
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset # NOQA
from chainer_chemistry.training.extensions.prc_auc_evaluator import PRCAUCEvaluator # NOQA
@pytest.fixture
def data0():
# `t` is correct label, `y` is dummy predict value by predictor
    t = numpy.array([0, 0, 1, 1], dtype=numpy.int32)
"""
Script entry point
"""
from src.calrissian.particle333_network import Particle333Network
from src.calrissian.layers.particle333 import Particle333
from src.calrissian.optimizers.particle333_sgd import Particle333SGD
from multiprocessing import Pool
import numpy as np
import time
import pandas as pd
import pickle
def main():
train_X = np.asarray([[0.45, 3.33], [0.0, 2.22], [0.45, -0.54]])
train_Y = np.asarray([[1.0], [0.0], [0.0]])
net = Particle333Network(cost="mse")
net.append(Particle333(2, 5, activation="sigmoid", nr=4, nc=6))
net.append(Particle333(5, 1, activation="sigmoid", nr=4, nc=6))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
print(net.cost_gradient(train_X, train_Y))
def main2():
train_X = np.random.normal(0.0, 0.1, (3, 4*4))
train_Y = np.random.normal(0.0, 0.1, (3, 1))
nr = 3
nc = 3
net = Particle333Network(cost="mse")
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(4, 4, 1),
output_shape=(2, 2, 3),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(activation="sigmoid", nr=nr, nc=nc,
apply_convolution=True,
input_shape=(2, 2, 3),
output_shape=(2, 2, 1),
input_delta=(0.5, 0.5, 0.5),
output_delta=(0.5, 0.5, 0.5)))
net.append(Particle333(4, 1, activation="sigmoid", nr=nr, nc=nc))
print(net.predict(train_X))
print(net.cost(train_X, train_Y))
def main3():
train_X = np.random.normal(0.0, 0.1, (3, 4*4))
    train_Y = np.random.normal(0.0, 0.1, (3, 1))
from sklearn.base import BaseEstimator
from ..utils.projection import Raw, RandomProjection, RandomMaclaurinProjection, TensorSketchProjection
import numpy as np
import numpy.linalg as ln
import scipy.sparse as sp
from sklearn import preprocessing
class OnlineSketch(BaseEstimator):
"""Matrix-sketching-based online item recommender
<NAME>.
"Sketching Dynamic User-Item Interactions for Online Item Recommendation"
In Proceedings of CHIIR 2017, March 2017.
"""
def __init__(self, p=None, k=40, ell=-1, r=-1, proj='Raw'):
assert p is not None
# number of dimensions of input vectors
self.p = p
# dimension of projected vectors
# for `Raw` (i.e. w/o projection), k must equat to p
self.k = self.p if proj == 'Raw' else k
# if there is no preference for ell,
# this will be sqrt(k) similarly to what the original streaming anomaly detection paper did
self.ell = int(np.sqrt(self.k)) if ell < 1 else ell
# number of tracked orthogonal bases
# * upper bound of r is ell (r <= ell) because U_r is obtained from SVD(B) (or SVD(E)), and
# B and E always have ell columns
        self.r = int(np.ceil(self.ell / 2)) if r < 1 else min(r, self.ell)
# initialize projection instance which is specified by `proj` argument
if proj == 'Raw':
self.proj = Raw(self.k, self.p)
elif proj == 'RandomProjection':
self.proj = RandomProjection(self.k, self.p)
elif proj == 'RandomMaclaurinProjection':
self.proj = RandomMaclaurinProjection(self.k, self.p)
elif proj == 'TensorSketchProjection':
self.proj = TensorSketchProjection(self.k, self.p)
self.i_mat = sp.csr_matrix([])
def update_model(self, y):
y = self.proj.reduce(np.array([y]).T)
y = np.ravel(preprocessing.normalize(y, norm='l2', axis=0))
if not hasattr(self, 'B'):
self.B = np.zeros((self.k, self.ell))
# combine current sketched matrix with input at time t
zero_cols = np.nonzero([np.isclose(s_col, 0.0) for s_col in np.sum(self.B, axis=0)])[0]
j = zero_cols[0] if zero_cols.size != 0 else self.ell - 1 # left-most all-zero column in B
self.B[:, j] = y
U, s, V = ln.svd(self.B, full_matrices=False)
# update the tracked orthonormal bases
self.U_r = U[:, :self.r]
# update ell orthogonal bases
U_ell = U[:, :self.ell]
s_ell = s[:self.ell]
# shrink step in the Frequent Directions algorithm
# (shrink singular values based on the squared smallest singular value)
delta = s_ell[-1] ** 2
s_ell = np.sqrt(s_ell ** 2 - delta)
self.B = np.dot(U_ell, np.diag(s_ell))
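# Toy illustration (not part of the recommender classes above): the Frequent
# Directions shrink step subtracts the smallest squared singular value from all
# squared singular values, so the least important direction is zeroed out and a
# column of the sketch is freed for the next update. The matrix below is made up.
def _frequent_directions_shrink_demo():
    B = np.random.RandomState(0).normal(size=(8, 4))
    U, s, V = ln.svd(B, full_matrices=False)
    delta = s[-1] ** 2
    s_shrunk = np.sqrt(s ** 2 - delta)
    return np.dot(U, np.diag(s_shrunk))  # last column is (numerically) zero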
class OnlineRandomSketch(OnlineSketch):
"""Inspired by: Streaming Anomaly Detection using Randomized Matrix Sketching
[WIP] many matrix multiplications are computational heavy
"""
def update_model(self, y):
y = self.proj.reduce(np.array([y]).T)
y = np.ravel(preprocessing.normalize(y, norm='l2', axis=0))
if not hasattr(self, 'E'):
self.E = np.zeros((self.k, self.ell))
# combine current sketched matrix with input at time t
zero_cols = np.nonzero([np.isclose(s_col, 0.0) for s_col in np.sum(self.E, axis=0)])[0]
        j = zero_cols[0] if zero_cols.size != 0 else self.ell - 1  # left-most all-zero column in E
self.E[:, j] = y
Gaussian = np.random.normal(0., 0.1, (self.k, 100 * self.ell))
MM = np.dot(self.E, self.E.T)
Q, R = ln.qr(np.dot(MM, Gaussian))
# eig() returns eigen values/vectors with unsorted order
s, A = ln.eig(np.dot(np.dot(Q.T, MM), Q))
order = np.argsort(s)[::-1]
s = s[order]
A = A[:, order]
U = np.dot(Q, A)
# update the tracked orthonormal bases
self.U_r = U[:, :self.r]
# update ell orthogonal bases
U_ell = U[:, :self.ell]
s_ell = s[:self.ell]
# shrink step in the Frequent Directions algorithm
delta = s_ell[-1]
s_ell = np.sqrt(s_ell - delta)
self.E = np.dot(U_ell, np.diag(s_ell))
class OnlineSparseSketch(OnlineSketch):
"""Inspired by: Efficient Frequent Directions Algorithm for Sparse Matrices
"""
def update_model(self, y):
y = self.proj.reduce(np.array([y]).T)
y = preprocessing.normalize(y, norm='l2', axis=0) # (k, 1)
if not hasattr(self, 'B'):
self.p_failure = 0.1
self.B = np.zeros((self.k, self.ell))
self.A = np.array([])
U, s, V = ln.svd(self.B, full_matrices=False)
# update the tracked orthonormal bases
self.U_r = U[:, :self.r]
if self.A.size == 0:
self.A = np.empty_like(y)
self.A[:] = y[:]
else:
            self.A = np.concatenate((self.A, y), axis=1)
#!/usr/bin/env python
"""
Use muon flux weights to calculate an effective livetime for combined
CORSIKA samples as a function of energy.
"""
from argparse import ArgumentParser
from os.path import expandvars
parser = ArgumentParser()
parser.add_argument("outfile", help="save plot to file")
args = parser.parse_args()
import matplotlib
matplotlib.use('agg')
import pylab, numpy
from icecube import dataclasses, MuonGun
surface = MuonGun.Cylinder(1600, 800)
area = numpy.pi**2*surface.radius*(surface.radius+surface.length)
# 1 file of E^-2.6 5-component 3e4-1e9 GeV (3:2:1:1:1)
soft = 4e5*MuonGun.corsika_genprob('CascadeOptimized5Comp')
# 1 file of E^-2 5-component 6e2-1e11 GeV (10:5:3:2:1)
hard = 2.5e6*MuonGun.corsika_genprob('Standard5Comp')
# In order to compare to "unweighted" CORSIKA, turn the Hoerandel flux
# into a probability (since we happen to know the integral)
areanorm = 0.131475115*area
# 1 file of natural-spectrum ("unweighted") CORSIKA
unweighted = (2.5e7/areanorm)*MuonGun.corsika_genprob('Hoerandel5')
model = MuonGun.load_model('GaisserH4a_atmod12_SIBYLL')
model.flux.min_multiplicity = 1
model.flux.max_multiplicity = 1
# spectrum = MuonGun.OffsetPowerLaw(5.0, 5e2, 8e2, 10**4.5)
spectrum = MuonGun.OffsetPowerLaw(5, 8e2, 2e3, 1e5)
# spectrum = MuonGun.OffsetPowerLaw(1.1, 650, 800, 1e8)
gun = 1e5*MuonGun.EnergyDependentSurfaceInjector(surface, model.flux, spectrum, model.radius)
# gun = 1e5*MuonGun.StaticSurfaceInjector(surface, model.flux, spectrum, model.radius)
# gun.target_surface = lambda e: surface
def get_weight(weighter, energy, zenith=numpy.pi/8, scale=True):
shape = energy.shape
if scale:
x = numpy.array([gun.target_surface(e).radius - 1 for e in energy])
else:
# x = numpy.ones(shape[0])*surface.radius - 1
x = numpy.ones(shape[0])*surface.radius - 1
# x = surface.radius*numpy.ones(shape) - 1
y = numpy.zeros(shape)
# z = z*numpy.ones(shape)
if scale:
z = numpy.array([gun.target_surface(e).center.z + gun.target_surface(e).length/2. for e in energy])
else:
z = numpy.ones(shape[0])*(surface.center.z + surface.length/2.)
    azimuth = numpy.zeros(shape)
"""
Unsupervised MoE Variational AutoEncoder (VAE)
==============================================
Credit: <NAME>
Based on:
- https://towardsdatascience.com/mixture-of-variational-autoencoders-
a-fusion-between-moe-and-vae-22c0901a6675
The Variational Autoencoder (VAE) is a neural network that tries to learn the
shape of the input space. Once trained, the model can be used to generate
new samples from the input space.
If we have labels for our input data, it’s also possible to condition the
generation process on the label. The idea here is to achieve the same results
using an unsupervised approach.
Mixture of Experts
------------------
MoE is a supervised learning framework. MoE relies on the possibility that the
input might be segmented according to the x->y mapping. The challenge is to
train a model that learns the split points while at the same time learning the
mapping that holds within each segment.
MoE does so using an architecture of multiple subnetworks - one manager and
multiple experts. The manager maps the input into a soft decision over the
experts, which is used in two contexts:
1. The output of the network is a weighted average of the experts' outputs,
where the weights are the manager's output.
2. The loss function is $\sum_i p_i(y - \bar{y_i})^2$. y is the label,
$\bar{y_i}$ is the output of the i'th expert, $p_i$ is the i'th entry of
the manager's output. When you differentiate the loss, you get these
results: a) the manager decides for each expert how much it contributes to
the loss. In other words, the manager chooses which experts should tune
their weights according to their error, and b) the manager tunes the
probabilities it outputs in such a way that the experts that got it right
will get higher probabilities than those that didn’t. This loss function
encourages the experts to specialize in different kinds of inputs.
MoE is a framework for supervised learning. Surely we can change y to be x for
the unsupervised case, right? MoE's power stems from the fact that each expert
specializes in a different segment of the input space with a unique mapping
x ->y. If we use the mapping x->x, each expert will specialize in a different
segment of the input space with unique patterns in the input itself.
We'll use VAEs as the experts. Part of the VAE’s loss is the reconstruction
loss, where the VAE tries to reconstruct the original input image x.
A cool byproduct of this architecture is that the manager can classify the
digit found in an image using its output vector!
One thing we need to be careful about when training this model is that the
manager could easily degenerate into outputting a constant vector -
regardless of the input in hand. This results in one VAE specialized in all
digits, and nine VAEs specialized in nothing. One way to mitigate it, which
is described in the MoE paper, is to add a balancing term to the loss.
It encourages the outputs of the manager over a batch of inputs to
be balanced: $\sum_\text{examples in batch} \vec{p} \approx Uniform$.
Let's begin with importing stuffs:
"""
import os
import sys
if "CI_MODE" in os.environ:
sys.exit()
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.distributions import Normal, kl_divergence
from pynet.datasets import DataManager, fetch_minst
from pynet.interfaces import DeepLearningInterface
from pynet.plotting import Board, update_board
#############################################################################
# The model will be trained on MNIST - handwritten digits dataset. The input
# is an image in R(28×28).
def flatten(arr):
return arr.flatten()
data = fetch_minst(datasetdir="/neurospin/nsap/datasets/minst")
manager = DataManager(
input_path=data.input_path,
metadata_path=data.metadata_path,
stratify_label="label",
number_of_folds=10,
batch_size=100,
test_size=0,
input_transforms=[flatten],
add_input=True,
sample_size=1)
#############################################################################
# The Model
# ---------
#
# The model is composed of two sub-networks:
#
# 1. Given x (image), encode it into a distribution over the latent space -
# referred to as Q(z|x).
# 2. Given z in latent space (code representation of an image), decode it into
# the image it represents - referred to as f(z).
class Encoder(nn.Module):
""" The encoder part of VAE.
"""
def __init__(self, input_dim, hidden_dim, latent_dim):
""" Init class.
Parameters
----------
input_dim: int
the size of input (in case of MNIST 28 * 28).
hidden_dim: int
the size of hidden dimension.
latent_dim: int
the latent dimension.
"""
super().__init__()
self.linear = nn.Linear(input_dim, hidden_dim)
self.mu = nn.Linear(hidden_dim, latent_dim)
self.logvar = nn.Linear(hidden_dim, latent_dim)
def forward(self, x):
hidden = torch.sigmoid(self.linear(x))
z_mu = self.mu(hidden)
z_logvar = self.logvar(hidden)
return z_mu, z_logvar
class Decoder(nn.Module):
""" The decoder part of VAE
"""
def __init__(self, latent_dim, hidden_dim, output_dim):
""" Init class.
Parameters
----------
latent_dim: int
the latent size.
hidden_dim: int
the size of hidden dimension.
output_dim: int
the output dimension (in case of MNIST it is 28 * 28).
"""
super().__init__()
self.latent_to_hidden = nn.Linear(latent_dim, hidden_dim)
self.hidden_to_out = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
hidden = torch.sigmoid(self.latent_to_hidden(x))
predicted = torch.sigmoid(self.hidden_to_out(hidden))
return predicted
class VAE(nn.Module):
""" This is the VAE.
"""
def __init__(self, input_dim, hidden_dim, latent_dim):
""" Init class.
Parameters
----------
input_dim: int
the size of input (in case of MNIST 28 * 28).
hidden_dim: int
the size of hidden dimension.
latent_dim: int
the latent dimension.
"""
super(VAE, self).__init__()
self.encoder = Encoder(input_dim, hidden_dim, latent_dim)
self.decoder = Decoder(latent_dim, hidden_dim, input_dim)
def forward(self, x):
# encode an image into a distribution over the latent space
z_mu, z_logvar = self.encoder(x)
# sample a latent vector from the latent space - using the
# reparameterization trick
# sample from the distribution having latent parameters z_mu, z_var
z_var = torch.exp(z_logvar) + 1e-5
std = torch.sqrt(z_var)
eps = torch.randn_like(std)
x_sample = eps.mul(std).add_(z_mu)
# decode the latent vector
predicted = self.decoder(x_sample)
return predicted, {"z_mu": z_mu, "z_var": z_var}
class VAELoss(object):
def __init__(self, use_distributions=True):
super(VAELoss, self).__init__()
self.layer_outputs = None
self.use_distributions = use_distributions
def __call__(self, x_sample, x):
if self.layer_outputs is None:
raise ValueError("The model needs to return the latent space "
"distribution parameters z_mu, z_var.")
if self.use_distributions:
p = x_sample
q = self.layer_outputs["q"]
else:
z_mu = self.layer_outputs["z_mu"]
z_var = self.layer_outputs["z_var"]
p = Normal(x_sample, 0.5)
q = Normal(z_mu, z_var.pow(0.5))
# reconstruction loss: log likelihood
ll_loss = - p.log_prob(x).sum(-1, keepdim=True)
# regularization loss: KL divergence
kl_loss = kl_divergence(q, Normal(0, 1)).sum(-1, keepdim=True)
combined_loss = ll_loss + kl_loss
return combined_loss, {"ll_loss": ll_loss, "kl_loss": kl_loss}
class Manager(nn.Module):
def __init__(self, input_dim, hidden_dim, experts, latent_dim,
log_alpha=None):
""" Init class.
Parameters
----------
input_dim: int
the size of input (in case of MNIST 28 * 28).
hidden_dim: int
the size of hidden dimension.
experts: list of VAE
the manager experts.
"""
super(Manager, self).__init__()
self._experts = nn.ModuleList(experts)
self.latent_dim = latent_dim
self._experts_results = []
self.linear1 = nn.Linear(input_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, len(experts))
def forward(self, x):
hidden = torch.sigmoid(self.linear1(x))
logits = self.linear2(hidden)
        probs = func.softmax(logits, dim=1)
self._experts_results = []
for net in self._experts:
self._experts_results.append(net(x))
return probs, {"experts_results": self._experts_results}
class ManagerLoss(object):
def __init__(self, balancing_weight=0.1):
""" Init class.
Parameters
----------
balancing_weight: float, default 0.1
how much the balancing term will contribute to the loss.
"""
super(ManagerLoss, self).__init__()
self.layer_outputs = None
self.balancing_weight = balancing_weight
self.criterion = VAELoss(use_distributions=False)
def __call__(self, probs, x):
if self.layer_outputs is None:
raise ValueError("The model needs to return the latent space "
"distribution parameters z_mu, z_var.")
losses = []
for result in self.layer_outputs["experts_results"]:
self.criterion.layer_outputs = result[1]
loss, extra_loss = self.criterion(result[0], x)
losses.append(loss.view(-1, 1))
losses = torch.cat(losses, dim=1)
expected_expert_loss = torch.mean(
torch.sum(losses * probs, dim=1), dim=0)
experts_importance = torch.sum(probs, dim=0)
# Remove effect of Bessel correction
experts_importance_std = experts_importance.std(dim=0, unbiased=False)
balancing_loss = torch.pow(experts_importance_std, 2)
combined_loss = (
expected_expert_loss + self.balancing_weight * balancing_loss)
return combined_loss, {"expected_expert_loss": expected_expert_loss,
"balancing_loss": balancing_loss}
#############################################################################
# Training
# --------
#
# We'll train the model to optimize the losses using Adam optimizer.
def sampling(signal):
""" Sample from the distribution and generate a image.
"""
device = signal.object.device
experts = signal.object.model._experts
latent_dim = signal.object.model.latent_dim
board = signal.object.board
# sample and generate a image
z = torch.randn(1, latent_dim).to(device)
# run only the decoder
images = []
for model in experts:
model.eval()
with torch.no_grad():
reconstructed_img = model.decoder(z)
img = reconstructed_img.view(-1, 28, 28).cpu().detach().numpy()
img = np.asarray([ndimage.zoom(arr, 5, order=0) for arr in img])
images.append(img)
# display result
    images = np.asarray(images)
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"H_class/Lutchyn_Oreg/builders" submodule
This sub-package builds Lutchyn-Oreg Hamiltonians.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
import scipy.constants as cons
from MajoranaNanowires.Functions import diagonal
#%%
def LO_1D_builder(N,dis,m_eff,mu,B,aR,d, space='position', k_vec=np.nan ,sparse='no'):
"""
    1D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 1D
    Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is an array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
an array with the on-site Rashba couplings in the direction i.
d: float or arr
        Superconducting pairing amplitude.
        -If d is a float, d is the superconducting pairing amplitude,
        with the same value in every site.
        -If d is an array, each element of the array is the on-site
        superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsety of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones(N)
if np.isscalar(mu):
mu = mu * np.ones(N)
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRy=np.zeros(N)
aRz=aR*np.ones(N)/(2*dis)
elif np.ndim(aR)==1:
if len(aR)==3:
aRy=aR[1]*np.ones(N)/(2*dis)
aRz=aR[2]*np.ones(N)/(2*dis)
else:
aRy=np.zeros(N)
aRz=aR/(2*dis)
else:
aRy=aR[1]/(2*dis)
aRz=aR[2]/(2*dis)
if np.isscalar(d):
d = d * np.ones(N)
if space=='momentum':
n_k=len(k_vec)
#Obtain the hopping and on-site energies:
t=cons.hbar**2/(2*m_eff*cons.m_e*(dis*1e-9)**2)/cons.e*1e3
e = 2 * t - mu
##Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(4 * N), int(4 * N)),dtype=complex)
elif sparse=='yes':
H=scipy.sparse.dok_matrix((int(4*N),int(4*N)),dtype=complex)
t, aRy, Bz = np.repeat(t,2), np.repeat(aRy,2), np.repeat(Bz,2)
Bz[1::2], aRy[1::2] = -Bz[::2], -aRy[::2]
for i in range(2):
H[diagonal(2*N*(i+1),init=2*N*i,k=1,step=2)], H[diagonal(2*N*(i+1),init=2*N*i,k=-1,step=2)] = (-1)**(i)*Bx-1j*By, (-1)**(i)*Bx+1j*By
H[diagonal(2*N*(i+1),init=2*N*i)] = (-1)**i*(Bz+np.repeat(e,2))
H[diagonal(2*N*(i+1),init=2*N*i,k=-2)] = -1*(-1)**i*t[2::]+1j*aRy[2::]
H[diagonal(2*N*(i+1),init=2*N*i,k=2)] = -1*(-1)**i*t[2::]-1j*aRy[2::]
H[diagonal(2*N*(i+1),k=1,step=2,init=1+2*N*i)] += -1*(-1)**i*aRz[1::]
H[diagonal(2*N*(i+1),k=-1,step=2,init=1+2*N*i)] += -1*(-1)**i*aRz[1::]
H[diagonal(2*N*(i+1),init=2*N*i,k=3,step=2)] += (-1)**i*aRz[1::]
H[diagonal(2*N*(i+1),init=2*N*i,k=-3,step=2)] += (-1)**i*aRz[1::]
H[diagonal(4*N,k=2*N+1,step=2)], H[diagonal(4*N,k=-2*N-1,step=2)] = -np.conj(d), -d
H[diagonal(4*N,k=2*N-1,step=2,init=1)], H[diagonal(4*N,k=-2*N+1,step=2,init=1)] = np.conj(d), d
#Build it in momentum space if required:
if space=='momentum':
if sparse=='no':
H_k = np.zeros((int(4 * N), int(4 * N), int(n_k)),dtype=complex)
for i in range(n_k):
H_k[:,:,i]=H
H_k[2 * (N - 1):2 * (N - 1) + 2, 0: 2,i] += np.array([[-t[2]-1j*aRy[2], aRz[1]], [-aRz[1], -t[2]+1j*aRy[2]]])*np.exp(-1j*k_vec[i]*N)
H_k[2 * (N - 1)+2*N:2 * (N - 1) + 2+2*N, 2*N: 2+2*N,i] += -np.array([[-t[2]+1j*aRy[2], aRz[1]], [-aRz[1], -t[2]-1j*aRy[2]]])*np.exp(1j*k_vec[i]*N)
H_k[0: 2, 2 * (N - 1):2 * (N - 1) + 2,i] += np.array([[-t[2]+1j*aRy[2], -aRz[1]], [aRz[1], -t[2]-1j*aRy[2]]])*np.exp(1j*k_vec[i]*N)
H_k[2*N: 2+2*N, 2 * (N - 1)+2*N:2 * (N - 1) + 2+2*N,i] += -np.array([[-t[2]-1j*aRy[2], -aRz[1]], [aRz[1], -t[2]+1j*aRy[2]]])*np.exp(-1j*k_vec[i]*N)
return (H_k)
elif sparse=='yes':
return(H)
else:
return (H)
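# Worked numeric sketch (illustrative parameters, not from the module): the
# discretized hopping used above is t = hbar^2 / (2 m_eff m_e dis^2), converted
# to meV, and the on-site energy is e = 2*t - mu. The InAs-like effective mass
# m_eff ~ 0.023 and the 10 nm lattice spacing below are example values only.
def _hopping_energy_sketch(m_eff=0.023, dis=10.0, mu=0.0):
    t = cons.hbar**2/(2*m_eff*cons.m_e*(dis*1e-9)**2)/cons.e*1e3  # hopping [meV]
    e = 2 * t - mu                                                # on-site energy [meV]
    return t, e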
#%%
def LO_1D_builder_NoSC(N,dis,m_eff,mu,B,aR, space='position', k_vec=np.nan ,sparse='no'):
"""
    1D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 1D
    Lutchyn-Oreg chain without superconductivity.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is an array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
an array with the on-site Rashba couplings in the direction i.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a sparse dok_matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones(N)
if np.isscalar(mu):
mu = mu * np.ones(N)
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRy=np.zeros(N)
aRz=aR*np.ones(N)/(2*dis)
elif np.ndim(aR)==1:
if len(aR)==3:
aRy=aR[1]*np.ones(N)/(2*dis)
aRz=aR[2]*np.ones(N)/(2*dis)
else:
aRy=np.zeros(N)
aRz=aR/(2*dis)
else:
aRy=aR[1]/(2*dis)
aRz=aR[2]/(2*dis)
if space=='momentum':
n_k=len(k_vec)
#Obtain the hopping and the on-site energies:
t=cons.hbar**2/(2*m_eff*cons.m_e*(dis*1e-9)**2)/cons.e*1e3
e = 2 * t - mu
##Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(2 * N), int(2 * N)),dtype=complex)
elif sparse=='yes':
H=scipy.sparse.dok_matrix((int(2*N),int(2*N)),dtype=complex)
Bz,Bx,By=np.repeat(Bz,2),np.repeat(Bx,2), 1j*np.repeat(By,2)
Bx[1::2], By[1::2], Bz[1::2] = 0, 0, -Bz[::2]
H[diagonal(2*N,k=1)], H[diagonal(2*N,k=-1)] = Bx[:-1]-By[:-1], Bx[:-1]+By[:-1]
H[diagonal(2*N)]=Bz+np.repeat(e,2)
t=-np.repeat(t,2)
aRy=np.repeat(aRy,2)
aRy[1::2]= -aRy[::2]
H[diagonal(2*N,k=-2)], H[diagonal(2*N,k=2)] = t[2::]+1j*aRy[2::], t[2::]-1j*aRy[2::]
H[diagonal(2*N,k=1,step=2,init=1)] += -aRz[1::]
H[diagonal(2*N,k=-1,step=2,init=1)] += -aRz[1::]
H[diagonal(2*N,k=3,step=2)] += aRz[1::]
H[diagonal(2*N,k=-3,step=2)] += aRz[1::]
#Build it in momentum space if required:
if space=='momentum':
if sparse=='no':
H_k = np.zeros((int(2 * N), int(2 * N), int(n_k)),dtype=complex)
for i in range(n_k):
H_k[:,:,i]=H
H_k[2 * (N - 1):2 * (N - 1) + 2, 0: 2,i] += np.array([[-t[2]-1j*aRy[2], aRz[1]], [-aRz[1], -t[2]+1j*aRy[2]]])*np.exp(-1j*k_vec[i]*N)
H_k[0: 2, 2 * (N - 1):2 * (N - 1) + 2,i] += np.array([[-t[2]+1j*aRy[2], -aRz[1]], [aRz[1], -t[2]-1j*aRy[2]]])*np.exp(1j*k_vec[i]*N)
return (H_k)
elif sparse=='yes':
return (H)
else:
return (H)
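#Illustrative usage sketch (not part of the original module; all parameter
#values below are placeholders): build the normal-state Hamiltonian of a
#1000-site wire and obtain its lowest eigenvalues.
#
# import scipy.sparse.linalg
# H = LO_1D_builder_NoSC(1000, dis=10, m_eff=0.023, mu=0.5,
#                        B=[0.5, 0, 0], aR=[0, 0, 20], sparse='yes')
# E = scipy.sparse.linalg.eigsh(H.tocsc(), k=6, which='SA',
#                               return_eigenvectors=False)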
#%%
def LO_2D_builder(N,dis,m_eff,mu,B,aR, d, space='position', k_vec=np.nan ,sparse='no'):
"""
2D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 2D
Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 2D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 2D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 2D array with the on-site Rashba couplings in the direction i.
d: float or arr
Superconducting pairing amplitude.
-If d is a float, d is the superconducting pairing amplitude,
with the same value on every site.
-If d is a 2D array, each element of the array is the on-site
superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a sparse dok_matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Ny, Nz = N[0], N[1]
if np.ndim(dis)==0:
dis_y, dis_z = dis, dis
else:
dis_y, dis_z = dis[0], dis[1]
m = 4 * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones(N)
aRy=aR[1]*np.ones(N)
aRz=aR[2]*np.ones(N)
else:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
if np.isscalar(d):
d = d * np.ones(N)
#Obtain the hoppings and the on-site energies:
ty=cons.hbar**2/(2*(m_eff[1::,:]+m_eff[:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,1::]+m_eff[:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*ty[0,:].reshape(1,Nz),np.append(ty[1::,:]+ty[:-1,:],2*ty[-1,:].reshape(1,Nz),axis=0),axis=0)
e += np.append(2*tz[:,0].reshape(Ny,1),np.append(tz[:,1::]+tz[:,:-1],2*tz[:,-1].reshape(Ny,1),axis=1),axis=1)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,d,Bx,By,Bz=e.flatten(),d.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[1::,:]+aRx[:-1,:])/(4*dis_y)).flatten(),2), ((aRz[1::,:]+aRz[:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,1::]+aRx[:,:-1])/(4*dis_z)).flatten(), ((aRy[:,1::]+aRy[:,:-1])/(4*dis_z)).flatten()
aRx_ky[1::2] = -aRx_ky[::2]
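#Insert zeros where the flattened z-hopping (offset-2 diagonal) would otherwise
#connect the last site of one row to the first site of the next row: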
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny,2*(Nz-1)),2),np.zeros(2*(Ny-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1)))
for i in range(2):
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1,step=2)], H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1,step=2)] = (-1)**(i)*Bx-1j*By, (-1)**(i)*Bx+1j*By
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i)] = (-1)**(i)*(np.repeat(e,2) + Bz)
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2*Nz)] = -1*(-1)**(i)*ty+1j*aRx_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2*Nz)] = -1*(-1)**(i)*ty-1j*aRx_ky
H[diagonal(int(m/2)*(i+1),k=2*Nz-1,step=2,init=1+int(m/2)*i)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),k=-2*Nz+1,step=2,init=1+int(m/2)*i)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1-2*Nz,step=2)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),k=1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),k=-1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=3,step=2)] += -1*(-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-3,step=2)] += -1*(-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(m,k=int(m/2)+1,step=2)], H[diagonal(m,k=-int(m/2)-1,step=2)] = -np.conj(d), -d
H[diagonal(m,k=int(m/2)-1,step=2,init=1)], H[diagonal(m,k=-int(m/2)+1,step=2,init=1)] = np.conj(d), d
return (H)
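#Illustrative usage sketch (not part of the original module; all parameter
#values below are placeholders): cross-section Hamiltonian with a smooth
#on-site chemical-potential profile.
#
# Ny, Nz = 40, 40
# mu_profile = 0.5 - 0.1*np.tile(np.linspace(-1, 1, Nz)**2, (Ny, 1))
# H = LO_2D_builder([Ny, Nz], dis=5, m_eff=0.023, mu=mu_profile,
#                   B=[0.3, 0, 0], aR=30, d=0.2, sparse='yes')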
#%%
def LO_2D_builder_NoSC(N,dis,m_eff,mu,B,aR, space='position', k_vec=np.nan ,sparse='no'):
"""
2D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 2D
Lutchyn-Oreg chain without superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 2D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 2D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 2D array with the on-site Rashba couplings in the direction i.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a sparse dok_matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Ny, Nz = N[0], N[1]
if np.ndim(dis)==0:
dis_y, dis_z = dis, dis
else:
dis_y, dis_z = dis[0], dis[1]
m = 2 * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones(N)
aRy=aR[1]*np.ones(N)
aRz=aR[2]*np.ones(N)
else:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
#Obtain the hoppings and the on-site energies:
ty=cons.hbar**2/(2*(m_eff[1::,:]+m_eff[:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,1::]+m_eff[:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*ty[0,:].reshape(1,Nz),np.append(ty[1::,:]+ty[:-1,:],2*ty[-1,:].reshape(1,Nz),axis=0),axis=0)
e += np.append(2*tz[:,0].reshape(Ny,1),np.append(tz[:,1::]+tz[:,:-1],2*tz[:,-1].reshape(Ny,1),axis=1),axis=1)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,Bx,By,Bz=e.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[1::,:]+aRx[:-1,:])/(4*dis_y)).flatten(),2), ((aRz[1::,:]+aRz[:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,1::]+aRx[:,:-1])/(4*dis_z)).flatten(), ((aRy[:,1::]+aRy[:,:-1])/(4*dis_z)).flatten()
aRx_ky[1::2] = -aRx_ky[::2]
H[diagonal(m,k=1,step=2)], H[diagonal(m,k=-1,step=2)] = Bx-1j*By, Bx+1j*By
H[diagonal(m)] = np.repeat(e,2) + Bz
H[diagonal(m,k=2*Nz)] = -ty+1j*aRx_ky
H[diagonal(m,k=-2*Nz)] = -ty-1j*aRx_ky
H[diagonal(m,k=2*Nz-1,step=2,init=1)] += -1j*aRz_ky
H[diagonal(m,k=-2*Nz+1,step=2,init=1)] += 1j*aRz_ky
H[diagonal(m,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(m,k=-1-2*Nz,step=2)] += 1j*aRz_ky
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny,2*(Nz-1)),2),np.zeros(2*(Ny-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1)))
H[diagonal(m,k=2)] = -tz
H[diagonal(m,k=-2)] = -tz
H[diagonal(m,k=1,step=2,init=1)] += aRx_kz+1j*aRy_kz
H[diagonal(m,k=-1,step=2,init=1)] += aRx_kz-1j*aRy_kz
H[diagonal(m,k=3,step=2)] += -aRx_kz+1j*aRy_kz
H[diagonal(m,k=-3,step=2)] += -aRx_kz-1j*aRy_kz
return (H)
#%%
def LO_3D_builder(N,dis,m_eff,mu,B,aR,d, space='position', k_vec=np.nan ,sparse='yes'):
"""
3D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 3D
Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 3D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 3D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 3D array with the on-site Rashba couplings in the direction i.
d: float or arr
Superconducting pairing amplitude.
-If d is a float, d is the superconducting pairing amplitude,
with the same value on every site.
-If d is a 3D array, each element of the array is the on-site
superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a sparse dok_matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Nx, Ny, Nz = N[0], N[1], N[2]
if np.ndim(dis)==0:
dis_x, dis_y, dis_z = dis, dis, dis
else:
dis_x, dis_y, dis_z = dis[0], dis[1], dis[2]
m = 4 * Nx * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Nx,Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros((Nx,Ny,Nz))
aRy=np.zeros((Nx,Ny,Nz))
aRz=aR*np.ones((Nx,Ny,Nz))
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones((Nx,Ny,Nz))
aRy=aR[1]*np.ones((Nx,Ny,Nz))
aRz=aR[2]*np.ones((Nx,Ny,Nz))
else:
aRx=np.zeros((Nx,Ny,Nz))
aRy=np.zeros((Nx,Ny,Nz))
aRz=aR*np.ones((Nx,Ny,Nz))
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
if np.isscalar(d):
d = d * np.ones((Nx,Ny,Nz))
if space=='momentum':
n_k=len(k_vec)
#Obtain the hoppings and the on-site energies:
tx=cons.hbar**2/(2*(m_eff[1::,:,:]+m_eff[:-1,:,:])/2*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3
ty=cons.hbar**2/(2*(m_eff[:,1::,:]+m_eff[:,:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,:,1::]+m_eff[:,:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*tx[0,:,:].reshape(1,Ny,Nz),np.append(tx[1::,:,:]+tx[:-1,:,:],2*tx[-1,:,:].reshape(1,Ny,Nz),axis=0),axis=0)
e += np.append(2*ty[:,0,:].reshape(Nx,1,Nz),np.append(ty[:,1::,:]+ty[:,:-1,:],2*ty[:,-1,:].reshape(Nx,1,Nz),axis=1),axis=1)
e += np.append(2*tz[:,:,0].reshape(Nx,Ny,1),np.append(tz[:,:,1::]+tz[:,:,:-1],2*tz[:,:,-1].reshape(Nx,Ny,1),axis=2),axis=2)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,d,Bx,By,Bz=e.flatten(),d.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
tx, aRy_kx, aRz_kx = np.repeat(tx.flatten(),2), np.repeat(((aRy[1::,:,:]+aRy[:-1,:,:])/(4*dis_x)).flatten(),2), ((aRz[1::,:,:]+aRz[:-1,:,:])/(4*dis_x)).flatten()
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[:,1::,:]+aRx[:,:-1,:])/(4*dis_y)).flatten(),2), ((aRz[:,1::,:]+aRz[:,:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,:,1::]+aRx[:,:,:-1])/(4*dis_z)).flatten(), ((aRy[:,:,1::]+aRy[:,:,:-1])/(4*dis_z)).flatten()
aRy_kx[1::2], aRx_ky[1::2] = -aRy_kx[::2], -aRx_ky[::2]
ty, aRx_ky, aRz_ky = np.insert(ty,np.repeat(np.arange(2*(Nz*Ny-Nz),2*(Ny*Nz-Nz)*Nx,2*(Ny*Nz-Nz)),2*Nz),np.zeros(2*Nz*(Nx-1))), np.insert(aRx_ky,np.repeat(np.arange(2*(Nz*Ny-Nz),2*(Ny*Nz-Nz)*Nx,2*(Ny*Nz-Nz)),2*Nz),np.zeros(2*Nz*(Nx-1))),np.insert(aRz_ky,np.repeat(np.arange((Nz*Ny-Nz),(Ny*Nz-Nz)*Nx,(Ny*Nz-Nz)),Nz),np.zeros(Nz*(Nx-1)))
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny*Nx,2*(Nz-1)),2),np.zeros(2*Nx*(Ny-1)+2*(Nx-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny*Nx,(Nz-1)),np.zeros(Nx*(Ny-1)+(Nx-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny*Nx,(Nz-1)),np.zeros(Nx*(Ny-1)+(Nx-1)))
for i in range(2):
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1,step=2)], H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1,step=2)] = (-1)**(i)*Bx-1j*By, (-1)**(i)*Bx+1j*By
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i)] = (-1)**(i)*np.repeat(e,2) + (-1)**(i)*Bz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2*Ny*Nz)] = -1*(-1)**(i)*tx-1j*aRy_kx
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2*Ny*Nz)] = -1*(-1)**(i)*tx+1j*aRy_kx
H[diagonal(int(m/2)*(i+1),k=2*Ny*Nz-1,step=2,init=1+int(m/2)*i)] += -1*(-1)**(i)*aRz_kx
H[diagonal(int(m/2)*(i+1),k=-2*Ny*Nz+1,step=2,init=1+int(m/2)*i)] += -1*(-1)**(i)*aRz_kx
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1+2*Ny*Nz,step=2)] += (-1)**(i)*aRz_kx
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1-2*Ny*Nz,step=2)] += (-1)**(i)*aRz_kx
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2*Nz)] = -1*(-1)**(i)*ty+1j*aRx_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2*Nz)] = -1*(-1)**(i)*ty-1j*aRx_ky
H[diagonal(int(m/2)*(i+1),k=2*Nz-1,step=2,init=1+int(m/2)*i)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),k=-2*Nz+1,step=2,init=1+int(m/2)*i)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1-2*Nz,step=2)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),k=1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),k=-1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=3,step=2)] += -1*(-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-3,step=2)] += -1*(-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(m,k=int(m/2)+1,step=2)], H[diagonal(m,k=-int(m/2)-1,step=2)] = -np.conj(d), -d
H[diagonal(m,k=int(m/2)-1,step=2,init=1)], H[diagonal(m,k=-int(m/2)+1,step=2,init=1)] = np.conj(d), d
#Build it in momentum space if required:
if space=='momentum':
if sparse=='no':
H_k = np.zeros((int(m), int(m), int(n_k)),dtype=complex)
for i in range(n_k):
H_k[:,:,i] = H
for j in range(2):
H_k[diagonal(int(m/2)*(j+1),init=int(m/2)*j,k=m-2*Ny*Nz),i] = (-1*(-1)**(j)*tx-1j*aRy_kx)*np.exp(-1j*(-1)**(i)*k_vec[i]*Nx)
H_k[diagonal(int(m/2)*(j+1),init=int(m/2)*j,k=-m+2*Ny*Nz),i] = (-1*(-1)**(j)*tx+1j*aRy_kx)*np.exp(1j*(-1)**(i)*k_vec[i]*Nx)
H_k[diagonal(int(m/2)*(j+1),k=m-2*Ny*Nz-1,step=2,init=1+int(m/2)*j),i] += (-1)**(j)*(-aRz_kx)*np.exp(-1j*(-1)**(i)*k_vec[i]*Nx)
H_k[diagonal(int(m/2)*(j+1),k=-m+2*Ny*Nz+1,step=2,init=1+int(m/2)*j),i] += (-1)**(j)*(-aRz_kx)*np.exp(1j*(-1)**(i)*k_vec[i]*Nx)
H_k[diagonal(int(m/2)*(j+1),init=int(m/2)*j,k=m+1-2*Ny*Nz,step=2),i] += (-1)**(j)*(aRz_kx)*np.exp(-1j*(-1)**(i)*k_vec[i]*Nx)
H_k[diagonal(int(m/2)*(j+1),init=int(m/2)*j,k=-m-1+2*Ny*Nz,step=2),i] += (-1)**(j)*(aRz_kx)*np.exp(1j*(-1)**(i)*k_vec[i]*Nx)
return (H_k)
elif sparse=='yes':
return(H)
else:
return (H)
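#Illustrative usage sketch (not part of the original module; all parameter
#values below are placeholders): lowest BdG states of a 3D wire, targeting
#possible near-zero-energy modes.
#
# import scipy.sparse.linalg
# H = LO_3D_builder([200, 12, 12], dis=5, m_eff=0.023, mu=0.5,
#                   B=[1.0, 0, 0], aR=30, d=0.25, sparse='yes')
# E = scipy.sparse.linalg.eigsh(H.tocsc(), k=8, sigma=0, which='LM',
#                               return_eigenvectors=False)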
#%%
def LO_3D_builder_NoSC(N,dis,m_eff,mu,B,aR, space='position', k_vec=np.nan ,sparse='no'):
"""
3D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 3D
Lutchyn-Oreg chain without superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 3D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 3D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 3D array with the on-site Rashba couplings in the direction i.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a sparse dok_matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Nx, Ny, Nz = N[0], N[1], N[2]
if np.ndim(dis)==0:
dis_x, dis_y, dis_z = dis, dis, dis
else:
dis_x, dis_y, dis_z = dis[0], dis[1], dis[2]
m = 2 * Nx * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Nx,Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros((Nx,Ny,Nz))
aRy=np.zeros((Nx,Ny,Nz))
aRz=aR*np.ones((Nx,Ny,Nz))
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones((Nx,Ny,Nz))
aRy=aR[1]*np.ones((Nx,Ny,Nz))
aRz=aR[2]*np.ones((Nx,Ny,Nz))
else:
aRx=np.zeros((Nx,Ny,Nz))
aRy=np.zeros((Nx,Ny,Nz))
aRz=aR*np.ones((Nx,Ny,Nz))
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
if space=='momentum':
n_k=len(k_vec)
#Obtain the hoppings and the on-site energies:
tx=cons.hbar**2/(2*(m_eff[1::,:,:]+m_eff[:-1,:,:])/2*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3
ty=cons.hbar**2/(2*(m_eff[:,1::,:]+m_eff[:,:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,:,1::]+m_eff[:,:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*tx[0,:,:].reshape(1,Ny,Nz),np.append(tx[1::,:,:]+tx[:-1,:,:],2*tx[-1,:,:].reshape(1,Ny,Nz),axis=0),axis=0)
e += np.append(2*ty[:,0,:].reshape(Nx,1,Nz),np.append(ty[:,1::,:]+ty[:,:-1,:],2*ty[:,-1,:].reshape(Nx,1,Nz),axis=1),axis=1)
e += np.append(2*tz[:,:,0].reshape(Nx,Ny,1),np.append(tz[:,:,1::]+tz[:,:,:-1],2*tz[:,:,-1].reshape(Nx,Ny,1),axis=2),axis=2)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,Bx,By,Bz=e.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
tx, aRy_kx, aRz_kx = np.repeat(tx.flatten(),2), np.repeat(((aRy[1::,:,:]+aRy[:-1,:,:])/(4*dis_x)).flatten(),2), ((aRz[1::,:,:]+aRz[:-1,:,:])/(4*dis_x)).flatten()
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[:,1::,:]+aRx[:,:-1,:])/(4*dis_y)).flatten(),2), ((aRz[:,1::,:]+aRz[:,:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,:,1::]+aRx[:,:,:-1])/(4*dis_z)).flatten(), ((aRy[:,:,1::]+aRy[:,:,:-1])/(4*dis_z)).flatten()
aRy_kx[1::2], aRx_ky[1::2] = -aRy_kx[::2], -aRx_ky[::2]
H[diagonal(m,k=1,step=2)], H[diagonal(m,k=-1,step=2)] = Bx-1j*By, Bx+1j*By
H[diagonal(m)] = np.repeat(e,2) + Bz
H[diagonal(m,k=2*Ny*Nz)] = -tx-1j*aRy_kx
H[diagonal(m,k=-2*Ny*Nz)] = -tx+1j*aRy_kx
H[diagonal(m,k=2*Ny*Nz-1,step=2,init=1)] += -aRz_kx
H[diagonal(m,k=-2*Ny*Nz+1,step=2,init=1)] += -aRz_kx
H[diagonal(m,k=1+2*Ny*Nz,step=2)] += aRz_kx
H[diagonal(m,k=-1-2*Ny*Nz,step=2)] += aRz_kx
ty, aRx_ky, aRz_ky = np.insert(ty,np.repeat(np.arange(2*(Nz*Ny-Nz),2*(Ny*Nz-Nz)*Nx,2*(Ny*Nz-Nz)),2*Nz),np.zeros(2*Nz*(Nx-1))), np.insert(aRx_ky,np.repeat(np.arange(2*(Nz*Ny-Nz),2*(Ny*Nz-Nz)*Nx,2*(Ny*Nz-Nz)),2*Nz),np.zeros(2*Nz*(Nx-1))),np.insert(aRz_ky,np.repeat(np.arange((Nz*Ny-Nz),(Ny*Nz-Nz)*Nx,(Ny*Nz-Nz)),Nz),np.zeros(Nz*(Nx-1)))
H[diagonal(m,k=2*Nz)] = -ty+1j*aRx_ky
H[diagonal(m,k=-2*Nz)] = -ty-1j*aRx_ky
H[diagonal(m,k=2*Nz-1,step=2,init=1)] += -1j*aRz_ky
H[diagonal(m,k=-2*Nz+1,step=2,init=1)] += 1j*aRz_ky
H[diagonal(m,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(m,k=-1-2*Nz,step=2)] += 1j*aRz_ky
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny*Nx,2*(Nz-1)),2),np.zeros(2*Nx*(Ny-1)+2*(Nx-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny*Nx,(Nz-1)),np.zeros(Nx*(Ny-1)+(Nx-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny*Nx,(Nz-1)),np.zeros(Nx*(Ny-1)+(Nx-1)))
H[diagonal(m,k=2)] = -tz
H[diagonal(m,k=-2)] = -tz
H[diagonal(m,k=1,step=2,init=1)] += aRx_kz+1j*aRy_kz
H[diagonal(m,k=-1,step=2,init=1)] += aRx_kz-1j*aRy_kz
H[diagonal(m,k=3,step=2)] += -aRx_kz+1j*aRy_kz
H[diagonal(m,k=-3,step=2)] += -aRx_kz-1j*aRy_kz
#Build it in momentum space if required:
if space=='momentum':
if sparse=='no':
H_k = np.zeros((int(m), int(m), int(n_k)),dtype=complex)
for i in range(n_k):
H_k[:,:,i] = H
H_k[diagonal(m,k=m-2*Ny*Nz),i] = (-tx-1j*aRy_kx)*np.exp(-1j*k_vec[i]*Nx)
H_k[diagonal(m,k=-m+2*Ny*Nz),i] = (-tx+1j*aRy_kx)*np.exp(1j*k_vec[i]*Nx)
H_k[diagonal(m,k=m-2*Ny*Nz-1,step=2,init=1),i] += (-aRz_kx)*np.exp(-1j*k_vec[i]*Nx)
H_k[diagonal(m,k=-m+2*Ny*Nz+1,step=2,init=1),i] += (-aRz_kx)*np.exp(1j*k_vec[i]*Nx)
H_k[diagonal(m,k=m+1-2*Ny*Nz,step=2),i] += (aRz_kx)*np.exp(-1j*k_vec[i]*Nx)
H_k[diagonal(m,k=-m-1+2*Ny*Nz,step=2),i] += (aRz_kx)*np.exp(1j*k_vec[i]*Nx)
return (H_k)
elif sparse=='yes':
return(H)
else:
return (H)
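#Illustrative usage sketch (not part of the original module; all parameter
#values below are placeholders): band structure of the periodic unit cell as a
#function of the momentum k along the wire.
#
# k_vec = np.linspace(-np.pi, np.pi, 101)
# H_k = LO_3D_builder_NoSC([10, 8, 8], dis=5, m_eff=0.023, mu=0.5,
#                          B=[0.5, 0, 0], aR=30,
#                          space='momentum', k_vec=k_vec, sparse='no')
# bands = np.array([np.linalg.eigvalsh(H_k[:, :, i]) for i in range(len(k_vec))])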
#%%
def LO_3D_builder_MO(N,dis,m_eff,
mu,B,aR,d=0,
BdG='yes',
Nxp=None):
"""
3D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 3D
Lutchyn-Oreg chain using the molecular-orbital method of <NAME>.
Parameters
----------
N: int or arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites in each direction.
m_eff: int or arr
Effective mass. If it is a 3D array, each element is the effective
mass on each site of the lattice.
mu: float or arr
Chemical potential. If it is a 3D array, each element is the
chemical potential on each site of the lattice.
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
an array (1D, 2D or 3D) with the on-site Rashba couplings in the
direction i.
d: float or arr
On-site superconducting pairing amplitude. If it is a float, the
pairing is the same on every site, while if it is a 3D array,
it is the on-site pairing.
Nxp: int
Number of slices for which the molecular orbitals of the 2D
cross-section Hamiltonian (H_2D) are computed. For the remaining
(N[0]-Nxp) slices, the molecular orbitals of the first (N[0]-Nxp)/2
slices are taken to be the same as those of the slice N[Nxp], and
the molecular orbitals of the last (N[0]-Nxp)/2 slices are taken to
be the same as those of N[N[0]-Nxp].
Returns
-------
H: tuple of arr
H[0]: A 1D array whose elements H[0][i] are 2D arrays describing
the cross-section Hamiltonian at the position x[i] of the wire.
H[1]: the 3D Hamiltonian which includes the orbital-coupling terms.
H[2]: the 3D Hamiltonian which includes the SC-coupling terms
"""
#Obtain dimensions:
Nx, Ny, Nz = N[0], N[1], N[2]
if np.ndim(dis)==0:
dis_x, dis_y, dis_z = dis, dis, dis
else:
dis_x, dis_y, dis_z = dis[0], dis[1], dis[2]
if not (Nxp is None or Nxp == N[0]):
    N_dif = int((Nx-Nxp)/2)
else:
Nxp, N_dif = Nx, 0
m = int(2 * Nx * Ny * Nz)
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * | np.ones((Nx,Ny,Nz)) | numpy.ones |
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import time
import numpy as np
import numba
from sklearn.utils.validation import check_is_fitted
import scipy.sparse
@numba.njit(parallel=True)
def fast_knn_indices(X, n_neighbors):
"""A fast computation of knn indices.
Parameters
----------
X: array of shape (n_samples, n_features)
The input data to compute the k-neighbor indices of.
n_neighbors: int
The number of nearest neighbors to compute for each sample in ``X``.
Returns
-------
knn_indices: array of shape (n_samples, n_neighbors)
The indices on the ``n_neighbors`` closest points in the dataset.
"""
knn_indices = | np.empty((X.shape[0], n_neighbors), dtype=np.int32) | numpy.empty |
#!/usr/bin/env python
#coding:utf-8
from __future__ import division
from __future__ import print_function
import numpy as np
import math
from PIL import Image
import cv2
def resize_short(img, target_size):
percent = float(target_size) / min(img.shape[0], img.shape[1])
resize_width = int(round(img.shape[1] * percent))
resize_height = int(round(img.shape[0] * percent))
img = cv2.resize(img, (resize_width, resize_height), interpolation=cv2.INTER_LINEAR)
return img
def crop_image(img, target_size, center=True):
width, height = img.shape[1], img.shape[0]
size = target_size
if center:
w_start = (width - size) // 2
h_start = (height - size) // 2
else:
w_start = | np.random.randint(0, width - size + 1) | numpy.random.randint |
from keras.datasets import mnist
from keras.datasets import cifar10
from keras.utils import to_categorical
import numpy as np
def get_keras_cifar10():
print("# Dataset")
print('## load data')
(train_X, train_y), (test_X, test_y) = cifar10.load_data()
print('## normalize data')
train_X = ((train_X - 127).astype('float32') / 128)
test_X = ((test_X - 127).astype('float32') / 128)
print('## perform one-hot encoding on the classes')
train_y = to_categorical(train_y, num_classes=10)
test_y = to_categorical(test_y, num_classes=10)
data_shape = (32, 32, 3)
labels = [str(v) for v in range(10)]
return train_X, train_y, test_X, test_y, data_shape, labels
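# Illustrative usage sketch (assumes a compiled Keras model named `model`):
#
# train_X, train_y, test_X, test_y, data_shape, labels = get_keras_cifar10()
# model.fit(train_X, train_y, validation_data=(test_X, test_y),
#           epochs=10, batch_size=128)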
#----------------------------------------------------------------------
class KerasMNISTDataset:
def __init__(self):
self._expected_shape = (28, 28, 1)
(train_X, train_y), (test_X, test_y) = mnist.load_data()
self._images = (np.vstack([train_X, test_X]) - 127.0) / 128.0
self._codes = np.hstack([train_y, test_y])
self._set_of_labels = np.array([str(v) for v in range(10)])
self._onehots = to_categorical(self._codes, num_classes=10)
self._labels = self.set_of_labels[self._codes]
@property
def expected_shape(self):
return self._expected_shape
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def codes(self):
return self._codes
@property
def onehots(self):
return self._onehots
@property
def set_of_labels(self):
return self._set_of_labels
def get_label(self, onehot):
if len(onehot) == len(self.set_of_labels):
return self.set_of_labels[onehot.argmax()]
else:
return "onehot.error"
def get(self, branch=0.20, random_state=42):
np.random.seed(random_state)
train_X = list()
train_y = list()
validation_X = list()
validation_y = list()
test_X = list()
test_y = list()
train_branch = 1.0 - branch
validation_branch = train_branch**2
for image, onehot in zip(self.images, self.onehots):
die = np.random.uniform()
if die < validation_branch:
train_X.append(image)
train_y.append(onehot)
elif die < train_branch:
validation_X.append(image)
validation_y.append(onehot)
else:
test_X.append(image)
test_y.append(onehot)
train_X = np.array(train_X)
train_y = np.array(train_y)
validation_X = np.array(validation_X)
validation_y = np.array(validation_y)
test_X = np.array(test_X)
test_y = np.array(test_y)
return train_X, train_y, validation_X, validation_y, test_X, test_y, self.expected_shape, self.set_of_labels
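# Illustrative usage sketch (not part of the class): with branch=0.20 the
# per-sample split is roughly 64% train, 16% validation and 20% test.
#
# dataset = KerasMNISTDataset()
# train_X, train_y, val_X, val_y, test_X, test_y, shape, label_names = dataset.get(branch=0.20, random_state=42)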
class ReshapedKerasMNISTDataset:
def __init__(self):
keras_mnist_dataset = KerasMNISTDataset()
original_shape = keras_mnist_dataset.images.shape
expected_shape = (original_shape[1]*original_shape[2],)
images = keras_mnist_dataset.images.reshape((original_shape[0], expected_shape[0]))
self._expected_shape = expected_shape
self._images = images
self._codes = keras_mnist_dataset.codes
self._set_of_labels = keras_mnist_dataset.set_of_labels
self._onehots = keras_mnist_dataset.onehots
self._labels = keras_mnist_dataset.labels
@property
def expected_shape(self):
return self._expected_shape
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def codes(self):
return self._codes
@property
def onehots(self):
return self._onehots
@property
def set_of_labels(self):
return self._set_of_labels
def get_label(self, onehot):
if len(onehot) == len(self.set_of_labels):
return self.set_of_labels[onehot.argmax()]
else:
return "onehot.error"
def get(self, branch=0.20, random_state=42):
np.random.seed(random_state)
train_X = list()
train_y = list()
validation_X = list()
validation_y = list()
test_X = list()
test_y = list()
train_branch = 1.0 - branch
validation_branch = train_branch**2
for image, onehot in zip(self.images, self.onehots):
die = np.random.uniform()
if die < validation_branch:
train_X.append(image)
train_y.append(onehot)
elif die < train_branch:
validation_X.append(image)
validation_y.append(onehot)
else:
test_X.append(image)
test_y.append(onehot)
train_X = np.array(train_X)
train_y = np.array(train_y)
validation_X = np.array(validation_X)
validation_y = np.array(validation_y)
test_X = np.array(test_X)
test_y = | np.array(test_y) | numpy.array |
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Class handling the visualization
#
# ----------------------------------------------------------------------------------------------------------------------
#
# <NAME> - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import torch
import tensorflow as tf  # still required by the TF1 graph/session-based visualization methods below
import os
os.environ.update(OMP_NUM_THREADS='1',
OPENBLAS_NUM_THREADS='1',
NUMEXPR_NUM_THREADS='1',
MKL_NUM_THREADS='1',)
import numpy as np
from sklearn.neighbors import KDTree
from os import makedirs, remove, rename, listdir
from os.path import exists, join
import time
from mayavi import mlab
import sys
from models.blocks import KPConv
# PLY reader
from utils.ply import write_ply, read_ply
# Configuration class
from utils.config import Config, bcolors
# ----------------------------------------------------------------------------------------------------------------------
#
# Trainer Class
# \*******************/
#
class ModelVisualizer:
# Initialization methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, net, config, chkp_path, on_gpu=True):
"""
Initialize training parameters and reload previous model for restore/finetune
:param net: network object
:param config: configuration object
:param chkp_path: path to the checkpoint that needs to be loaded (None for new training)
:param finetune: finetune from checkpoint (True) or restore training from checkpoint (False)
:param on_gpu: Train on GPU or CPU
"""
############
# Parameters
############
# Choose to train on CPU or GPU
if on_gpu and torch.cuda.is_available():
self.device = torch.device("cuda:0")
else:
self.device = torch.device("cpu")
net.to(self.device)
##########################
# Load previous checkpoint
##########################
checkpoint = torch.load(chkp_path)
new_dict = {}
for k, v in checkpoint['model_state_dict'].items():
if 'blocs' in k:
k = k.replace('blocs', 'blocks')
new_dict[k] = v
net.load_state_dict(new_dict)
self.epoch = checkpoint['epoch']
net.eval()
print("\nModel state restored from {:s}.".format(chkp_path))
return
# Main visualization methods
# ------------------------------------------------------------------------------------------------------------------
def top_relu_activations(self, model, dataset, relu_idx=0, top_num=5):
"""
Test the model on the test dataset to see which points most strongly activate each neuron in a relu layer
:param model: model used at training
:param dataset: dataset used at training
:param relu_idx: which features are to be visualized
:param top_num: how many top candidates are kept per features
"""
#####################################
# First choose the visualized feature
#####################################
# List all relu ops
all_ops = [op for op in tf.get_default_graph().get_operations() if op.name.startswith('KernelPointNetwork')
and op.name.endswith('LeakyRelu')]
# List all possible Relu indices
print('\nPossible Relu indices:')
for i, t in enumerate(all_ops):
print(i, ': ', t.name)
# Print the chosen one
if relu_idx is not None:
features_tensor = all_ops[relu_idx].outputs[0]
else:
relu_idx = int(input('Choose a Relu index: '))
features_tensor = all_ops[relu_idx].outputs[0]
# Get parameters
layer_idx = int(features_tensor.name.split('/')[1][6:])
if 'strided' in all_ops[relu_idx].name and not ('strided' in all_ops[relu_idx+1].name):
layer_idx += 1
features_dim = int(features_tensor.shape[1])
radius = model.config.first_subsampling_dl * model.config.density_parameter * (2 ** layer_idx)
print('You chose to compute the output of operation named:\n' + all_ops[relu_idx].name)
print('\nIt contains {:d} features.'.format(int(features_tensor.shape[1])))
print('\n****************************************************************************')
#######################
# Initialize containers
#######################
# Initialize containers
self.top_features = -np.ones((top_num, features_dim))
self.top_classes = -np.ones((top_num, features_dim), dtype=np.int32)
self.saving = model.config.saving
# Testing parameters
num_votes = 3
# Create visu folder
self.visu_path = None
self.fmt_str = None
if model.config.saving:
self.visu_path = join('visu',
'visu_' + model.saving_path.split('/')[-1],
'top_activations',
'Relu{:02d}'.format(relu_idx))
self.fmt_str = 'f{:04d}_top{:02d}.ply'
if not exists(self.visu_path):
makedirs(self.visu_path)
# *******************
# Network predictions
# *******************
mean_dt = np.zeros(2)
last_display = time.time()
for v in range(num_votes):
# Run model on all test examples
# ******************************
# Initialise iterator with test data
if model.config.dataset.startswith('S3DIS'):
self.sess.run(dataset.val_init_op)
else:
self.sess.run(dataset.test_init_op)
count = 0
while True:
try:
if model.config.dataset.startswith('ShapeNetPart'):
if model.config.dataset.split('_')[1] == 'multi':
label_op = model.inputs['super_labels']
else:
label_op = model.inputs['point_labels']
elif model.config.dataset.startswith('S3DIS'):
label_op = model.inputs['point_labels']
elif model.config.dataset.startswith('Scannet'):
label_op = model.inputs['point_labels']
elif model.config.dataset.startswith('ModelNet40'):
label_op = model.inputs['labels']
else:
raise ValueError('Unsupported dataset')
# Run one step of the model
t = [time.time()]
ops = (all_ops[-1].outputs[0],
features_tensor,
label_op,
model.inputs['points'],
model.inputs['pools'],
model.inputs['in_batches'])
_, stacked_features, labels, all_points, all_pools, in_batches = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
count += in_batches.shape[0]
# Stack all batches
max_ind = np.max(in_batches)
stacked_batches = []
for b_i, b in enumerate(in_batches):
stacked_batches += [b[b < max_ind - 0.5]*0+b_i]
stacked_batches = np.hstack(stacked_batches)
# Find batches at wanted layer
for l in range(model.config.num_layers - 1):
if l >= layer_idx:
break
stacked_batches = stacked_batches[all_pools[l][:, 0]]
# Get each example and update top_activations
for b_i, b in enumerate(in_batches):
b = b[b < max_ind - 0.5]
in_points = all_points[0][b]
features = stacked_features[stacked_batches == b_i]
points = all_points[layer_idx][stacked_batches == b_i]
if model.config.dataset in ['ShapeNetPart_multi', 'ModelNet40_classif']:
l = labels[b_i]
else:
l = np.argmax(np.bincount(labels[b]))
self.update_top_activations(features, labels[b_i], points, in_points, radius)
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
if model.config.dataset.startswith('S3DIS'):
completed = count / (model.config.validation_size * model.config.batch_num)
else:
completed = count / dataset.num_test
message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(v,
100 * completed,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
#class_names = np.array([dataset.label_to_names[i] for i in range(dataset.num_classes)])
#print(class_names[self.top_classes[:, :20]].T)
except tf.errors.OutOfRangeError:
break
return relu_idx
def update_top_activations(self, features, label, l_points, input_points, radius, max_computed=60):
top_num = self.top_features.shape[0]
# Compute top indice for each feature
max_indices = np.argmax(features, axis=0)
# get top_point neighborhoods
for features_i, idx in enumerate(max_indices[:max_computed]):
if features[idx, features_i] <= self.top_features[-1, features_i]:
continue
if label in self.top_classes[:, features_i]:
ind0 = np.where(self.top_classes[:, features_i] == label)[0][0]
if features[idx, features_i] <= self.top_features[ind0, features_i]:
continue
elif ind0 < top_num - 1:
self.top_features[ind0:-1, features_i] = self.top_features[ind0+1:, features_i]
self.top_classes[ind0:-1, features_i] = self.top_classes[ind0+1:, features_i]
for next_i in range(ind0 + 1, top_num):
old_f = join(self.visu_path, self.fmt_str.format(features_i, next_i + 1))
new_f = join(self.visu_path, self.fmt_str.format(features_i, next_i))
if exists(old_f):
if exists(new_f):
remove(new_f)
rename(old_f, new_f)
# Find indice where new top should be placed
top_i = np.where(features[idx, features_i] > self.top_features[:, features_i])[0][0]
# Update top features
if top_i < top_num - 1:
self.top_features[top_i + 1:, features_i] = self.top_features[top_i:-1, features_i]
self.top_features[top_i, features_i] = features[idx, features_i]
self.top_classes[top_i + 1:, features_i] = self.top_classes[top_i:-1, features_i]
self.top_classes[top_i, features_i] = label
# Find in which batch lays the point
if self.saving:
# Get inputs
l_features = features[:, features_i]
point = l_points[idx, :]
dist = np.linalg.norm(input_points - point, axis=1)
influence = (radius - dist) / radius
# Project response on input cloud
if l_points.shape[0] == input_points.shape[0]:
responses = l_features
else:
tree = KDTree(l_points, leaf_size=50)
nn_k = min(l_points.shape[0], 10)
interp_dists, interp_inds = tree.query(input_points, nn_k, return_distance=True)
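# Weight each neighbor with a Tukey-like window (1 - (d/radius)^2)^2, zeroed
# beyond `radius`, to project the layer activations onto the input point cloud.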
tukeys = np.square(1 - np.square(interp_dists / radius))
tukeys[interp_dists > radius] = 0
responses = np.sum(l_features[interp_inds] * tukeys, axis=1)
# Handle last examples
for next_i in range(top_num - 1, top_i, -1):
old_f = join(self.visu_path, self.fmt_str.format(features_i, next_i))
new_f = join(self.visu_path, self.fmt_str.format(features_i, next_i + 1))
if exists(old_f):
if exists(new_f):
remove(new_f)
rename(old_f, new_f)
# Save
filename = join(self.visu_path, self.fmt_str.format(features_i, top_i + 1))
write_ply(filename,
[input_points, influence, responses],
['x', 'y', 'z', 'influence', 'responses'])
def show_deformable_kernels_old(self, model, dataset, deform_idx=0):
##########################################
# First choose the visualized deformations
##########################################
# List all deformation ops
all_ops = [op for op in tf.get_default_graph().get_operations() if op.name.startswith('KernelPointNetwork')
and op.name.endswith('deformed_KP')]
print('\nPossible deformed indices:')
for i, t in enumerate(all_ops):
print(i, ': ', t.name)
# Chosen deformations
deformed_KP_tensor = all_ops[deform_idx].outputs[0]
# Layer index
layer_idx = int(all_ops[deform_idx].name.split('/')[1].split('_')[-1])
# Original kernel point positions
KP_vars = [v for v in tf.global_variables() if 'kernel_points' in v.name]
tmp = np.array(all_ops[deform_idx].name.split('/'))
test = []
for v in KP_vars:
cmp = np.array(v.name.split('/'))
l = min(len(cmp), len(tmp))
cmp = cmp[:l]
tmp = tmp[:l]
test += [np.sum(cmp == tmp)]
chosen_KP = np.argmax(test)
print('You chose to visualize the output of operation named: ' + all_ops[deform_idx].name)
print('\n****************************************************************************')
# Run model on all test examples
# ******************************
# Initialise iterator with test data
if model.config.dataset.startswith('S3DIS'):
self.sess.run(dataset.val_init_op)
else:
self.sess.run(dataset.test_init_op)
count = 0
while True:
try:
# Run one step of the model
t = [time.time()]
ops = (deformed_KP_tensor,
model.inputs['points'],
model.inputs['features'],
model.inputs['pools'],
model.inputs['in_batches'],
KP_vars)
stacked_deformed_KP, \
all_points, \
all_colors, \
all_pools, \
in_batches, \
original_KPs = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
count += in_batches.shape[0]
# Stack all batches
max_ind = np.max(in_batches)
stacked_batches = []
for b_i, b in enumerate(in_batches):
stacked_batches += [b[b < max_ind - 0.5] * 0 + b_i]
stacked_batches = np.hstack(stacked_batches)
# Find batches at wanted layer
for l in range(model.config.num_layers - 1):
if l >= layer_idx:
break
stacked_batches = stacked_batches[all_pools[l][:, 0]]
# Get each example and update top_activations
in_points = []
in_colors = []
deformed_KP = []
points = []
lookuptrees = []
for b_i, b in enumerate(in_batches):
b = b[b < max_ind - 0.5]
in_points += [all_points[0][b]]
deformed_KP += [stacked_deformed_KP[stacked_batches == b_i]]
points += [all_points[layer_idx][stacked_batches == b_i]]
lookuptrees += [KDTree(points[-1])]
if all_colors.shape[1] == 4:
in_colors += [all_colors[b, 1:]]
else:
in_colors += [None]
print('New batch size : ', len(in_batches))
###########################
# Interactive visualization
###########################
# Create figure for features
fig1 = mlab.figure('Features', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920))
fig1.scene.parallel_projection = False
# Indices
global obj_i, point_i, plots, offsets, p_scale, show_in_p, aim_point
p_scale = 0.03
obj_i = 0
point_i = 0
plots = {}
offsets = False
show_in_p = 2
aim_point = np.zeros((1, 3))
def picker_callback(picker):
""" Picker callback: this get called when on pick events.
"""
global plots, aim_point
if 'in_points' in plots:
if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['in_points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1]
update_scene()
if 'points' in plots:
if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1]
update_scene()
def update_scene():
global plots, offsets, p_scale, show_in_p, aim_point, point_i
# Get the current view
v = mlab.view()
roll = mlab.roll()
# clear figure
for key in plots.keys():
plots[key].remove()
plots = {}
# Plot new data feature
p = points[obj_i]
# Rescale points for visu
p = (p * 1.5 / model.config.in_radius)
# Show point cloud
if show_in_p <= 1:
plots['points'] = mlab.points3d(p[:, 0],
p[:, 1],
p[:, 2],
resolution=8,
scale_factor=p_scale,
scale_mode='none',
color=(0, 1, 1),
figure=fig1)
if show_in_p >= 1:
# Get points and colors
in_p = in_points[obj_i]
in_p = (in_p * 1.5 / model.config.in_radius)
# Color point cloud if possible
in_c = in_colors[obj_i]
if in_c is not None:
# Primitives
scalars = np.arange(len(in_p)) # Key point: set an integer for each point
# Define color table (including alpha), which must be uint8 and [0,255]
colors = np.hstack((in_c, np.ones_like(in_c[:, :1])))
colors = (colors * 255).astype(np.uint8)
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
scalars,
resolution=8,
scale_factor=p_scale*0.8,
scale_mode='none',
figure=fig1)
plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors
else:
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
resolution=8,
scale_factor=p_scale*0.8,
scale_mode='none',
figure=fig1)
# Get KP locations
rescaled_aim_point = aim_point * model.config.in_radius / 1.5
point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0]
if offsets:
KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
scals = np.ones_like(KP[:, 0])
else:
KP = points[obj_i][point_i] + original_KPs[chosen_KP]
scals = np.zeros_like(KP[:, 0])
KP = (KP * 1.5 / model.config.in_radius)
plots['KP'] = mlab.points3d(KP[:, 0],
KP[:, 1],
KP[:, 2],
scals,
colormap='autumn',
resolution=8,
scale_factor=1.2*p_scale,
scale_mode='none',
vmin=0,
vmax=1,
figure=fig1)
if True:
plots['center'] = mlab.points3d(p[point_i, 0],
p[point_i, 1],
p[point_i, 2],
scale_factor=1.1*p_scale,
scale_mode='none',
color=(0, 1, 0),
figure=fig1)
# New title
plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
plots['orient'] = mlab.orientation_axes()
# Set the saved view
mlab.view(*v)
mlab.roll(roll)
return
def animate_kernel():
global plots, offsets, p_scale, show_in_p
# Get KP locations
KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_def = (KP_def * 1.5 / model.config.in_radius)
KP_def_color = (1, 0, 0)
KP_rigid = points[obj_i][point_i] + original_KPs[chosen_KP]
KP_rigid = (KP_rigid * 1.5 / model.config.in_radius)
KP_rigid_color = (1, 0.7, 0)
if offsets:
t_list = | np.linspace(0, 1, 150, dtype=np.float32) | numpy.linspace |
import numpy as np
from gym import spaces
from gym import Env
class PointEnv(Env):
"""
point mass on a 2-D plane
goals are sampled randomly from a square
"""
def __init__(self, num_tasks=1, checkerboard_size=0.1):
self.checkerboard_size = checkerboard_size
self.reset_task()
self.reset()
self.observation_space = spaces.Box(low=-np.inf,
high=np.inf,
shape=(2,),
dtype=np.float32)
self.action_space = spaces.Box(low=-0.1,
high=0.1,
shape=(2,),
dtype=np.float32)
self._state = None
def reset_task(self, is_evaluation=False):
'''
sample a new task randomly
Problem 3: make training and evaluation goals disjoint sets
if `is_evaluation` is true, sample from the evaluation set,
otherwise sample from the training set
'''
# ==================================================================== #
# ----------PROBLEM 3----------
# ==================================================================== #
STATIC_SIZE = 10 # final normalized board range: -10..10
x_cell = np.random.uniform(0, 1)
y_cell = np.random.uniform(0, 1)
if self.checkerboard_size == 0:
# normalize to -10..10
self._goal = np.array([x_cell, y_cell]) * STATIC_SIZE * 2 - STATIC_SIZE
else:
# NB: the checkerboard should be square with an even number of cells per side to stay fair
WIDTH = HEIGHT = int(1. / self.checkerboard_size)
BLACK = int(np.ceil(WIDTH * HEIGHT / 2.)) # evaluation
WHITE = int(np.floor(WIDTH * HEIGHT / 2.)) # train
ROW_WIDTH = int(np.ceil(WIDTH / 2))
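# The unit square is tiled like a checkerboard: evaluation goals are sampled
# from the "black" cells and training goals from the "white" cells, so the two
# goal sets are disjoint by construction.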
if is_evaluation:
pos = np.random.randint(0, BLACK)
y_pos = 2 * int(pos / WIDTH) + int((pos % WIDTH) / ROW_WIDTH)
x_pos = 2 * ((pos % WIDTH) % ROW_WIDTH) + (y_pos % 2)
else:
pos = np.random.randint(0, WHITE)
y_pos = 2 * int(pos / WIDTH) + int((pos % WIDTH) / ROW_WIDTH)
x_pos = 2 * ((pos % WIDTH) % ROW_WIDTH) + (1 - (y_pos % 2))
y = y_cell + y_pos
x = x_cell + x_pos
# normalize to -10..10
self._goal = np.array([x, y]) * STATIC_SIZE * 2 / WIDTH - STATIC_SIZE
def reset(self):
self._state = | np.array([0, 0], dtype=np.float32) | numpy.array |
import numpy as np
import openvino.runtime.opset8 as ov
import pytest
from openvino.runtime.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
)
from tests.runtime import get_runtime
def create_simple_if_with_two_outputs(condition_val):
condition = ov.constant(condition_val, dtype=bool)  # np.bool was removed in NumPy 1.24; the builtin bool is the drop-in replacement
# then_body
X_t = ov.parameter([], np.float32, "X")
Y_t = ov.parameter([], np.float32, "Y")
Z_t = ov.parameter([], np.float32, "Z")
add_t = ov.add(X_t, Y_t)
mul_t = ov.multiply(Y_t, Z_t)
then_body_res_1 = ov.result(add_t)
then_body_res_2 = ov.result(mul_t)
then_body = GraphBody([X_t, Y_t, Z_t], [then_body_res_1, then_body_res_2])
then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1),
TensorIteratorInvariantInputDesc(3, 2)]
then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)]
# else_body
X_e = ov.parameter([], np.float32, "X")
Z_e = ov.parameter([], np.float32, "Z")
W_e = ov.parameter([], np.float32, "W")
add_e = ov.add(X_e, W_e)
pow_e = ov.power(W_e, Z_e)
else_body_res_1 = ov.result(add_e)
else_body_res_2 = ov.result(pow_e)
else_body = GraphBody([X_e, Z_e, W_e], [else_body_res_1, else_body_res_2])
else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(3, 1),
TensorIteratorInvariantInputDesc(4, 2)]
else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)]
X = ov.constant(15.0, dtype=np.float32)
Y = ov.constant(-5.0, dtype=np.float32)
Z = ov.constant(4.0, dtype=np.float32)
W = ov.constant(2.0, dtype=np.float32)
if_node = ov.if_op(condition, [X, Y, Z, W], (then_body, else_body), (then_body_inputs, else_body_inputs),
(then_body_outputs, else_body_outputs))
return if_node
def create_diff_if_with_two_outputs(condition_val):
condition = ov.constant(condition_val, dtype=bool)
# then_body
X_t = ov.parameter([2], np.float32, "X")
Y_t = ov.parameter([2], np.float32, "Y")
mmul_t = ov.matmul(X_t, Y_t, False, False)
mul_t = ov.multiply(Y_t, X_t)
then_body_res_1 = ov.result(mmul_t)
then_body_res_2 = ov.result(mul_t)
then_body = GraphBody([X_t, Y_t], [then_body_res_1, then_body_res_2])
then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1)]
then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)]
# else_body
X_e = ov.parameter([2], np.float32, "X")
Z_e = ov.parameter([], np.float32, "Z")
mul_e = ov.multiply(X_e, Z_e)
else_body_res_1 = ov.result(Z_e)
else_body_res_2 = ov.result(mul_e)
else_body = GraphBody([X_e, Z_e], [else_body_res_1, else_body_res_2])
else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(3, 1)]
else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)]
X = ov.constant([3, 4], dtype=np.float32)
Y = ov.constant([2, 1], dtype=np.float32)
Z = ov.constant(4.0, dtype=np.float32)
if_node = ov.if_op(condition, [X, Y, Z], (then_body, else_body), (then_body_inputs, else_body_inputs),
(then_body_outputs, else_body_outputs))
return if_node
def simple_if(condition_val):
condition = ov.constant(condition_val, dtype=bool)
# then_body
X_t = ov.parameter([2], np.float32, "X")
Y_t = ov.parameter([2], np.float32, "Y")
then_mul = ov.multiply(X_t, Y_t)
then_body_res_1 = ov.result(then_mul)
then_body = GraphBody([X_t, Y_t], [then_body_res_1])
then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1)]
then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]
# else_body
X_e = ov.parameter([2], np.float32, "X")
Y_e = ov.parameter([2], np.float32, "Y")
add_e = ov.add(X_e, Y_e)
else_body_res_1 = ov.result(add_e)
else_body = GraphBody([X_e, Y_e], [else_body_res_1])
else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1)]
else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]
X = ov.constant([3, 4], dtype=np.float32)
Y = ov.constant([2, 1], dtype=np.float32)
if_node = ov.if_op(condition, [X, Y], (then_body, else_body), (then_body_inputs, else_body_inputs),
(then_body_outputs, else_body_outputs))
relu = ov.relu(if_node)
return relu
def simple_if_without_parameters(condition_val):
condition = ov.constant(condition_val, dtype=bool)
# then_body
then_constant = ov.constant(0.7, dtype=float)
then_body_res_1 = ov.result(then_constant)
then_body = GraphBody([], [then_body_res_1])
then_body_inputs = []
then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]
# else_body
else_const = ov.constant(9.0, dtype=float)
else_body_res_1 = ov.result(else_const)
else_body = GraphBody([], [else_body_res_1])
else_body_inputs = []
else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]
if_node = ov.if_op(condition, [], (then_body, else_body), (then_body_inputs, else_body_inputs),
(then_body_outputs, else_body_outputs))
relu = ov.relu(if_node)
return relu
def check_results(results, expected_results):
assert len(results) == len(expected_results)
for id_result, res in enumerate(results):
assert np.allclose(res, expected_results[id_result])
def check_if(if_model, cond_val, exp_results):
last_node = if_model(cond_val)
runtime = get_runtime()
computation = runtime.computation(last_node)
results = computation()
check_results(results, exp_results)
# After deleting the evaluate method for If, constant folding stopped working.
# As a result, the bug with id 67255 began to appear.
@pytest.mark.xfail(reason="bug 67255")
def test_if_with_two_outputs():
check_if(create_simple_if_with_two_outputs, True,
[np.array([10], dtype=np.float32), np.array([-20], dtype=np.float32)])
check_if(create_simple_if_with_two_outputs, False,
[np.array([17], dtype=np.float32), np.array([16], dtype=np.float32)])
@pytest.mark.xfail(reason="bug 67255")
def test_diff_if_with_two_outputs():
check_if(create_diff_if_with_two_outputs, True,
[np.array([10], dtype=np.float32), np.array([6, 4], dtype=np.float32)])
check_if(create_diff_if_with_two_outputs, False,
[np.array([4], dtype=np.float32), np.array([12, 16], dtype=np.float32)])
def test_simple_if():
check_if(simple_if, True, [np.array([6, 4], dtype=np.float32)])
check_if(simple_if, False, [np.array([5, 5], dtype=np.float32)])
import numpy as np
from .. import BaseModel, register_model
from ._graphwave import graphwave_alg
from .prone import ProNE
@register_model("graphwave")
class GraphWave(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--scale", type=float, default=100)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(args.hidden_size, args.scale, args)
def __init__(self, dimension, scale, args):
super(GraphWave, self).__init__()
self.dimension = dimension
self.scale = scale
self.whitening = args.task == "unsupervised_node_classification"
def train(self, G):
chi, heat_print, taus = graphwave_alg(
G, np.linspace(0, self.scale, self.dimension // 4)
)
# if self.whitening:
# chi = (chi - chi.mean(axis=0)) / (chi.std(axis=0) + 1e-8)
return chi
@register_model("graphwave_cat_prone")
class GraphwaveCatProne(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--scale", type=float, default=1e5)
ProNE.add_args(parser)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(args.hidden_size, args.scale, args)
def __init__(self, dimension, scale, args):
super(GraphwaveCatProne, self).__init__()
self.dimension = dimension // 2
self.scale = scale
self.whitening = args.task == "unsupervised_node_classification"
# HACK
args.hidden_size //= 2
self.prone = ProNE.build_model_from_args(args)
args.hidden_size *= 2
def train(self, G):
chi, heat_print, taus = graphwave_alg(
G, np.linspace(0, self.scale, self.dimension // 4)
)
# if self.whitening:
# chi = (chi - chi.mean(axis=0)) / (chi.std(axis=0) + 1e-8)
prone_embeddings = self.prone.train(G)
return np.concatenate([chi, prone_embeddings], axis=1)
"""Probability distributions and auxiliary functions to deal with them."""
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d, RegularGridInterpolator
import scipy.signal
import scipy.optimize
import math
from flavio.math.functions import normal_logpdf, normal_pdf
from flavio.statistics.functions import confidence_level
import warnings
import inspect
from collections import OrderedDict
import yaml
import re
def _camel_to_underscore(s):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def string_to_class(string):
"""Get a ProbabilityDistribution subclass from a string. This can
either be the class name itself or a string in underscore format
as returned from `class_to_string`."""
try:
return eval(string)
except NameError:
pass
for c in ProbabilityDistribution.get_subclasses():
if c.class_to_string() == string:
return c
raise NameError("Distribution " + string + " not found.")
class ProbabilityDistribution(object):
"""Common base class for all probability distributions"""
def __init__(self, central_value, support):
self.central_value = central_value
self.support = support
@classmethod
def get_subclasses(cls):
"""Return all subclasses (including subclasses of subclasses)."""
for subclass in cls.__subclasses__():
yield from subclass.get_subclasses()
yield subclass
def get_central(self):
return self.central_value
@property
def error_left(self):
"""Return the lower error"""
return self.get_error_left()
@property
def error_right(self):
"""Return the upper error"""
return self.get_error_right()
@classmethod
def class_to_string(cls):
"""Get a string name for a given ProbabilityDistribution subclass.
This converts camel case to underscore and removes the word
'distribution'.
Example: class_to_string(AsymmetricNormalDistribution) returns
'asymmetric_normal'.
"""
name = _camel_to_underscore(cls.__name__)
return name.replace('_distribution', '')
def get_dict(self, distribution=False, iterate=False, arraytolist=False):
"""Get an ordered dictionary with arguments and values needed to
instantiate the distribution.
Optional arguments (default to False):
- `distribution`: add a 'distribution' key to the dictionary with the
value being the string representation of the distribution's name
(e.g. 'asymmetric_normal').
- `iterate`: If ProbabilityDistribution instances are among the
arguments (e.g. for KernelDensityEstimate), return the instance's
get_dict instead of the instance as value.
- `arraytolist`: convert numpy arrays to lists
"""
args = inspect.signature(self.__class__).parameters.keys()
d = self.__dict__
od = OrderedDict()
if distribution:
od['distribution'] = self.class_to_string()
od.update(OrderedDict((a, d[a]) for a in args))
if iterate:
for k in od:
if isinstance(od[k], ProbabilityDistribution):
od[k] = od[k].get_dict(distribution=True)
if arraytolist:
for k in od:
if isinstance(od[k], np.ndarray):
od[k] = od[k].tolist()
if isinstance(od[k], list):
for i, x in enumerate(od[k]):
if isinstance(x, np.ndarray):
od[k][i] = od[k][i].tolist()
for k in od:
if isinstance(od[k], (int, np.integer)):
od[k] = int(od[k])
elif isinstance(od[k], (float, np.floating)):
od[k] = float(od[k])
if isinstance(od[k], list):
for i, x in enumerate(od[k]):
if isinstance(x, (float, np.floating)):
od[k][i] = float(od[k][i])
elif isinstance(x, (int, np.integer)):
od[k][i] = int(od[k][i])
return od
def get_yaml(self, *args, **kwargs):
"""Get a YAML string representing the dictionary returned by the
get_dict method.
Arguments will be passed to `yaml.dump`."""
od = self.get_dict(distribution=True, iterate=True, arraytolist=True)
return yaml.dump(od, *args, **kwargs)
def delta_logpdf(self, x, **kwargs):
exclude = kwargs.get('exclude', None)
if exclude is not None:
d = len(self.central_value)
cv = [self.central_value[i] for i in range(d) if i not in exclude]
else:
cv = self.central_value
return self.logpdf(x, **kwargs) - self.logpdf(cv, **kwargs)
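# Example (illustrative sketch, values are arbitrary): the serialization helpers
# above work on any concrete subclass, e.g.
#   d = NormalDistribution(0.0, 0.5)      # NormalDistribution is defined further below
#   d.get_dict(distribution=True)         # OrderedDict including 'distribution': 'normal'
#   d.get_yaml()                          # YAML string for that dictionary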
class UniformDistribution(ProbabilityDistribution):
"""Distribution with constant PDF in a range and zero otherwise."""
def __init__(self, central_value, half_range):
"""Initialize the distribution.
Parameters:
- central_value: arithmetic mean of the upper and lower range boundaries
- half_range: half the difference of upper and lower range boundaries
Example:
central_value = 5 and half_range = 3 leads to the range [2, 8].
"""
self.half_range = half_range
self.range = (central_value - half_range,
central_value + half_range)
super().__init__(central_value, support=self.range)
def __repr__(self):
return 'flavio.statistics.probability.UniformDistribution' + \
'({}, {})'.format(self.central_value, self.half_range)
def get_random(self, size=None):
return np.random.uniform(self.range[0], self.range[1], size)
def _logpdf(self, x):
if x < self.range[0] or x >= self.range[1]:
return -np.inf
else:
return -math.log(2 * self.half_range)
def logpdf(self, x):
_lpvect = np.vectorize(self._logpdf)
return _lpvect(x)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
return confidence_level(nsigma) * self.half_range
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
return confidence_level(nsigma) * self.half_range
class DeltaDistribution(ProbabilityDistribution):
"""Delta distribution that is non-vanishing only at a single point."""
def __init__(self, central_value):
"""Initialize the distribution.
Parameters:
- central_value: point where the PDF does not vanish.
"""
super().__init__(central_value, support=(central_value, central_value))
def __repr__(self):
return 'flavio.statistics.probability.DeltaDistribution' + \
'({})'.format(self.central_value)
def get_random(self, size=None):
if size is None:
return self.central_value
else:
return self.central_value * np.ones(size)
def logpdf(self, x):
if np.ndim(x) == 0:
if x == self.central_value:
return 0.
else:
return -np.inf
y = -np.inf*np.ones(np.asarray(x).shape)
y[np.asarray(x) == self.central_value] = 0
return y
def get_error_left(self, *args, **kwargs):
return 0
def get_error_right(self, *args, **kwargs):
return 0
class NormalDistribution(ProbabilityDistribution):
"""Univariate normal or Gaussian distribution."""
def __init__(self, central_value, standard_deviation):
"""Initialize the distribution.
Parameters:
- central_value: location (mode and mean)
- standard_deviation: standard deviation
"""
super().__init__(central_value,
support=(central_value - 6 * standard_deviation,
central_value + 6 * standard_deviation))
if standard_deviation <= 0:
raise ValueError("Standard deviation must be positive number")
self.standard_deviation = standard_deviation
def __repr__(self):
return 'flavio.statistics.probability.NormalDistribution' + \
'({}, {})'.format(self.central_value, self.standard_deviation)
def get_random(self, size=None):
return np.random.normal(self.central_value, self.standard_deviation, size)
def logpdf(self, x):
return normal_logpdf(x, self.central_value, self.standard_deviation)
def pdf(self, x):
return normal_pdf(x, self.central_value, self.standard_deviation)
def cdf(self, x):
return scipy.stats.norm.cdf(x, self.central_value, self.standard_deviation)
def ppf(self, x):
return scipy.stats.norm.ppf(x, self.central_value, self.standard_deviation)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
return nsigma * self.standard_deviation
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
return nsigma * self.standard_deviation
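# Example (illustrative sketch, values are arbitrary):
#   nd = NormalDistribution(central_value=1.0, standard_deviation=0.2)
#   nd.get_random(3)                            # array of 3 Gaussian samples
#   nd.logpdf(1.0)                              # log-PDF at the mode
#   nd.get_error_left(), nd.get_error_right()   # both equal 0.2 for nsigma=1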
class LogNormalDistribution(ProbabilityDistribution):
"""Univariate log-normal distribution."""
def __init__(self, central_value, factor):
r"""Initialize the distribution.
Parameters:
- central_value: median of the distribution (neither mode nor mean!).
Can be positive or negative, but must be nonzero.
- factor: must be larger than 1. 68% of the probability will be between
`central_value * factor` and `central_value / factor`.
The mean and standard deviation of the underlying normal distribution
correspond to `log(abs(central_value))` and `log(factor)`, respectively.
Example:
`LogNormalDistribution(central_value=3, factor=2)`
corresponds to the distribution of the exponential of a normally
distributed variable with mean ln(3) and standard deviation ln(2).
68% of the probability is between 1.5=3/2 and 6=3*2.
"""
if central_value == 0:
raise ValueError("Central value must not be zero")
if factor <= 1:
raise ValueError("Factor must be bigger than 1")
self.factor = factor
self.log_standard_deviation = np.log(factor)
self.log_central_value = math.log(abs(central_value))
if central_value < 0:
self.central_sign = -1
slim = math.exp(math.log(abs(central_value))
- 6 * self.log_standard_deviation)
super().__init__(central_value,
support=(slim, 0))
else:
self.central_sign = +1
slim = math.exp(math.log(abs(central_value))
+ 6 * self.log_standard_deviation)
super().__init__(central_value,
support=(0, slim))
def __repr__(self):
return 'flavio.statistics.probability.LogNormalDistribution' + \
'({}, {})'.format(self.central_value, self.factor)
def get_random(self, size=None):
s = self.central_sign
return s * np.random.lognormal(self.log_central_value, self.log_standard_deviation, size)
def logpdf(self, x):
s = self.central_sign
return scipy.stats.lognorm.logpdf(s * x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def pdf(self, x):
s = self.central_sign
return scipy.stats.lognorm.pdf(s * x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def cdf(self, x):
if self.central_sign == -1:
return 1 - scipy.stats.lognorm.cdf(-x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
else:
return scipy.stats.lognorm.cdf(x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def ppf(self, x):
if self.central_sign == -1:
return -scipy.stats.lognorm.ppf(1 - x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
else:
return scipy.stats.lognorm.ppf(x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
cl = confidence_level(nsigma)
return self.central_value - self.ppf(0.5 - cl/2.)
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
cl = confidence_level(nsigma)
return self.ppf(0.5 + cl/2.) - self.central_value
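# Example (illustrative sketch mirroring the docstring above):
#   lnd = LogNormalDistribution(central_value=3, factor=2)
#   lnd.ppf(0.5)                # 3.0, the median
#   lnd.cdf(6) - lnd.cdf(1.5)   # approximately 0.68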
class AsymmetricNormalDistribution(ProbabilityDistribution):
"""An asymmetric normal distribution obtained by gluing together two
half-Gaussians and demanding the PDF to be continuous."""
def __init__(self, central_value, right_deviation, left_deviation):
"""Initialize the distribution.
Parameters:
- central_value: mode of the distribution (not equal to its mean!)
- right_deviation: standard deviation of the upper half-Gaussian
- left_deviation: standard deviation of the lower half-Gaussian
"""
super().__init__(central_value,
support=(central_value - 6 * left_deviation,
central_value + 6 * right_deviation))
if right_deviation <= 0 or left_deviation <= 0:
raise ValueError(
"Left and right standard deviations must be positive numbers")
self.right_deviation = right_deviation
self.left_deviation = left_deviation
self.p_right = normal_pdf(
self.central_value, self.central_value, self.right_deviation)
self.p_left = normal_pdf(
self.central_value, self.central_value, self.left_deviation)
def __repr__(self):
return 'flavio.statistics.probability.AsymmetricNormalDistribution' + \
'({}, {}, {})'.format(self.central_value,
self.right_deviation,
self.left_deviation)
def get_random(self, size=None):
if size is None:
return self._get_random()
else:
return np.array([self._get_random() for i in range(size)])
def _get_random(self):
r = np.random.uniform()
a = abs(self.left_deviation /
(self.right_deviation + self.left_deviation))
if r > a:
x = abs(np.random.normal(0, self.right_deviation))
return self.central_value + x
else:
x = abs(np.random.normal(0, self.left_deviation))
return self.central_value - x
def _logpdf(self, x):
# p_left and p_right are the values of the PDF at the central value
if x < self.central_value:
# left-hand side: scale factor
r = 2 * self.p_right / (self.p_left + self.p_right)
return math.log(r) + normal_logpdf(x, self.central_value, self.left_deviation)
else:
# right-hand side: scale factor
r = 2 * self.p_left / (self.p_left + self.p_right)
return math.log(r) + normal_logpdf(x, self.central_value, self.right_deviation)
def logpdf(self, x):
_lpvect = np.vectorize(self._logpdf)
return _lpvect(x)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
return nsigma * self.left_deviation
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
return nsigma * self.right_deviation
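# Example (illustrative sketch, values are arbitrary): a +0.3/-0.1 error around a mode of 2.0.
#   asym = AsymmetricNormalDistribution(2.0, right_deviation=0.3, left_deviation=0.1)
#   asym.get_error_right()      # 0.3
#   asym.get_error_left()       # 0.1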
class HalfNormalDistribution(ProbabilityDistribution):
"""Half-normal distribution with zero PDF above or below the mode."""
def __init__(self, central_value, standard_deviation):
"""Initialize the distribution.
Parameters:
- central_value: mode of the distribution.
- standard_deviation:
If positive, the PDF is zero below central_value and (twice) that of
a Gaussian with this standard deviation above.
If negative, the PDF is zero above central_value and (twice) that of
a Gaussian with standard deviation equal to abs(standard_deviation)
below.
"""
super().__init__(central_value,
support=sorted((central_value,
central_value + 6 * standard_deviation)))
if standard_deviation == 0:
raise ValueError("Standard deviation must be non-zero number")
self.standard_deviation = standard_deviation
def __repr__(self):
return 'flavio.statistics.probability.HalfNormalDistribution' + \
'({}, {})'.format(self.central_value, self.standard_deviation)
def get_random(self, size=None):
return self.central_value + np.sign(self.standard_deviation) * abs(np.random.normal(0, abs(self.standard_deviation), size))
def _logpdf(self, x):
if np.sign(self.standard_deviation) * (x - self.central_value) < 0:
return -np.inf
else:
return math.log(2) + normal_logpdf(x, self.central_value, abs(self.standard_deviation))
def logpdf(self, x):
_lpvect = np.vectorize(self._logpdf)
return _lpvect(x)
def cdf(self, x):
if np.sign(self.standard_deviation) == -1:
return 1 - scipy.stats.halfnorm.cdf(-x,
loc=-self.central_value,
scale=-self.standard_deviation)
else:
return scipy.stats.halfnorm.cdf(x,
loc=self.central_value,
scale=self.standard_deviation)
def ppf(self, x):
if np.sign(self.standard_deviation) == -1:
return -scipy.stats.halfnorm.ppf(1 - x,
loc=-self.central_value,
scale=-self.standard_deviation)
else:
return scipy.stats.halfnorm.ppf(x,
loc=self.central_value,
scale=self.standard_deviation)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
if self.standard_deviation >= 0:
return 0
else:
return nsigma * (-self.standard_deviation) # return a positive value!
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
if self.standard_deviation <= 0:
return 0
else:
return nsigma * self.standard_deviation
class GaussianUpperLimit(HalfNormalDistribution):
"""Upper limit defined as a half-normal distribution."""
def __init__(self, limit, confidence_level):
"""Initialize the distribution.
Parameters:
- limit: value of the upper limit
- confidence_level: confidence_level of the upper limit. Float between
0 and 1.
"""
if confidence_level > 1 or confidence_level < 0:
raise ValueError("Confidence level should be between 0 and 1")
if limit <= 0:
raise ValueError("The upper limit should be a positive number")
super().__init__(central_value=0,
standard_deviation=self.get_standard_deviation(limit, confidence_level))
self.limit = limit
self.confidence_level = confidence_level
def __repr__(self):
return 'flavio.statistics.probability.GaussianUpperLimit' + \
'({}, {})'.format(self.limit, self.confidence_level)
def get_standard_deviation(self, limit, confidence_level):
"""Convert the confidence level into a Gaussian standard deviation"""
return limit / scipy.stats.norm.ppf(0.5 + confidence_level / 2.)
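# Example (illustrative sketch, numbers are arbitrary): a 95% C.L. upper limit of 1e-9.
#   ul = GaussianUpperLimit(limit=1e-9, confidence_level=0.95)
#   ul.standard_deviation       # limit / norm.ppf(0.975), roughly 5.1e-10
#   ul.cdf(1e-9)                # 0.95 by construction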
class GammaDistribution(ProbabilityDistribution):
r"""A Gamma distribution defined like the `gamma` distribution in
`scipy.stats` (with parameters `a`, `loc`, `scale`).
The `central_value` attribute returns the location of the mode.
"""
def __init__(self, a, loc, scale):
if loc > 0:
raise ValueError("loc must be negative or zero")
# "frozen" scipy distribution object
self.scipy_dist = scipy.stats.gamma(a=a, loc=loc, scale=scale)
mode = loc + (a-1)*scale
# support extends until the CDF is roughly "6 sigma"
support_limit = self.scipy_dist.ppf(1-2e-9)
super().__init__(central_value=mode, # the mode
support=(loc, support_limit))
self.a = a
self.loc = loc
self.scale = scale
def __repr__(self):
return 'flavio.statistics.probability.GammaDistribution' + \
'({}, {}, {})'.format(self.a, self.loc, self.scale)
def get_random(self, size):
return self.scipy_dist.rvs(size=size)
def cdf(self, x):
return self.scipy_dist.cdf(x)
def ppf(self, x):
return self.scipy_dist.ppf(x)
def logpdf(self, x):
return self.scipy_dist.logpdf(x)
def _find_error_cdf(self, confidence_level):
# find the value of the CDF at the position of the left boundary
# of the `confidence_level`% CL range by demanding that the value
# of the PDF is the same at the two boundaries
def x_left(a):
return self.ppf(a)
def x_right(a):
return self.ppf(a + confidence_level)
def diff_logpdf(a):
logpdf_x_left = self.logpdf(x_left(a))
logpdf_x_right = self.logpdf(x_right(a))
return logpdf_x_left - logpdf_x_right
return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
a = self._find_error_cdf(confidence_level(nsigma))
return self.central_value - self.ppf(a)
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
a = self._find_error_cdf(confidence_level(nsigma))
return self.ppf(a + confidence_level(nsigma)) - self.central_value
class GammaDistributionPositive(ProbabilityDistribution):
r"""A Gamma distribution defined like the `gamma` distribution in
`scipy.stats` (with parameters `a`, `loc`, `scale`), but restricted to
positive values for x and correspondingly rescaled PDF.
The `central_value` attribute returns the location of the mode.
"""
def __init__(self, a, loc, scale):
if loc > 0:
raise ValueError("loc must be negative or zero")
# "frozen" scipy distribution object (without restricting x>0!)
self.scipy_dist = scipy.stats.gamma(a=a, loc=loc, scale=scale)
mode = loc + (a-1)*scale
if mode < 0:
mode = 0
# support extends until the CDF is roughly "6 sigma", assuming x>0
support_limit = self.scipy_dist.ppf(1-2e-9*(1-self.scipy_dist.cdf(0)))
super().__init__(central_value=mode, # the mode
support=(0, support_limit))
self.a = a
self.loc = loc
self.scale = scale
# scale factor for PDF to account for x>0
self._pdf_scale = 1/(1 - self.scipy_dist.cdf(0))
def __repr__(self):
return 'flavio.statistics.probability.GammaDistributionPositive' + \
'({}, {}, {})'.format(self.a, self.loc, self.scale)
def get_random(self, size=None):
if size is None:
return self._get_random(size=size)
else:
# some iteration necessary as discarding negative values
# might lead to too small size
r = np.array([], dtype=float)
while len(r) < size:
r = np.concatenate((r, self._get_random(size=2*size)))
return r[:size]
def _get_random(self, size):
r = self.scipy_dist.rvs(size=size)
return r[(r >= 0)]
def cdf(self, x):
cdf0 = self.scipy_dist.cdf(0)
cdf = (self.scipy_dist.cdf(x) - cdf0)/(1-cdf0)
return np.piecewise(
np.asarray(x, dtype=float),
[x<0, x>=0],
[0., cdf]) # return 0 for negative x
def ppf(self, x):
cdf0 = self.scipy_dist.cdf(0)
return self.scipy_dist.ppf((1-cdf0)*x + cdf0)
def logpdf(self, x):
# return -inf for negative x values
inf0 = np.piecewise(np.asarray(x, dtype=float), [x<0, x>=0], [-np.inf, 0.])
return inf0 + self.scipy_dist.logpdf(x) + np.log(self._pdf_scale)
def _find_error_cdf(self, confidence_level):
# find the value of the CDF at the position of the left boundary
# of the `confidence_level`% CL range by demanding that the value
# of the PDF is the same at the two boundaries
def x_left(a):
return self.ppf(a)
def x_right(a):
return self.ppf(a + confidence_level)
def diff_logpdf(a):
logpdf_x_left = self.logpdf(x_left(a))
logpdf_x_right = self.logpdf(x_right(a))
return logpdf_x_left - logpdf_x_right
return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
if self.logpdf(0) > self.logpdf(self.ppf(confidence_level(nsigma))):
# look at a one-sided 1 sigma range. If the PDF at 0
# is smaller than the PDF at the boundary of this range, it means
# that the left-hand error is not meaningful to define.
return self.central_value
else:
a = self._find_error_cdf(confidence_level(nsigma))
return self.central_value - self.ppf(a)
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
one_sided_error = self.ppf(confidence_level(nsigma))
if self.logpdf(0) > self.logpdf(one_sided_error):
# look at a one-sided 1 sigma range. If the PDF at 0
# is smaller than the PDF at the boundary of this range, return the
# boundary of the range as the right-hand error
return one_sided_error
else:
a = self._find_error_cdf(confidence_level(nsigma))
return self.ppf(a + confidence_level(nsigma)) - self.central_value
class GammaUpperLimit(GammaDistributionPositive):
r"""Gamma distribution with x restricted to be positive appropriate for
a positive quantity obtained from a low-statistics counting experiment,
e.g. a rare decay rate, given an upper limit on x."""
def __init__(self, counts_total, counts_background, limit, confidence_level):
r"""Initialize the distribution.
Parameters:
- counts_total: observed total number (signal and background) of counts.
- counts_background: number of expected background counts, assumed to be
known.
- limit: upper limit on x, which is proportional (with a positive
proportionality factor) to the number of signal events.
- confidence_level: confidence level of the upper limit, i.e. the value
of the CDF at the limit. Float between 0 and 1. Frequently used values
are 0.90 and 0.95.
"""
if confidence_level > 1 or confidence_level < 0:
raise ValueError("Confidence level should be between 0 and 1")
if limit <= 0:
raise ValueError("The upper limit should be a positive number")
if counts_total < 0:
raise ValueError("counts_total should be a positive number or zero")
if counts_background < 0:
raise ValueError("counts_background should be a positive number or zero")
self.limit = limit
self.confidence_level = confidence_level
self.counts_total = counts_total
self.counts_background = counts_background
a, loc, scale = self._get_a_loc_scale()
super().__init__(a=a, loc=loc, scale=scale)
def __repr__(self):
return 'flavio.statistics.probability.GammaUpperLimit' + \
'({}, {}, {}, {})'.format(self.counts_total,
self.counts_background,
self.limit,
self.confidence_level)
def _get_a_loc_scale(self):
"""Convert the counts and limit to the input parameters needed for
GammaDistributionPositive"""
a = self.counts_total + 1
loc_unscaled = -self.counts_background
dist_unscaled = GammaDistributionPositive(a=a, loc=loc_unscaled, scale=1)
limit_unscaled = dist_unscaled.ppf(self.confidence_level)
# rescale
scale = self.limit/limit_unscaled
loc = -self.counts_background*scale
return a, loc, scale
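# Example (illustrative sketch, counts and limit are arbitrary): a 90% C.L. upper
# limit of 2.5 from an experiment with 4 observed events and 1.2 expected background events.
#   gul = GammaUpperLimit(counts_total=4, counts_background=1.2,
#                         limit=2.5, confidence_level=0.9)
#   gul.cdf(2.5)                # 0.9 by construction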
class NumericalDistribution(ProbabilityDistribution):
"""Univariate distribution defined in terms of numerical values for the
PDF."""
def __init__(self, x, y, central_value=None):
"""Initialize a 1D numerical distribution.
Parameters:
- `x`: x-axis values. Must be a 1D array of real values in strictly
ascending order (but not necessarily evenly spaced)
- `y`: PDF values. Must be a 1D array of real positive values with the
same length as `x`
- central_value: if None (default), will be set to the mode of the
distribution, i.e. the x-value where y is largest (by looking up
the input arrays, i.e. without interpolation!)
"""
self.x = x
self.y = y
if central_value is not None:
if x[0] <= central_value <= x[-1]:
super().__init__(central_value=central_value,
support=(x[0], x[-1]))
else:
raise ValueError("Central value must be within range provided")
else:
mode = x[np.argmax(y)]
super().__init__(central_value=mode, support=(x[0], x[-1]))
self.y_norm = y / np.trapz(y, x=x) # normalize PDF to 1
self.y_norm[self.y_norm < 0] = 0
self.pdf_interp = interp1d(x, self.y_norm,
fill_value=0, bounds_error=False)
_cdf = np.zeros(len(x))
_cdf[1:] = np.cumsum(self.y_norm[:-1] * np.diff(x))
_cdf = _cdf/_cdf[-1] # normalize CDF to 1
self.ppf_interp = interp1d(_cdf, x)
self.cdf_interp = interp1d(x, _cdf)
def __repr__(self):
return 'flavio.statistics.probability.NumericalDistribution' + \
'({}, {})'.format(self.x, self.y)
def get_random(self, size=None):
"""Draw a random number from the distribution.
If size is not None but an integer N, return an array of N numbers."""
r = np.random.uniform(size=size)
return self.ppf_interp(r)
def ppf(self, x):
return self.ppf_interp(x)
def cdf(self, x):
return self.cdf_interp(x)
def pdf(self, x):
return self.pdf_interp(x)
def logpdf(self, x):
# ignore warning from log(0)=-np.inf
with np.errstate(divide='ignore', invalid='ignore'):
return np.log(self.pdf_interp(x))
def _find_error_cdf(self, confidence_level):
# find the value of the CDF at the position of the left boundary
# of the `confidence_level`% CL range by demanding that the value
# of the PDF is the same at the two boundaries
def x_left(a):
return self.ppf(a)
def x_right(a):
return self.ppf(a + confidence_level)
def diff_logpdf(a):
logpdf_x_left = self.logpdf(x_left(a))
logpdf_x_right = self.logpdf(x_right(a))
return logpdf_x_left - logpdf_x_right
return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)
def get_error_left(self, nsigma=1, method='central'):
"""Return the lower error.
'method' should be one of:
- 'central' for a central interval (same probability on both sides of
the central value)
- 'hpd' for highest posterior density, i.e. probability is larger inside
the interval than outside
- 'limit' for a one-sided error, i.e. a lower limit"""
if method == 'limit':
return self.central_value - self.ppf(1 - confidence_level(nsigma))
cdf_central = self.cdf(self.central_value)
err_left = self.central_value - self.ppf(cdf_central * (1 - confidence_level(nsigma)))
if method == 'central':
return err_left
elif method == 'hpd':
if self.pdf(self.central_value + self.get_error_right(method='central')) == self.pdf(self.central_value - err_left):
return err_left
try:
a = self._find_error_cdf(confidence_level(nsigma))
except ValueError:
return np.nan
return self.central_value - self.ppf(a)
else:
raise ValueError("Method " + str(method) + " unknown")
def get_error_right(self, nsigma=1, method='central'):
"""Return the upper error
'method' should be one of:
- 'central' for a central interval (same probability on both sides of
the central value)
- 'hpd' for highest posterior density, i.e. probability is larger inside
the interval than outside
- 'limit' for a one-sided error, i.e. an upper limit"""
if method == 'limit':
return self.ppf(confidence_level(nsigma)) - self.central_value
cdf_central = self.cdf(self.central_value)
err_right = self.ppf(cdf_central + (1 - cdf_central) * confidence_level(nsigma)) - self.central_value
if method == 'central':
return err_right
elif method == 'hpd':
if self.pdf(self.central_value - self.get_error_left(method='central')) == self.pdf(self.central_value + err_right):
return err_right
try:
a = self._find_error_cdf(confidence_level(nsigma))
except ValueError:
return np.nan
return self.ppf(a + confidence_level(nsigma)) - self.central_value
else:
raise ValueError("Method " + str(method) + " unknown")
@classmethod
def from_pd(cls, pd, nsteps=1000):
if isinstance(pd, NumericalDistribution):
return pd
_x = np.linspace(pd.support[0], pd.support[-1], nsteps)
_y = np.exp(pd.logpdf(_x))
return cls(central_value=pd.central_value, x=_x, y=_y)
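# Example (illustrative sketch): a NumericalDistribution from tabulated PDF values,
# here a Gaussian-shaped bump on [0, 2] (any positive y on a strictly ascending x works).
#   _x = np.linspace(0, 2, 201)
#   _y = np.exp(-0.5 * ((_x - 1.0) / 0.3) ** 2)
#   num = NumericalDistribution(_x, _y)
#   num.central_value           # 1.0, the x-value where y is largest
#   num.get_random(5)           # inverse-transform sampling via the interpolated CDF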
class GeneralGammaDistributionPositive(NumericalDistribution):
r"""Distribution appropriate for
a positive quantity obtained from a low-statistics counting experiment,
e.g. a rare decay rate.
The difference to `GammaUpperLimit` is that this class also allows to
specify an uncertainty on the number of background events. The result
is a numerical distribution obtained from the convolution of a normal
distribution (for the background uncertainty) and a gamma distribution,
restricted to positive values.
In contrast to `GammaUpperLimit`, the scale factor (the proportionality constant between
the observable of interest and the raw number of counts) is not determined
from a limit and a confidence level, but specified explicitly.
For the case of a limit, see `GeneralGammaUpperLimit`.
"""
def __init__(self,
scale_factor=1,
counts_total=None,
counts_background=None,
counts_signal=None,
background_variance=0):
r"""Initialize the distribution.
Parameters:
- `scale_factor`: scale factor by which the number of counts is multiplied
to get the observable of interest.
- `counts_total`: observed total number (signal and background) of counts.
- `counts_background`: expected mean number of background counts
- `counts_signal`: mean observed number of signal events
- `background_variance`: standard deviation of the expected number of
background events
Of the three parameters `counts_total`, `counts_background`, and
`counts_signal`, only two must be specified. The third one will
be determined from the relation
`counts_total = counts_signal + counts_background`
Note that if `background_variance=0`, it makes more sense to use
`GammaUpperLimit`, which is equivalent but analytical rather than
numerical.
"""
if scale_factor <= 0:
raise ValueError("Scale factor should be positive")
self.scale_factor = scale_factor
if counts_total is not None and counts_total < 0:
raise ValueError("counts_total should be a positive number, zero, or None")
if counts_background is not None and counts_background <= 0:
raise ValueError("counts_background should be a positive number or None")
if background_variance < 0:
raise ValueError("background_variance should be a positive number")
if [counts_total, counts_signal, counts_background].count(None) == 0:
# if all three are specified, check the relation holds!
if counts_background != counts_total - counts_signal:
raise ValueError("The relation `counts_total = counts_signal + counts_background` is not satisfied")
if counts_background is None:
self.counts_background = counts_total - counts_signal
else:
self.counts_background = counts_background
if counts_signal is None:
self.counts_signal = counts_total - counts_background
else:
self.counts_signal = counts_signal
if counts_total is None:
self.counts_total = counts_signal + counts_background
else:
self.counts_total = counts_total
self.background_variance = background_variance
x, y = self._get_xy()
if self.counts_total != 0 and self.background_variance/self.counts_total <= 1/100.:
warnings.warn("For vanishing or very small background variance, "
"it is safer to use GammaUpperLimit instead of "
"GeneralGammaUpperLimit to avoid numerical "
"instability.")
super().__init__(x=x, y=y)
def __repr__(self):
return ('flavio.statistics.probability.GeneralGammaDistributionPositive'
'({}, counts_total={}, counts_signal={}, '
'background_variance={})').format(self.scale_factor,
self.counts_total,
self.counts_signal,
self.background_variance)
def _get_xy(self):
if self.background_variance == 0:
# this is a bit pointless as in this case it makes more
# sense to use GammaUpperLimit itself
gamma_unscaled = GammaDistributionPositive(a = self.counts_total + 1,
loc = -self.counts_background,
scale = 1)
num_unscaled = NumericalDistribution.from_pd(gamma_unscaled)
else:
# define a gamma distribution (with x>loc, not x>0!) and convolve
# it with a Gaussian
gamma_unscaled = GammaDistribution(a = self.counts_total + 1,
loc = -self.counts_background,
scale = 1)
norm_bg = NormalDistribution(0, self.background_variance)
num_unscaled = convolve_distributions([gamma_unscaled, norm_bg], central_values='sum')
# now that we have convolved, cut off anything below x=0
x = num_unscaled.x
y = num_unscaled.y_norm
y = y[np.where(x >= 0)]
x = x[np.where(x >= 0)]
if x[0] != 0: # make sure the PDF at 0 exists
x = np.insert(x, 0, 0.) # add 0 as first element
y = np.insert(y, 0, y[0]) # copy first element
num_unscaled = NumericalDistribution(x, y)
x = x * self.scale_factor
return x, y
class GeneralGammaUpperLimit(GeneralGammaDistributionPositive):
r"""Distribution appropriate for
a positive quantity obtained from a low-statistics counting experiment,
e.g. a rare decay rate, given an upper limit on x.
The difference to `GammaUpperLimit` is that this class also allows to
specify an uncertainty on the number of background events. The result
is a numerical distribution obtained from the convolution of a normal
distribution (for the background uncertainty) and a gamma distribution,
restricted to positive values.
The only difference to `GeneralGammaDistributionPositive` is that the scale
factor is determined from the limit and confidence level.
"""
def __init__(self,
limit, confidence_level,
counts_total=None,
counts_background=None,
counts_signal=None,
background_variance=0):
r"""Initialize the distribution.
Parameters:
- `limit`: upper limit on x, which is proportional (with a positive
proportionality factor) to the number of signal events.
- `confidence_level`: confidence level of the upper limit, i.e. the value
of the CDF at the limit. Float between 0 and 1. Frequently used values
are 0.90 and 0.95.
- `counts_total`: observed total number (signal and background) of counts.
- `counts_background`: expected mean number of background counts
- `counts_signal`: mean observed number of signal events
- `background_variance`: standard deviation of the expected number of
background events
Of the three parameters `counts_total`, `counts_background`, and
`counts_signal`, only two must be specified. The third one will
be determined from the relation
`counts_total = counts_signal + counts_background`
Note that if `background_variance=0`, it makes more sense to use
`GammaUpperLimit`, which is equivalent but analytical rather than
numerical.
"""
self.limit = limit
self.confidence_level = confidence_level
_d_unscaled = GeneralGammaDistributionPositive(
scale_factor=1,
counts_total=counts_total,
counts_background=counts_background,
counts_signal=counts_signal,
background_variance=background_variance)
limit_unscaled = _d_unscaled.ppf(self.confidence_level)
# use the value of the limit to determine the scale factor
scale_factor = self.limit / limit_unscaled
super().__init__(
scale_factor=scale_factor,
counts_total=counts_total,
counts_background=counts_background,
counts_signal=counts_signal,
background_variance=background_variance)
def __repr__(self):
return ('flavio.statistics.probability.GeneralGammaUpperLimit'
'({}, {}, counts_total={}, counts_signal={}, '
'background_variance={})').format(self.limit,
self.confidence_level,
self.counts_total,
self.counts_signal,
self.background_variance)
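# Example (illustrative sketch, numbers are arbitrary): like the GammaUpperLimit
# example above, but with an uncertainty of 0.4 on the 1.2 expected background events.
#   ggul = GeneralGammaUpperLimit(limit=2.5, confidence_level=0.9,
#                                 counts_total=4, counts_background=1.2,
#                                 background_variance=0.4)
#   ggul.cdf(2.5)               # approximately 0.9 (numerical convolution)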
class KernelDensityEstimate(NumericalDistribution):
"""Univariate kernel density estimate.
Parameters:
- `data`: 1D array
- `kernel`: instance of `ProbabilityDistribution` used as smoothing kernel
- `n_bins` (optional): number of bins used in the intermediate step. This normally
does not have to be changed.
"""
def __init__(self, data, kernel, n_bins=None):
self.data = data
assert kernel.central_value == 0, "Kernel density must have zero central value"
self.kernel = kernel
self.n = len(data)
if n_bins is None:
self.n_bins = min(1000, self.n)
else:
self.n_bins = n_bins
y, x_edges = np.histogram(data, bins=self.n_bins, density=True)
x = (x_edges[:-1] + x_edges[1:])/2.
self.y_raw = y
self.raw_dist = NumericalDistribution(x, y)
cdist = convolve_distributions([self.raw_dist, self.kernel], 'sum')
super().__init__(cdist.x, cdist.y)
def __repr__(self):
return 'flavio.statistics.probability.KernelDensityEstimate' + \
'({}, {}, {})'.format(self.data, repr(self.kernel), self.n_bins)
class GaussianKDE(KernelDensityEstimate):
"""Univariate Gaussian kernel density estimate.
Parameters:
- `data`: 1D array
- `bandwidth` (optional): standard deviation of the Gaussian smoothing kernel.
If not provided, Scott's rule is used to estimate it.
- `n_bins` (optional): number of bins used in the intermediate step. This normally
does not have to be changed.
"""
def __init__(self, data, bandwidth=None, n_bins=None):
if bandwidth is None:
self.bandwidth = len(data)**(-1/5.) * np.std(data)
else:
self.bandwidth = bandwidth
super().__init__(data=data,
kernel = NormalDistribution(0, self.bandwidth),
n_bins=n_bins)
def __repr__(self):
return 'flavio.statistics.probability.GaussianKDE' + \
'({}, {}, {})'.format(self.data, self.bandwidth, self.n_bins)
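# Example (illustrative sketch with random toy data):
#   _data = np.random.normal(0, 1, 1000)
#   kde = GaussianKDE(_data)    # bandwidth from Scott's rule: len(data)**(-1/5) * std(data)
#   kde.get_error_left(), kde.get_error_right()   # each of order 1 for this sample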
class MultivariateNormalDistribution(ProbabilityDistribution):
"""A multivariate normal distribution.
Parameters:
- central_value: the location vector
- covariance: the covariance matrix
- standard_deviation: the square root of the variance vector
- correlation: the correlation matrix
If the covariance matrix is not specified, standard_deviation and the
correlation matrix have to be specified.
Methods:
- get_random(size=None): get `size` random numbers (default: a single one)
- logpdf(x, exclude=None): get the logarithm of the probability density
function. If an iterable of integers is given for `exclude`, the parameters
at these positions will be removed from the covariance before evaluating
the PDF, effectively ignoring certain dimensions.
Properties:
- error_left, error_right: both return the vector of standard deviations
"""
def __init__(self, central_value, covariance=None,
standard_deviation=None, correlation=None):
"""Initialize PDF instance.
Parameters:
- central_value: vector of means, shape (n)
- covariance: covariance matrix, shape (n,n)
"""
if covariance is not None:
self.covariance = covariance
self.standard_deviation = np.sqrt(np.diag(self.covariance))
self.correlation = self.covariance/np.outer(self.standard_deviation,
self.standard_deviation)
np.fill_diagonal(self.correlation, 1.)
else:
if standard_deviation is None:
raise ValueError("You must specify either covariance or standard_deviation")
self.standard_deviation = np.array(standard_deviation)
if correlation is None:
self.correlation = np.eye(len(self.standard_deviation))
else:
if isinstance(correlation, (int, float)):
# if it's a number, return delta_ij + (1-delta_ij)*x
n_dim = len(central_value)
self.correlation = np.eye(n_dim) + (np.ones((n_dim, n_dim))-np.eye(n_dim))*float(correlation)
else:
self.correlation = np.array(correlation)
self.covariance = np.outer(self.standard_deviation,
self.standard_deviation)*self.correlation
super().__init__(central_value, support=np.array([
np.asarray(central_value) - 6*self.standard_deviation,
np.asarray(central_value) + 6*self.standard_deviation
]))
# to avoid ill-conditioned covariance matrices, all data are rescaled
# by the inverse variances
self.err = np.sqrt(np.diag(self.covariance))
self.scaled_covariance = self.covariance / np.outer(self.err, self.err)
assert np.all(np.linalg.eigvals(self.scaled_covariance) >
0), "The covariance matrix is not positive definite!" + str(covariance)
def __repr__(self):
return 'flavio.statistics.probability.MultivariateNormalDistribution' + \
'({}, {})'.format(self.central_value, self.covariance)
def get_random(self, size=None):
"""Get `size` random numbers (default: a single one)"""
return np.random.multivariate_normal(self.central_value, self.covariance, size)
def reduce_dimension(self, exclude=None):
"""Return a different instance where certain dimensions, specified by
the iterable of integers `exclude`, are removed from the covariance.
If `exclude` contains all indices but one, an instance of
`NormalDistribution` will be returned.
"""
if not exclude:
return self
# if parameters are to be excluded, construct a
# distribution with reduced mean vector and covariance matrix
_cent_ex = np.delete(self.central_value, exclude)
_cov_ex = np.delete(
np.delete(self.covariance, exclude, axis=0), exclude, axis=1)
if len(_cent_ex) == 1:
# if only 1 dimension remains, can use a univariate Gaussian
_dist_ex = NormalDistribution(
central_value=_cent_ex[0], standard_deviation=np.sqrt(_cov_ex[0, 0]))
else:
# if more than 1 dimension remains, use a (smaller)
# multivariate Gaussian
_dist_ex = MultivariateNormalDistribution(
central_value=_cent_ex, covariance=_cov_ex)
return _dist_ex
def logpdf(self, x, exclude=None):
"""Get the logarithm of the probability density function.
Parameters:
- x: vector; position at which PDF should be evaluated
- exclude: optional; if an iterable of integers is given, the parameters
at these positions will be removed from the covariance before
evaluating the PDF, effectively ignoring certain dimensions.
"""
if exclude is not None:
# if parameters are to be excluded, construct a temporary
# distribution with reduced mean vector and covariance matrix
# and call its logpdf method
_dist_ex = self.reduce_dimension(exclude=exclude)
return _dist_ex.logpdf(x)
# undoing the rescaling of the covariance
pdf_scaled = scipy.stats.multivariate_normal.logpdf(
x / self.err, self.central_value / self.err, self.scaled_covariance)
# correct the normalization for the rescaling of the covariance
return pdf_scaled + (np.linalg.slogdet(self.scaled_covariance)[1] - np.linalg.slogdet(self.covariance)[1]) / 2.
def get_error_left(self, nsigma=1):
"""Return the lower errors"""
return nsigma * self.err
def get_error_right(self, nsigma=1):
"""Return the upper errors"""
return nsigma * self.err
def get_cov_mat(self):
"""Return the covariance matrix"""
return self.covariance
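# Example (illustrative sketch, numbers are arbitrary): the same 2D Gaussian
# specified in the two supported ways.
#   mvn1 = MultivariateNormalDistribution([1.0, 2.0],
#                                         covariance=[[0.04, 0.02], [0.02, 0.04]])
#   mvn2 = MultivariateNormalDistribution([1.0, 2.0],
#                                         standard_deviation=[0.2, 0.2],
#                                         correlation=0.5)
#   # mvn1 and mvn2 have identical covariance matrices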
class MultivariateNumericalDistribution(ProbabilityDistribution):
"""A multivariate distribution with PDF specified numerically."""
def __init__(self, xi, y, central_value=None):
"""Initialize a multivariate numerical distribution.
Parameters:
- `xi`: for an N-dimensional distribution, a list of N 1D arrays
specifiying the grid in N dimensions. The 1D arrays must contain
real, evenly spaced values in strictly ascending order (but the
spacing can be different for different dimensions). Any of the 1D
arrays can also be given alternatively as a list of two numbers, which
will be assumed to be the upper and lower boundaries, while the
spacing will be determined from the shape of `y`.
- `y`: PDF values on the grid defined by the `xi`. If the N `xi` have
length M1, ..., MN, `y` has dimension (M1, ..., MN). This is the same
shape as the grid obtained from `numpy.meshgrid(*xi, indexing='ij')`.
- central_value: if None (default), will be set to the mode of the
distribution, i.e. the N-dimensional xi-vector where y is largest
(by looking up the input arrays, i.e. without interpolation!)
"""
for x in xi:
# check that grid spacings are even up to per mille precision
d = np.diff(x)
if abs(np.min(d)/np.max(d)-1) > 1e-3:
raise ValueError("Grid must be evenly spaced per dimension")
self.xi = [np.asarray(x) for x in xi]
self.y = np.asarray(y)
for i, x in enumerate(xi):
if len(x) == 2:
self.xi[i] = np.linspace(x[0], x[1], self.y.shape[i])
if central_value is not None:
super().__init__(central_value=central_value,
support=(np.asarray(self.xi).T[0], np.asarray(self.xi).T[-1]))
else:
# if no central value is specified, set it to the mode
mode_index = (slice(None),) + np.unravel_index(self.y.argmax(), self.y.shape)
mode = np.asarray(np.meshgrid(*self.xi, indexing='ij'))[mode_index]
super().__init__(central_value=mode, support=None)
_bin_volume = np.prod([x[1] - x[0] for x in self.xi])
self.y_norm = self.y / np.sum(self.y) / _bin_volume # normalize PDF to 1
# ignore warning from log(0)=-np.inf
with np.errstate(divide='ignore', invalid='ignore'):
# logy = np.nan_to_num(np.log(self.y_norm))
logy = np.log(self.y_norm)
logy[np.isneginf(logy)] = -1e100
self.logpdf_interp = RegularGridInterpolator(self.xi, logy,
fill_value=-np.inf, bounds_error=False)
# the following is needed for get_random: initialize to None
self._y_flat = None
self._cdf_flat = None
def __repr__(self):
return 'flavio.statistics.probability.MultivariateNumericalDistribution' + \
'({}, {}, {})'.format([x.tolist() for x in self.xi], self.y.tolist(), list(self.central_value))
def get_random(self, size=None):
"""Draw a random number from the distribution.
If size is not None but an integer N, return an array of N numbers.
For the MultivariateNumericalDistribution, the PDF from which the
random numbers are drawn is approximated to be piecewise constant in
hypercubes around the points of the lattice spanned by the `xi`. A finer
lattice spacing will lead to a smoother distribution of random numbers
(but will also be slower).
"""
if size is None:
return self._get_random()
else:
return np.array([self._get_random() for i in range(size)])
def _get_random(self):
# if these have not been initialized, do it (once)
if self._y_flat is None:
# get a flattened array of the PDF
self._y_flat = self.y.flatten()
if self._cdf_flat is None:
# get the (discrete) 1D CDF
_cdf_flat = np.cumsum(self._y_flat)
# normalize to 1
self._cdf_flat = _cdf_flat/_cdf_flat[-1]
# draw a number between 0 and 1
r = np.random.uniform()
# find the index of the CDF-value closest to r
i_r = np.argmin(np.abs(self._cdf_flat-r))
indices = np.where(self.y == self._y_flat[i_r])
i_bla = np.random.choice(len(indices[0]))
index = tuple([a[i_bla] for a in indices])
xi_r = [ self.xi[i][index[i]] for i in range(len(self.xi)) ]
xi_diff = np.array([ X[1]-X[0] for X in self.xi ])
return xi_r + np.random.uniform(low=-0.5, high=0.5, size=len(self.xi)) * xi_diff
def reduce_dimension(self, exclude=None):
"""Return a different instance where certain dimensions, specified by
the iterable of integers `exclude`, are removed from the covariance.
If `exclude` contains all indices but one, an instance of
`NumericalDistribution` will be returned.
"""
if not exclude:
return self
# if parameters are to be excluded, construct a
# distribution with reduced mean vector and covariance matrix
try:
exclude = tuple(exclude)
except TypeError:
exclude = (exclude,)
xi = np.delete(self.xi, tuple(exclude), axis=0)
y = np.amax(self.y_norm, axis=tuple(exclude))
cv = np.delete(self.central_value, tuple(exclude))
if len(xi) == 1:
# if there is just 1 dimension left, use univariate
dist = NumericalDistribution(xi[0], y, cv)
else:
dist = MultivariateNumericalDistribution(xi, y, cv)
return dist
def logpdf(self, x, exclude=None):
"""Get the logarithm of the probability density function.
Parameters:
- x: vector; position at which PDF should be evaluated
- exclude: optional; if an iterable of integers is given, the parameters
at these positions will be ignored by maximizing the likelihood
along the remaining directions, i.e., they will be "profiled out".
"""
if exclude is not None:
# if parameters are to be excluded, construct a temporary
# distribution with reduced mean vector and covariance matrix
# and call its logpdf method
dist = self.reduce_dimension(exclude=exclude)
return dist.logpdf(x)
if np.asarray(x).shape == (len(self.central_value),):
# return a scalar
return self.logpdf_interp(x)[0]
else:
return self.logpdf_interp(x)
def get_error_left(self, *args, **kwargs):
raise NotImplementedError(
"1D errors not implemented for multivariate numerical distributions")
def get_error_right(self, *args, **kwargs):
raise NotImplementedError(
"1D errors not implemented for multivariate numerical distributions")
@classmethod
def from_pd(cls, pd, nsteps=100):
if isinstance(pd, cls):
# nothing to do
return pd
_xi = np.array([np.linspace(pd.support[0][i], pd.support[-1][i], nsteps)
for i in range(len(pd.central_value))])
ndim = len(_xi)
_xlist = np.array(np.meshgrid(*_xi, indexing='ij')).reshape(ndim, nsteps**ndim).T
_ylist = np.exp(pd.logpdf(_xlist))
_y = _ylist.reshape(tuple(nsteps for i in range(ndim)))
return cls(central_value=pd.central_value, xi=_xi, y=_y)
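# Example (illustrative sketch, numbers are arbitrary): converting a multivariate
# Gaussian to a grid-based numerical distribution and profiling out one dimension.
#   mvd = MultivariateNormalDistribution([1.0, 2.0],
#                                        covariance=[[0.04, 0.02], [0.02, 0.04]])
#   mvnum = MultivariateNumericalDistribution.from_pd(mvd, nsteps=100)
#   mvnum.logpdf([1.0, 2.0])                      # interpolated log-PDF near the mode
#   one_d = mvnum.reduce_dimension(exclude=(1,))  # NumericalDistribution in the first variable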
# Auxiliary functions
def convolve_distributions(probability_distributions, central_values='same'):
"""Combine a set of probability distributions by convolving their PDFs.
This function can be used in two different ways:
- for `central_values='same'`, it can be used to combine uncertainties on a
single parameter/observable expressed in terms of probability distributions
with the same central value.
- for `central_values='sum'`, it can be used to determine the probability
distribution of a sum of random variables.
The only difference between the two cases is a shift: for 'same', the
central value of the convolution is the same as the original central value,
for 'sum', it is the sum of the individual central values.
`probability_distributions` must be a list of instances of descendants of
`ProbabilityDistribution`.
"""
if central_values not in ['same', 'sum']:
raise ValueError("central_values must be either 'same' or 'sum'")
def dim(x):
# 1 for floats and length for arrays
try:
float(x)
except:
return len(x)
else:
return 1
dims = [dim(p.central_value) for p in probability_distributions]
assert all([d == dims[0] for d in dims]), "All distributions must have the same number of dimensions"
if dims[0] == 1:
return _convolve_distributions_univariate(probability_distributions, central_values)
else:
return _convolve_distributions_multivariate(probability_distributions, central_values)
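# Example (illustrative sketch, values are arbitrary) of the two modes described above:
#   na = NormalDistribution(1.0, 0.3)
#   nb = NormalDistribution(1.0, 0.4)
#   convolve_distributions([na, nb])                        # 'same': NormalDistribution(1.0, 0.5)
#   convolve_distributions([na, nb], central_values='sum')  # 'sum':  NormalDistribution(2.0, 0.5)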
def _convolve_distributions_univariate(probability_distributions, central_values='same'):
"""Combine a set of univariate probability distributions."""
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
if central_values == 'same':
central_value = probability_distributions[0].central_value
assert all(p.central_value == central_value for p in probability_distributions), \
"Distributions must all have the same central value"
# all delta dists
deltas = [p for p in probability_distributions if isinstance(
p, DeltaDistribution)]
if central_values == 'sum' and deltas:
raise NotImplementedError("Convolution of DeltaDistributions only implemented for equal central values")
# central_values is 'same', we can instead just ignore the delta distributions!
# all normal dists
gaussians = [p for p in probability_distributions if isinstance(
p, NormalDistribution)]
# all other univariate dists
others = [p for p in probability_distributions
if not isinstance(p, NormalDistribution)
and not isinstance(p, DeltaDistribution)]
if not others and not gaussians:
# if there is only a delta (or more than one), just return it
if central_values == 'same':
return deltas[0]
elif central_values == 'sum':
return DeltaDistribution(sum([p.central_value for p in deltas]))
# let's combine the normal distributions into 1
if gaussians:
gaussian = _convolve_gaussians(gaussians, central_values=central_values)
if gaussians and not others:
# if there are only the gaussians, we are done.
return gaussian
else:
# otherwise, we need to combine the (combined) gaussian with the others
if gaussians:
to_be_combined = others + [gaussian]
else:
to_be_combined = others
# turn all distributions into numerical distributions!
numerical = [NumericalDistribution.from_pd(p) for p in to_be_combined]
return _convolve_numerical(numerical, central_values=central_values)
def _convolve_distributions_multivariate(probability_distributions, central_values='same'):
"""Combine a set of multivariate probability distributions."""
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
if central_values == 'same':
central_value = probability_distributions[0].central_value
assert all(p.central_value[i] == central_value[i] for p in probability_distributions for i in range(len(central_value))), \
"Distributions must all have the same central value"
for p in probability_distributions:
if not ( isinstance(p, MultivariateNormalDistribution)
or isinstance(p, MultivariateNumericalDistribution) ):
raise ValueError("Multivariate convolution only implemented "
"for normal and numerical distributions")
# all normal dists
gaussians = [p for p in probability_distributions if isinstance(
p, MultivariateNormalDistribution)]
# all numerical dists
others = [p for p in probability_distributions if isinstance(
p, MultivariateNumericalDistribution)]
# let's combine the normal distributions into 1
if gaussians:
gaussian = _convolve_multivariate_gaussians(gaussians,
central_values=central_values)
if gaussians and not others:
# if there are only the gaussians, we are done.
return gaussian
else:
# otherwise, we need to combine the (combined) gaussian with the others
if len(others) > 1:
raise NotImplementedError("Combining multivariate numerical distributions not implemented")
else:
num = _convolve_multivariate_gaussian_numerical(gaussian, others[0],
central_values=central_values)
return num
def _convolve_gaussians(probability_distributions, central_values='same'):
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
assert all(isinstance(p, NormalDistribution) for p in probability_distributions), \
"Distributions should all be instances of NormalDistribution"
if central_values == 'same':
central_value = probability_distributions[0].central_value # central value of the first dist
assert all(p.central_value == central_value for p in probability_distributions), \
"Distrubtions must all have the same central value"
elif central_values == 'sum':
central_value = sum([p.central_value for p in probability_distributions])
sigmas = np.array(
[p.standard_deviation for p in probability_distributions])
sigma = math.sqrt(np.sum(sigmas**2))
return NormalDistribution(central_value=central_value, standard_deviation=sigma)
def _convolve_multivariate_gaussians(probability_distributions, central_values='same'):
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
assert all(isinstance(p, MultivariateNormalDistribution) for p in probability_distributions), \
"Distributions should all be instances of MultivariateNormalDistribution"
if central_values == 'same':
central_value = probability_distributions[0].central_value # central value of the first dist
assert all(p.central_value == central_value for p in probability_distributions), \
"Distrubtions must all have the same central value"
elif central_values == 'sum':
central_value = np.sum([p.central_value for p in probability_distributions], axis=0)
cov = np.sum([p.covariance for p in probability_distributions], axis=0)
return MultivariateNormalDistribution(central_value=central_value, covariance=cov)
def _convolve_numerical(probability_distributions, nsteps=10000, central_values='same'):
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
assert all(isinstance(p, NumericalDistribution) for p in probability_distributions), \
"Distributions should all be instances of NumericalDistribution"
if central_values == 'same':
central_value = probability_distributions[0].central_value # central value of the first dist
assert all(p.central_value == central_value for p in probability_distributions), \
"Distrubtions must all have the same central value"
elif central_values == 'sum':
central_value = sum([p.central_value for p in probability_distributions])
# differences of individual central values from combined central value
central_diffs = [central_value - p.central_value for p in probability_distributions]
# (shifted appropriately)
supports = (np.array([p.support for p in probability_distributions]).T + central_diffs).T
support = (central_value - (central_value - supports[:, 0]).sum(),
central_value - (central_value - supports[:, 1]).sum())
delta = (support[1] - support[0]) / (nsteps - 1)
x = np.linspace(support[0], support[1], nsteps)
# position of the central value
n_x_central = math.floor((central_value - support[0]) / delta)
y = None
for i, pd in enumerate(probability_distributions):
y1 = np.exp(pd.logpdf(x - central_diffs[i])) * delta
if y is None:
# first step
y = y1
else:
# convolution
y = scipy.signal.fftconvolve(y, y1, 'full')
# cut out the convolved signal at the right place
y = y[n_x_central:nsteps + n_x_central]
return NumericalDistribution(central_value=central_value, x=x, y=y)
def _convolve_multivariate_gaussian_numerical(mvgaussian,
mvnumerical,
central_values='same'):
assert isinstance(mvgaussian, MultivariateNormalDistribution), \
"mvgaussian must be a single instance of MultivariateNormalDistribution"
assert isinstance(mvnumerical, MultivariateNumericalDistribution), \
"mvgaussian must be a single instance of MultivariateNumericalDistribution"
nsteps = max(200, *[len(x) for x in mvnumerical.xi])
xi = np.zeros((len(mvnumerical.xi), nsteps))
for i, x in enumerate(mvnumerical.xi):
# enlarge the support
cvn = mvnumerical.central_value[i]
cvg = mvgaussian.central_value[i]
supp = [s[i] for s in mvgaussian.support]
x_max = cvn + (x[-1] - cvn) + (supp[-1] - cvn) + np.mean(x) - cvg
x_min = cvn + (x[0] - cvn) + (supp[0] - cvn) + np.mean(x) - cvg
xi[i] = np.linspace(x_min, x_max, nsteps)
xi_grid = np.array(np.meshgrid(*xi, indexing='ij'))
# this will transpose from shape (0, 1, 2, ...) to (1, 2, ..., 0)
xi_grid = np.transpose(xi_grid, tuple(range(1, xi_grid.ndim)) + (0,))
y_num = np.exp(mvnumerical.logpdf(xi_grid))
# shift Gaussian to the mean of the support
xi_grid = xi_grid - np.array([np.mean(x) for x in xi]) + np.array(mvgaussian.central_value)
y_gauss = np.exp(mvgaussian.logpdf(xi_grid))
f = scipy.signal.fftconvolve(y_num, y_gauss, mode='same')
f[f < 0] = 0
f = f/f.sum()
if central_values == 'sum':
# shift back
xi = (xi.T + np.array(mvgaussian.central_value)).T
return MultivariateNumericalDistribution(xi, f)
def combine_distributions(probability_distributions):
"""Combine a set of probability distributions by multiplying the PDFs.
`probability_distributions` must be a list of instances of descendants of
`ProbabilityDistribution`.
"""
def dim(x):
# 1 for floats and length for arrays
try:
float(x)
except:
return len(x)
else:
return 1
dims = [dim(p.central_value) for p in probability_distributions]
assert all([d == dims[0] for d in dims]), "All distributions must have the same number of dimensions"
if dims[0] == 1:
return _combine_distributions_univariate(probability_distributions)
else:
return _combine_distributions_multivariate(probability_distributions)
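# Hedged usage sketch (illustrative only, not part of the public API): combining
# two Gaussian measurements of the same observable. The NormalDistribution
# constructor arguments follow the usage elsewhere in this module; the example
# assumes that combining Gaussians yields another NormalDistribution.
def _example_combine_two_measurements():
    """Multiply the PDFs of two Gaussian measurements of the same quantity."""
    meas_1 = NormalDistribution(central_value=1.00, standard_deviation=0.20)
    meas_2 = NormalDistribution(central_value=1.10, standard_deviation=0.30)
    combined = combine_distributions([meas_1, meas_2])
    # for two Gaussians the result is pulled towards the more precise measurement
    return combined.central_value, combined.standard_deviation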
def _combine_distributions_univariate(probability_distributions):
# if there's just one: return it immediately
if len(probability_distributions) == 1:
return probability_distributions[0]
# all delta dists
deltas = [p for p in probability_distributions if isinstance(
p, DeltaDistribution)]
if len(deltas) > 1:
# for multiple delta dists, check if central values are the same
cvs = set([p.central_value for p in deltas])
if len(cvs) > 1:
raise ValueError("Combining multiple delta distributions with different central values yields zero PDF")
else:
return deltas[0]
elif len(deltas) == 1:
# for single delta dist, nothing to combine: delta always wins!
return deltas[0]
# all normal dists
gaussians = [p for p in probability_distributions if isinstance(
p, NormalDistribution)]
# all other univariate dists
others = [p for p in probability_distributions
if not isinstance(p, NormalDistribution)
and not isinstance(p, DeltaDistribution)]
# let's combine the normal distributions into 1
if gaussians:
gaussian = _combine_gaussians(gaussians)
if gaussians and not others:
# if there are only the gaussians, we are done.
return gaussian
else:
# otherwise, we need to combine the (combined) gaussian with the others
if gaussians:
to_be_combined = others + [gaussian]
else:
to_be_combined = others
# turn all distributions into numerical distributions!
numerical = [NumericalDistribution.from_pd(p) for p in to_be_combined]
return _combine_numerical(numerical)
def weighted_average(central_values, standard_deviations):
    """Return the central value and standard deviation of the weighted average
    of a set of normal distributions specified by a list of central values
    and standard deviations."""
    # inverse-variance weights: the standard way to combine independent
    # Gaussian measurements of the same quantity
    weights = 1 / np.asarray(standard_deviations)**2
    c = np.average(central_values, weights=weights)
    u = np.sqrt(1 / np.sum(weights))
    return c, u
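# Hedged usage sketch: combining 1.0 ± 0.1 with 1.2 ± 0.2 using the
# inverse-variance weights implemented above gives roughly 1.04 ± 0.09,
# i.e. the result is pulled towards the more precise measurement.
def _example_weighted_average():
    return weighted_average([1.0, 1.2], [0.1, 0.2])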
import copy
import warnings
import pandas as pd
import numpy as np
from scipy.optimize import fmin
# generate_random_power_law_distribution is used by the exceedance test below;
# the import path is an assumption based on the package's utils module layout
from .utils import generate_random_power_law_distribution
import os
CWD = os.path.dirname(os.path.abspath(__file__))
class FFD(object):
"""Flare frequency distribution.
alpha and beta refer to a power law that
can be used to model the FFD.
dN/dE = beta * E^(-alpha)
N - number of flares
E - energy or equivalent duration
Attributes:
-----------
f : DataFrame
flare table in the FlareLightCurve.flares format
with extra columns for flare target identifiers
alpha : float
power law exponent
alpha_err : float
power law exponent uncertainty
beta : float
power law intercept
beta_err : float
power law intercept uncertainty
tot_obs_time: float
total observing time during which
the flares in f were detected
ID : str
column name in f for the flare target identifier
ed : array
EDs in cumulative FFD, sorted
freq : array
frequencies of EDs in cumulative FFD, sorted like ed
count_ed : array
frequency adjusted ed sample
multiple_stars : bool
True when ed_and_freq was called with multiple_stars
flag set
"""
def __init__(self, f=None, alpha=None, alpha_err=None,
beta=None, beta_err=None, tot_obs_time=1.,
ID=None, multiple_stars=False):
self.f = f
self.alpha = alpha
self.alpha_err = alpha_err
self.beta = beta
self.beta_err = beta_err
self.tot_obs_time = tot_obs_time
self._ed = None
self._freq = None
self._count_ed = None
self.ID = ID
self._multiple_stars = multiple_stars
# Set all the setters and getters for attributes
# that only methods should be allowed to change:
@property
def multiple_stars(self):
return self._multiple_stars
@multiple_stars.setter
def multiple_stars(self, multiple_stars):
print(f"Setting multiple_stars flag with {multiple_stars}.")
self._multiple_stars = multiple_stars
@property
def ed(self):
return self._ed
@ed.setter
def ed(self, ed):
print(f"Setting ED with new values, size {len(ed)}.")
self._ed = ed
@property
def freq(self):
return self._freq
@freq.setter
def freq(self, freq):
print(f"Setting frequency values with new values, size {len(freq)}.")
self._freq = freq
@property
def count_ed(self):
return self._count_ed
@count_ed.setter
def count_ed(self, count_ed):
print(f"Setting frequency adjusted count values "
f"with new values, size {len(count_ed)}.")
self._count_ed = count_ed
# -----------------------------------------------------------------------
def ed_and_freq(self, energy_correction=False,
recovery_probability_correction=False,
multiple_stars=False):
"""Take the flare table and return the FFD with
different or no corrections. tot_obs_time is used to
convert counts to frequencies and defines its unit.
Parameters:
------------
energy_correction: bool, default False
use ed_corr instead of ed_rec
recovery_probability_correction: bool, default False
multiply inverse recovery probabilities instead
of assuming the recovery probability was 1
multiple_stars: bool, default False
apply a first order approximation to account
for the effects of stacking FFDs of stars with
different detection thresholds
Return:
-------
ed, freq, count_ed - equivalent durations and corresponding
cumulative frequencies, and frequency
adjusted event sample. See `_ed_and_counts`
method for details.
"""
# Convert human readable cases to keywords
if ((energy_correction is False) &
(recovery_probability_correction is False)):
key = "no_corr"
elif ((energy_correction is True) &
(recovery_probability_correction is False)):
key = "ed_corr"
elif ((energy_correction is True) &
(recovery_probability_correction is True)):
key = "edrecprob_corr"
else:
raise KeyError("This set of parameters for energy "
"correction, recovery probability "
"correction is not implemented. You must"
" set energy_correction=True if you wish to "
"set recovery_probability_correction=True.")
return self._ed_and_counts(key, multiple_stars)
def _ed_and_counts(self, key, multiple_stars):
"""Sub function to ed_and_func.
Parameters:
------------
key : str
defines type of correction to apply to FFD
multiple_stars: bool
if True will use a first order approximation to
account for stacking FFDs of multiple stars
Return:
-------
ed, freq, count_ed - equivalent durations and corresponding
cumulative frequencies, and frequency
adjusted event sample
"""
# df, ID, col are flare table, identifier column name in df,
# and column name for the ED array in df in each of the
# functions below.
        # Each function returns two arrays: sorted flare EDs or energies,
# and their respective frequencies.
def cum_dist(df, col, ID):
"""simple cumulative distribution."""
return (np.arange(1, df[col].shape[0] + 1, 1) / self.tot_obs_time,
np.ones_like(df[col].values))
def get_msf_cum_dist(df, col, ID):
"""simple cumulative distribution
accounting for multiple stars with different
detection thresholds in FFDs"""
freq = _get_multistar_factors(df, ID, col)
self.multiple_stars = True
return (np.cumsum(1 / freq) / self.tot_obs_time,
1 / freq)
def cum_dist_rec_prob(df, col, ID):
"""cumulative distribution accounting for
recovery probabilities of individual flares"""
freq = (np.cumsum(1. / df.recovery_probability.values) /
self.tot_obs_time)
return freq, 1. / df.recovery_probability.values
def get_msf_cumdist_recprob(df, col, ID):
"""cumulative distribution accounting for
recovery probabilities of individual flares
and multiple stars with different detection
thresholds in FFDs"""
freq_ = _get_multistar_factors(df, ID, col)
self.multiple_stars = True
cfreq = (np.cumsum(1. / df.recovery_probability.values / freq_) /
self.tot_obs_time)
return cfreq, 1. / df.recovery_probability.values / freq_
# Different keys call different correction procedures
vals = {"no_corr": {False: ["ed_rec", cum_dist],
True: ["ed_rec", get_msf_cum_dist]},
"ed_corr": {False: ["ed_corr", cum_dist],
True: ["ed_corr", get_msf_cum_dist]},
"edrecprob_corr": {False: ["ed_corr", cum_dist_rec_prob],
True: ["ed_corr", get_msf_cumdist_recprob]}
}
# make a copy to sort safely without affecting self.f
df = self.f.copy(deep=True)
# retrieve ED type (corrected or not), and function for counts
col, func = vals[key][multiple_stars]
df = df.sort_values(by=col, ascending=False)
ed = df[col].values # get the right EDs
# get the (corrected) flare counts
freq, counts = func(df, col, self.ID)
self.ed = ed
self.freq = freq
self.count_ed = _get_frequency_corrected_ed_sample(ed, counts)
return self.ed, self.freq, self.count_ed
def fit_beta_to_powerlaw(self, mode="ED"):
'''Fit beta via non-linear least squares to a power
law with given alpha using the cumulative
FFD. Generate uncertainty using jackknife algorithm.
Parameters:
-----------
mode : str
ED or energy will set the starting value for the
least square minimization
Return:
-------
_beta, beta, beta_err - array, float, float
jackknife sample of beta values, mean beta, beta uncertainty
'''
def LSQ(x0, ed, freq, alpha):
zw = ((x0 /
(np.power(ed, alpha - 1.) * (alpha - 1.)) - freq)**2).sum()
return np.sqrt(zw)
N = len(self.ed)
if N == 0:
raise ValueError('No data.')
# jackknife uncertainty
x0starts = {'ED': 10, 'energy': 1e25}
_beta = np.array([fmin(LSQ, x0=x0starts[mode],
args=(np.delete(self.ed, i),
np.delete(self.freq, i),
self.alpha),
disp=0)[0] for i in range(N)])
# cumulative beta = beta_cum
beta = _beta.mean()
beta_err = np.sqrt((N - 1) / N * ((_beta - beta)**2).sum())
# propagate errors on alpha to beta
beta_err = (np.sqrt(beta_err**2 * (self.alpha - 1.)**2 +
beta**2 * self.alpha_err**2))
# set attributes
self.beta = beta
self.beta_err = beta_err
return _beta, self.beta, self.beta_err
def plot_powerlaw(self, ax, custom_xlim=None, **kwargs):
'''
Plot the power law fit to the FFD. [No tests]
Parameters:
-----------
ax : matplotlibe Axes object
plot to insert the power law in to
custom_xlim : 2-tuple
minimum, maximum ED/energy value for power law
kwargs : dict
Keyword arguments to pass to plt.plot()
Return:
--------
3 power law points to construct a line
in log-log representation.
'''
if custom_xlim is None:
x = np.linspace(np.nanmin(self.ed), np.nanmax(self.ed), 3)
else:
mi, ma = custom_xlim
x = np.linspace(mi, ma, 3)
y = self.beta / np.abs(self.alpha - 1.) * np.power(x, -self.alpha + 1.)
a = ax.plot(x, y, **kwargs)
return a, x, y
def fit_powerlaw(self, alims=[1.01, 3.]):
'''
Calculate the un-biased ML power law estimator
from Maschberger and Kroupa (2009), sections
3.1.4. and 3.1.5. by simply minimizing the equation in
ML_powerlaw_estimator.
Parameters:
------------
alims:
parameter range for power law exponent
Return:
-------
alpha, alpha_err - float, float
power law exponent and its jackknife uncertainty
'''
# use frequency adjusted ED sample?
ed = self._get_ed()
        # solve eq. 9 using scipy.fmin, define jackknife uncertainty
N = len(ed)
_alpha = np.array([fmin(_ML_powerlaw_estimator, x0=2.,
args=(np.delete(ed, i),), disp=0)[0]
for i in range(N)])
# alpha is the mean value
alpha = _alpha.mean()
# uncertainty is the standard deviation
sig_alpha = np.sqrt((N - 1) / N * ((_alpha - alpha)**2).sum())
self.alpha = alpha
self.alpha_err = sig_alpha
return self.alpha, self.alpha_err
def is_powerlaw_truncated(self, rejection=(.15, .05), nthresh=100):
'''
Apply the exceedance test recommended by
Maschberger and Kroupa 2009.
Parameters:
------------
rejection : tuple of floats < 1.
above these thresholds the distribution
can be suspected to be truncated
nthresh : int
Number at which to use the more permissive
or more restrictive truncation rejection
limit, i.e. value 0 or 1 in `rejection`
Return:
---------
        True if the power law is not consistent with an un-truncated power law
        False if the power law is consistent with an un-truncated power law
'''
ed = self._get_ed()
mean, std = _calculate_average_number_of_exceeding_values(ed,
self.alpha,
500)
if self.alpha > 2.:
warnings.warn('Power law exponent is steep. '
'Power of statistical tests decreases '
'according to Maschberger and Kroupa 2009.')
if len(ed) >= nthresh:
truncation_limit = rejection[1]
else:
truncation_limit = rejection[0]
truncated = (mean / len(ed) > truncation_limit)
return truncated
def is_powerlaw(self, sig_level=0.05):
'''
Test if we must reject the power law hypothesis
judging by the stabilised Kolmogorov-Smirnov
statistic, suggested by Maschberger and Kroupa
2009.
Parameters:
-----------
sig_level : float < 1.
significance level for the hypothesis test
Returns:
---------
True if we cannot reject the power law hypothesis.
False if we must reject the power law hypothesis.
'''
ed = self._get_ed()
truncated = self.is_powerlaw_truncated()
KS = _stabilised_KS_statistic(ed, alpha=self.alpha,
truncated=truncated)
limit = _calculate_KS_acceptance_limit(len(self.ed),
sig_level=sig_level)
ispowerlaw = KS < limit
if ispowerlaw is False:
warnings.warn('Kolmogorov-Smirnov tells us to reject'
r' the power law hypothesis at p={}.'
' KS={}, limit={}'.format(sig_level, KS, limit))
return ispowerlaw
def _get_ed(self):
"""Get ED array either for a single star sample
or a multiple stars sample, depending on `multiple_stars`
flag.
Return:
-------
ed - sample of flare energies
"""
if self.multiple_stars is True:
ed = self.count_ed
elif self.multiple_stars is False:
ed = self.ed
return ed
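# Hedged usage sketch (illustrative only): build a cumulative FFD from a small
# made-up flare table and fit the power law. The "TIC" identifier column and
# all numbers below are hypothetical.
def _example_ffd_fit():
    flares = pd.DataFrame({"ed_rec": [12., 35., 80., 150., 400., 900.],
                           "TIC": [1, 1, 1, 2, 2, 2]})
    ffd = FFD(f=flares, tot_obs_time=20., ID="TIC")
    ed, freq, count_ed = ffd.ed_and_freq()
    alpha, alpha_err = ffd.fit_powerlaw()
    _beta, beta, beta_err = ffd.fit_beta_to_powerlaw(mode="ED")
    return alpha, beta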
def _calculate_average_number_of_exceeding_values(data, alpha, n, **kwargs):
'''
Parameters:
-----------
    data : array
        observed values
    alpha : float
        best-fit power law exponent to the data
n : int
number of samples to average
kwargs : dict
Keyword arguments to pass to
:func:calculate_number_of_exceeding_values
Returns:
--------
(mean, std) : (float, float)
        average number of exceeding values
and standard deviation
'''
assert alpha is not None
assert data is not None
exceedance_statistic = [_calculate_number_of_exceeding_values(data,
alpha,
**kwargs)
for i in range(n)]
exceedance_statistic = np.array(exceedance_statistic)
return np.nanmean(exceedance_statistic), np.nanstd(exceedance_statistic)
def _calculate_number_of_exceeding_values(data, alpha, maxlim=1e8, **kwargs):
'''
Helper function that mimicks data similar
to the observations (same alpha and size)
and returns a sample from an untruncated
distribution. The number of values that
exceeds the maximum in the actual data is
returned.
Parameters:
-----------
data : array
observed values
alpha : float
best-fit power law exponent to the data
maxlim : float > 1.
factor to simulate an untruncated
version of the given power law
distribution
kwargs : dict
Keyword arguments to pass to
:func:generate_random_power_law_distribution
Return:
--------
int : number of exceeding values
'''
pdist = generate_random_power_law_distribution(np.min(data),
np.max(data) * maxlim,
-alpha + 1,
size=data.shape[0],
**kwargs)
if np.isnan(pdist).any():
raise ValueError('Fake power law distribution for the'
' exceedance test could not be generated.'
' Check your inputs.')
return len(np.where(pdist > np.max(data))[0])
def _get_multistar_factors(dataframe, ID, sort):
"""Returns an array of factors to apply
to the detected flares. Accounts for the
number of targets with different detection
thresholds that contribute to the sample
factor = 1 / number of targets that contribute
their flares above a given flare energy.
This is a first order approximation that is
assuming that the least energetic flare in a
light curve is just above the detection limit.
If the smallest flare is significantly above
    the detection limit of a target's light curve,
    the frequencies of low-energy flares will be
overestimated.
The steeper the FFD the better the approximation.
Parameters:
-----------
dataframe: DataFrame
flare table with ID and sort columns
ID: str
column name for star ID in dataframe
sort: str
column name for energies or EDs dataframe
Return:
-------
multistar factor array
"""
freq = []
# make a copy to safely sort the dataframe
df = dataframe.copy(deep=True)
# check if sort exists, otherwise throw error
try:
df = df.sort_values(by=sort, ascending=True)
except:
raise KeyError(f"The flare table needs a {sort} column.")
# loop over sorted dataframe and find the number of targets
    # that contribute to the sub-frame
for i in range(df.shape[0]):
try:
f = df.iloc[:i + 1] # sub-frame
freq.append(len(set(f[ID].values))) # append number of targets
        except KeyError:  # the column holding the unique target IDs is missing
raise KeyError("Pass the column name of target IDs "
"to the FFD constructor: ID = ???.")
# factor for maximum energy goes first and must be 1, the rest < 1
return np.array(freq[::-1]) / freq[-1]
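# Hedged toy example (illustrative only): two stars with different minimum
# detected EDs. The returned factors are aligned with the EDs in descending
# order, matching how `_ed_and_counts` consumes them above; the column names
# and numbers are hypothetical.
def _example_multistar_factors():
    df = pd.DataFrame({"TIC": [1, 1, 1, 2, 2],
                       "ed_rec": [10., 60., 200., 55., 300.]})
    return _get_multistar_factors(df, ID="TIC", sort="ed_rec")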
def _ML_powerlaw_estimator(alpha, ed):
'''
Power law maximum likelihood estimator from
Maschberger and Kroupa (2009),
formula (9).
Parameters:
-----------
alpha : float
approximate value for power law exponent
ed : array
ED or energy array
Return:
--------
absolute value of left side of formula (9)
To find MLE for alpha, minimize this term.
'''
if np.array(alpha <= 1.).any():
# Power law exponent must be >1.
return np.nan
n = len(ed)
if n == 0:
raise ValueError('No data.')
# Calculate Y variable in formula (9)
Y = ed.min()
if Y < 0:
raise ValueError('Negative value encountered in data.')
# De-bias alpha following Maschberger and Kroupa 2009
alpha = _de_bias_alpha(n, alpha)
# Calculate the remaining variables in formula (9)
Yexp = (np.power(Y, 1 - alpha))
T = np.log(ed).sum()
Z = _de_biased_upper_limit(ed, alpha)
Zexp = (np.power(Z, 1 - alpha))
return (np.abs(n / (alpha - 1) +
n * ((Zexp * np.log(Z) - Yexp * np.log(Y)) /
(Zexp - Yexp)) -
T))
def _de_biased_upper_limit(data, a):
'''
De-biases the upper limits for a
ML power law exponent estimator.
    Uses formula (13) (and (14)) from
Maschberger and Kroupa (2009).
Parameters:
-----------
data : Series or array
data that is suspected to follow
a power law relation
a : float or array of floats
quasi de-biased ML estimator for alpha
(de_bias_alpha before inserting here!)
Returns:
---------
Quasi de-biased upper limit.
'''
if len(data) == 0:
raise ValueError('No data.')
if (data < 0).any():
raise ValueError('Negative values '
'encountered in data.')
Xn = data.max()
X1 = data.min()
if Xn == X1:
raise ValueError('Data range is zero.')
n = len(data)
G = (1. - a) * np.log(Xn / X1) # (14)
base = 1. + (np.exp(G) - 1.) / n
exponent = 1. / (1. - a)
return Xn * np.power(base, exponent)
def _de_bias_alpha(n, alpha):
'''
De-biases the power law value
according to Maschberger and Kroupa (2009),
formula (12).
    Parameters:
------------
n : int
Size of the data
alpha : float or array of floats
Power law exponent value from ML estimator
Returns:
-----------
quasi de-biased ML estimator for alpha
'''
if np.array(np.isnan(n) | np.isnan(np.array(alpha)).any()):
raise ValueError('de_bias_alpha: one or '
'both arg(s) is/are NaN')
return (alpha - 1.) * n / (n - 2) + 1.
def _stabilised_KS_statistic(data, alpha, truncated):
'''
Calculate the stabilised KS statistic
from Maschberger and Kroupa 2009, Eqn. (21)
    originally from Michael 1983 and Kimber 1985.
Parameters:
--------------
data : array
observed values that are suspected
to follow a power law relation
    alpha : float
        best-fit power law exponent
    truncated : bool
        True if the power law distribution is suspected to be truncated
Return:
--------
float - stablised KS statistic
'''
sorted_data = np.sort(data)
pp = _calculate_cumulative_powerlaw_distribution(sorted_data,
alpha, truncated)
y = (np.arange(1, len(pp) + 1) - .5) / len(pp)
argument = (_apply_stabilising_transformation(y) -
_apply_stabilising_transformation(pp))
return np.max(np.abs(argument))
def _calculate_cumulative_powerlaw_distribution(data, alpha, truncated):
'''
Calculates the cumulative powerlaw distribution
from the data, given the best fit power law exponent
for y(x) ~ x^(-alpha).
Eq. (2) in Maschberger and Kroupa 2009.
Parameters:
-----------
data : array
observed values that are suspected
to follow a power law relation, sorted in
ascending order
alpha : float
best-fit power law exponent
truncated : bool
True if the power law distribution is truncated
Returns:
---------
array : cumulative distribution
'''
if alpha <= 1.:
raise ValueError('This distribution function is only'
' valid for alpha > 1., see also '
'Maschberger and Kroupa 2009.')
data = np.sort(data)
def expa(x, alpha):
        return np.power(x, 1. - alpha)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: <NAME> <EMAIL>
# License: BSD 3 clause
# Modified by MDA (<EMAIL>)
from datetime import datetime
from datetime import date
import numpy as np
from matplotlib import rcParams
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlencode
from sklearn import cluster, covariance, manifold
import quandl
from quandl_config import API_CONFIG_KEY
quandl.ApiConfig.api_key = API_CONFIG_KEY
rcParams.update({'font.size': 8})
###############################################################################
# Retrieve the data from the Internet
def quotes_historical_google(symbol, date1, date2):
"""Get the historical data from Google finance.
Parameters
----------
symbol : str
Ticker symbol to query for, for example ``"DELL"``.
date1 : datetime.datetime
Start date.
date2 : datetime.datetime
End date.
Returns
-------
X : array
The columns are ``date`` -- datetime, ``open``, ``high``,
``low``, ``close`` and ``volume`` of type float.
"""
params = urlencode({
'q': symbol,
'startdate': date1.strftime('%b %d, %Y'),
'enddate': date2.strftime('%b %d, %Y'),
'output': 'csv'
})
url = 'http://www.google.com/finance/historical?' + params
with urlopen(url) as response:
# print(response, symbol)
dtype = {
'names': ['date', 'open', 'high', 'low', 'close', 'volume'],
'formats': ['object', 'f4', 'f4', 'f4', 'f4', 'f4']
}
converters = {0: lambda s: datetime.strptime(s.decode(), '%d-%b-%y')}
return np.genfromtxt(response, delimiter=',', skip_header=1,
dtype=dtype, converters=converters,
missing_values='-', filling_values=-1)
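# Hedged usage note (illustrative only): the intended call looks like the lines
# below; the Google Finance CSV endpoint used above has since been retired, so
# this is kept as documentation of the interface rather than a working example.
#   X = quotes_historical_google('AAPL', datetime(2017, 1, 1), datetime(2017, 7, 10))
#   daily_variation = X['close'] - X['open']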
# We are looking for 2y max
d1 = datetime(2017, 1, 1)
d2 = datetime(2017, 7, 10)
# d1 = today = datetime.datetime(2016, 1, 1)
# d1 = today = date.today()
# d2 = start = datetime.datetime(2017, 5, 10)
# start = (today.day, today.month, today.year - 1)
# start = str(start[0]) + '-' + str(start[1]) + '-' + str(start[2])
def make_symbol_dic(stocks, **kwargs):
symbol_dict = {}
if not kwargs:
F = open(stocks, 'r')
stocks = set(F.readlines())
for symbol in stocks:
try:
qnd_symbol = 'WIKI/' + symbol.strip()
stock = quandl.Dataset(qnd_symbol).name
symbol_dict.setdefault(symbol.strip(), stock.split(' (', 1)[0])
except:
pass
return symbol_dict
shortlist = [
'MMM', 'CVX', 'PTEN', 'NVDA', 'TDG',
'JNJ', 'TSLA', 'F', 'NFLX', 'AMD', 'MU',
'AMZN', 'AAPL', 'FB', 'BA', 'TDG',
'BA', 'GOOGL', 'STX', 'WDC', 'TXN', 'AMS', 'WLM',
'BK', 'WMT'
]
longlist = [
'AMZN', 'GOOGL', 'TSLA', 'FB', 'GLD', 'NVDA', 'GILD',
'NESN', 'MSFT', 'BBY', 'INTC', 'AMD', 'IMGN',
'HBI', 'DIS', 'MEET', 'MCD', 'TWTR',
'NFLX', 'NXPI', 'BABA', 'JNJ', 'T', 'PG', 'MA',
'WMT', 'IBM', 'AAPL', 'BBBY', 'MRK', 'GS', 'CRM',
'MS', 'TDG', 'BA', 'MU', 'MMM', 'ILMN', 'LPL',
'SNE', 'BAC', 'FOXA', 'GPRO', 'GILD', 'SNAP', 'FIT',
'TXN', 'STX', 'WDC'
]
pipeline = ['LB', 'FL', 'COL','VXX', 'TVIX', 'XIV', 'SVXY', 'UVXY', 'SNAP', 'AMD']
symbol_dict = make_symbol_dic(pipeline, t=list)
symbols, names = np.array(list(symbol_dict.items())).T
'''
quotes = []
for symbol in symbols:
try:
quote = quotes_historical_google(symbol, d1, d2)
quotes.append(quote)
except Exception as e:
print(e)
close_prices = np.stack([q['close'] for q in quotes])
open_prices = np.stack([q['open'] for q in quotes])
'''
quotes = []
for symbol in symbols:
    try:
        quotes.append(quandl.get('WIKI/' + symbol, start_date=d1,
                                 end_date=d2, returns='numpy'))
    except Exception as e:
        print(e)
close_prices = np.stack([q['Close'] for q in quotes])
open_prices = np.stack([q['Open'] for q in quotes])
# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
from __future__ import division
import copy
import itertools
import math
import numpy as np
import torch
from mmcv.runner import get_dist_info
from mmdet.datasets import ConcatDataset
from torch.utils.data import Sampler, WeightedRandomSampler
from ssod.utils import get_root_logger
from ..builder import SAMPLERS
def chain(iter_obj):
tmp = []
for s in iter_obj:
tmp.extend(s)
return tmp
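# e.g. chain([[1, 2], [3], [4, 5]]) returns [1, 2, 3, 4, 5]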
@SAMPLERS.register_module()
class DistributedGroupFixRatioSampler(Sampler):
def __init__(
self,
dataset: ConcatDataset,
samples_per_gpu=1,
num_replicas=None,
rank=None,
sample_ratio=None,
by_prob=True,
at_least_one=False,
seed=0,
max_iters=None,
):
_rank, _num_replicas = get_dist_info()
if num_replicas is None:
num_replicas = _num_replicas
if rank is None:
rank = _rank
assert isinstance(
dataset, ConcatDataset
), "The dataset must contains multiple sub datasets"
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
if at_least_one:
assert self.samples_per_gpu >= len(self.dataset.datasets)
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.seed = seed if seed is not None else 0
self.sample_ratio = (
sample_ratio
if sample_ratio is not None
else [1] * len(self.dataset.datasets)
)
assert len(self.sample_ratio) == len(self.dataset.datasets)
self.by_prob = by_prob
self.at_least_one = at_least_one
self.base_indices = [
self._get_sub_seq(
d, offset=self.dataset.cumulative_sizes[i - 1] if i > 0 else 0
)
for i, d in enumerate(self.dataset.datasets)
]
self.set_num = len(self.base_indices)
group_num_per_set = [len(self.base_indices[i]) for i in range(self.set_num)]
if not all([num == max(group_num_per_set) for num in group_num_per_set]):
self.logger.warn(
"The number of groups in each set is not same. Ignoring the group flag...."
)
self.base_indices = [
np.concatenate(indices) for indices in self.base_indices
]
self.group_num = 1
else:
self.group_num = len(self.base_indices[0])
self.max_iters = max_iters
self._compute_samples()
def __iter__(self):
self._compute_samples()
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = copy.deepcopy(self.base_indices)
cumulated_indices = []
for i in range(self.group_num):
_indices = [
itertools.cycle(
indice[i][
list(torch.randperm(len(indice[i]), generator=g).numpy())
].tolist()
)
for indice in indices
]
for _ in range(self.iter_per_group[i]):
size_per_bucket = self._sample(g)
samples_per_batch = [
[next(_indices[j]) for _ in range(size_per_bucket[j])]
for j in range(self.set_num)
]
# print(samples_per_batch)
# shuffle across process
if self.at_least_one:
for s in samples_per_batch:
assert (
len(s) >= self.num_replicas
), "When `at_least_one` set to `True`, size of each set must be larger than world_size."
base = chain(
[
np.asarray(s[: self.num_replicas])[
list(
torch.randperm(
self.num_replicas, generator=g
).numpy()
)
].tolist()
for s in samples_per_batch
]
)
extra = np.array(
chain(
[
s[self.num_replicas :]
for s in samples_per_batch
if len(s) > self.num_replicas
]
)
)
if len(extra) > 0:
extra = extra[
list(torch.randperm(len(extra), generator=g,).numpy())
]
extra = extra.tolist()
samples_per_batch = base + extra
else:
samples_per_batch = np.array(chain(samples_per_batch))[
list(
torch.randperm(
self.samples_per_gpu * self.num_replicas, generator=g
).numpy()
)
].tolist()
cumulated_indices.append(samples_per_batch)
cumulated_indices = (
np.asarray(cumulated_indices)[
list(torch.randperm(len(cumulated_indices), generator=g).numpy())
]
.reshape((-1,))
.tolist()
)
assert len(cumulated_indices) == len(self) * self.num_replicas
# subsample
cumulated_indices = cumulated_indices[
self.rank : self.rank + self.num_replicas * len(self) : self.num_replicas
]
assert len(cumulated_indices) == len(self)
return iter(cumulated_indices)
def _sample(self, generator=None):
total_batch_size = self.num_replicas * self.samples_per_gpu
# normalize
sample_prob = [s / sum(self.sample_ratio) for s in self.sample_ratio]
if (not self.by_prob) or (generator is None):
size_per_bucket = [int(total_batch_size * p) for p in sample_prob]
size_per_bucket[-1] = total_batch_size - sum(size_per_bucket[:-1])
else:
if self.at_least_one:
extra_size = total_batch_size - self.num_replicas * self.set_num
if extra_size > 0:
sample_seq = list(
WeightedRandomSampler(
sample_prob,
extra_size,
replacement=True,
generator=generator,
)
)
else:
sample_seq = []
for i in range(self.set_num):
sample_seq = sample_seq + [i for _ in range(self.num_replicas)]
                _, size_per_bucket = np.unique(sample_seq, return_counts=True)
import numpy
import xraylib
import scipy.constants as codata
# needed by bragg_calc
from xoppylib.crystals.bragg_preprocessor_file_io import bragg_preprocessor_file_v2_write
from dabax.common_tools import f0_xop, f0_xop_with_fractional_charge
from dabax.common_tools import bragg_metrictensor, lorentz, atomic_symbols
import sys
import os
import platform
from xoppylib.xoppy_util import locations
from dabax.dabax_xraylib import DabaxXraylib
#
#
#
def bragg_metrictensor(a,b,c,a1,a2,a3,RETURN_REAL_SPACE=0,RETURN_VOLUME=0,HKL=None):
"""
Returns the metric tensor in the reciprocal space
:param a: unit cell a
:param b: unit cell b
:param c: unit cell c
:param a1: unit cell alpha
:param a2: unit cell beta
:param a3: unit cell gamma
:param RETURN_REAL_SPACE: set to 1 for returning metric tensor in real space
:param RETURN_VOLUME: set to 1 to return the unit cell volume in Angstroms^3
:param HKL: if !=None, returns the d-spacing for the corresponding [H,K,L] reflection
:return: the returned value depends on the keywords used. If RETURN_REAL_SPACE=0,RETURN_VOLUME=0, and HKL=None
    then returns the metric tensor in reciprocal space.
"""
# input cell a,b,c,alpha,beta,gamma; angles in degrees
a1 *= numpy.pi / 180.0
a2 *= numpy.pi / 180.0
a3 *= numpy.pi / 180.0
# ;
# ; tensor in real space
# ;
g = numpy.array( [ [a*a, a*b*numpy.cos(a3), a*c*numpy.cos(a2)], \
[a*b*numpy.cos(a3), b*b, b*c*numpy.cos(a1)], \
[a*c*numpy.cos(a2), b*c*numpy.cos(a1), c*c]] )
if RETURN_REAL_SPACE: return g
# print("g: ",g)
# ;
# ; volume of the lattice
# ;
volume2 = numpy.linalg.det(g)
volume = numpy.sqrt(volume2)
# print("Volume of unit cell: %g A^3",volume)
if RETURN_VOLUME: return volume
# ;
# ; tensor in reciprocal space
# ;
ginv = numpy.linalg.inv(g)
# ;print,gInv
#
# itmp = where(abs(ginv) LT 1d-8)
# IF itmp[0] NE -1 THEN ginv[itmp]=0D
itmp = numpy.where(numpy.abs(ginv) < 1e-8)
ginv[itmp] = 0.0
# print("ginv: ",ginv)
if HKL != None:
# ; computes d-spacing
dd = numpy.dot( numpy.array(HKL) , numpy.dot( ginv , numpy.array(HKL)))
#
# print("DD: ", dd)
dd1 = 1.0 / numpy.sqrt(dd)
# print("D-spacing: ",dd1)
return dd1
else:
return ginv
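# Hedged usage sketch (illustrative): d-spacing of the Si(111) reflection from
# the cubic cell parameters a = b = c = 5.4309 A with 90 deg angles; the result
# should be close to a/sqrt(3), i.e. about 3.136 A.
def _example_si111_dspacing():
    return bragg_metrictensor(5.4309, 5.4309, 5.4309, 90, 90, 90, HKL=[1, 1, 1])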
def lorentz(theta_bragg_deg,return_what=0):
"""
This function returns the Lorentz factor, polarization factor (unpolarized beam), geometric factor,
or a combination of them.
:param theta_bragg_deg: Bragg angle in degrees
:param return_what: A flag indicating the returned variable:
0: (default) PolFac*lorentzFac
1: PolFac
2: lorentzFac
3: geomFac
:return: a scalar value
"""
tr = theta_bragg_deg * numpy.pi / 180.
polarization_factor = 0.5 * (1.0 + (numpy.cos(2.0 * tr))**2)
lorentz_factor = 1.0 / numpy.sin(2.0 * tr)
geometrical_factor = 1.0 * numpy.cos(tr) / numpy.sin(2.0 * tr)
if return_what == 0:
return polarization_factor*lorentz_factor
elif return_what == 1:
return polarization_factor
elif return_what == 2:
return lorentz_factor
elif return_what == 3:
return geometrical_factor
elif return_what == 4:
return polarization_factor*lorentz_factor*geometrical_factor
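# Hedged usage sketch: at a Bragg angle of 30 deg the polarization factor is
# 0.5*(1 + cos(60 deg)**2) = 0.625 and the Lorentz factor 1/sin(60 deg) is about
# 1.155, so the default return_what=0 yields their product, roughly 0.722.
def _example_lorentz_factor():
    return lorentz(30.0)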
# OBSOLETE.... USE bragg_calc2() INSTEAD!
def bragg_calc(descriptor="Si",hh=1,kk=1,ll=1,temper=1.0,emin=5000.0,emax=15000.0,estep=100.0,fileout=None,
material_constants_library=xraylib):
"""
Preprocessor for Structure Factor (FH) calculations. It calculates the basic ingredients of FH.
:param descriptor: crystal name (as in xraylib)
:param hh: miller index H
:param kk: miller index K
:param ll: miller index L
:param temper: temperature factor (scalar <=1.0 )
:param emin: photon energy minimum
:param emax: photon energy maximum
:param estep: photon energy step
:param fileout: name for the output file (default=None, no output file)
:return: a dictionary with all ingredients of the structure factor.
"""
output_dictionary = {}
codata_e2_mc2 = codata.e**2 / codata.m_e / codata.c**2 / (4*numpy.pi*codata.epsilon_0) # in m
# f = open(fileout,'w')
version = "2.5"
output_dictionary["version"] = version
# todo: txt not longer used here... can be removed
txt = ""
txt += "# Bragg version, Data file type\n"
txt += "%s 1\n" % version
cryst = material_constants_library.Crystal_GetCrystal(descriptor)
if cryst is None:
raise Exception("Crystal not found in xraylib: %s" % descriptor )
volume = cryst['volume']
# crystal data - not needed
icheck = 0
if icheck:
print (" Unit cell dimensions are %f %f %f" % (cryst['a'],cryst['b'],cryst['c']))
print (" Unit cell angles are %f %f %f" % (cryst['alpha'],cryst['beta'],cryst['gamma']))
print (" Unit cell volume is %f A^3" % volume )
print (" Atoms at:")
print (" Z fraction X Y Z")
for i in range(cryst['n_atom']):
atom = cryst['atom'][i]
print (" %3i %f %f %f %f" % (atom['Zatom'], atom['fraction'], atom['x'], atom['y'], atom['z']) )
print (" ")
volume = volume*1e-8*1e-8*1e-8 # in cm^3
dspacing = material_constants_library.Crystal_dSpacing(cryst, hh, kk, ll)
rn = (1e0/volume)*(codata_e2_mc2*1e2)
dspacing *= 1e-8 # in cm
txt += "# RN = (e^2/(m c^2))/V) [cm^-2], d spacing [cm]\n"
txt += "%e %e \n" % (rn , dspacing)
output_dictionary["rn"] = rn
output_dictionary["dspacing"] = dspacing
atom = cryst['atom']
list_Zatom = [ atom[i]['Zatom'] for i in range(len(atom))]
number_of_atoms = len(list_Zatom)
list_fraction = [ atom[i]['fraction'] for i in range(len(atom))]
try:
list_charge = [atom[i]['charge'] for i in range(len(atom))]
except:
list_charge = [0.0] * number_of_atoms
list_x = [ atom[i]['x'] for i in range(len(atom))]
list_y = [ atom[i]['y'] for i in range(len(atom))]
list_z = [ atom[i]['z'] for i in range(len(atom))]
    # creates an ID that contains Z, occupation and charge, which will
# define the different sites.
IDs = []
number_of_atoms = len(list_Zatom)
for i in range(number_of_atoms):
IDs.append("Z:%2d-F:%g-C:%g" % (list_Zatom[i],list_fraction[i], list_charge[i]))
    # calculate indices of unique IDs sorted by Z
unique_indexes1 = numpy.unique(IDs, return_index=True) [1]
unique_Zatom1 = [list_Zatom[i] for i in unique_indexes1]
# sort by Z
ii = numpy.argsort(unique_Zatom1)
unique_indexes = unique_indexes1[ii]
unique_Zatom = [list_Zatom[i] for i in unique_indexes]
unique_charge = [list_charge[i] for i in unique_indexes]
unique_scattering_electrons = []
for i, Zi in enumerate(unique_Zatom):
unique_scattering_electrons.append(Zi - unique_charge[i])
nbatom = (len(unique_Zatom))
txt += "# Number of different element-sites in unit cell NBATOM:\n%d \n" % nbatom
output_dictionary["nbatom"] = nbatom
txt += "# for each element-site, the number of scattering electrons (Z_i + charge_i)\n"
for i in unique_Zatom:
txt += "%d "%i
txt += "\n"
output_dictionary["atnum"] = list(unique_scattering_electrons)
txt += "# for each element-site, the occupation factor\n"
unique_fraction = []
for i in range(len(unique_indexes)):
unique_fraction.append(list_fraction[unique_indexes[i]])
txt += "%g "%(unique_fraction[i])
txt += "\n"
output_dictionary["fraction"] = unique_fraction
txt += "# for each element-site, the temperature factor\n" # temperature parameter
list_temper = []
for i in range(len(unique_indexes)):
txt += "%5.3f "%temper
list_temper.append(temper)
txt += "\n"
output_dictionary["temper"] = list_temper
#
# Geometrical part of structure factor: G and G_BAR
#
txt += "# for each type of element-site, COOR_NR=G_0\n"
list_multiplicity = []
for i in range(len(unique_indexes)):
id = IDs[unique_indexes[i]]
txt += "%d "%IDs.count(id)
list_multiplicity.append(IDs.count(id))
txt += "\n"
output_dictionary["G_0"] = list_multiplicity
txt += "# for each type of element-site, G and G_BAR (both complex)\n"
list_g = []
list_g_bar = []
for i in range(len(unique_indexes)):
id = IDs[unique_indexes[i]]
ga = 0.0 + 0j
for i,zz in enumerate(IDs):
if zz == id:
ga += numpy.exp(2j*numpy.pi*(hh*list_x[i]+kk*list_y[i]+ll*list_z[i]))
txt += "(%g,%g) \n"%(ga.real,ga.imag)
txt += "(%g,%g) \n"%(ga.real,-ga.imag)
list_g.append(ga)
list_g_bar.append(ga.conjugate())
output_dictionary["G"] = list_g
output_dictionary["G_BAR"] = list_g_bar
#
# F0 part
#
txt += "# for each type of element-site, the number of f0 coefficients followed by them\n"
list_f0 = []
for i in range(len(unique_indexes)):
zeta = list_Zatom[unique_indexes[i]]
tmp = f0_xop(zeta)
txt += ("11 "+"%g "*11+"\n")%(tuple(tmp))
list_f0.append(tmp.tolist())
output_dictionary["f0coeff"] = list_f0
npoint = int( (emax - emin)/estep + 1 )
txt += "# The number of energy points NPOINT: \n"
txt += ("%i \n") % npoint
output_dictionary["npoint"] = npoint
txt += "# for each energy point, energy, F1(1),F2(1),...,F1(nbatom),F2(nbatom)\n"
list_energy = []
out_f1 = numpy.zeros( (len(unique_indexes),npoint), dtype=float)
out_f2 = numpy.zeros( (len(unique_indexes),npoint), dtype=float)
out_fcompton = numpy.zeros( (len(unique_indexes),npoint), dtype=float) # todo is complex?
for i in range(npoint):
energy = (emin+estep*i)
txt += ("%20.11e \n") % (energy)
list_energy.append(energy)
for j in range(len(unique_indexes)):
zeta = list_Zatom[unique_indexes[j]]
f1a = material_constants_library.Fi(int(zeta),energy*1e-3)
f2a = -material_constants_library.Fii(int(zeta),energy*1e-3) # TODO: check the sign!!
txt += (" %20.11e %20.11e 1.000 \n")%(f1a, f2a)
out_f1[j,i] = f1a
out_f2[j,i] = f2a
out_fcompton[j,i] = 1.0
output_dictionary["energy"] = list_energy
output_dictionary["f1"] = out_f1
output_dictionary["f2"] = out_f2
output_dictionary["fcompton"] = out_fcompton
if fileout != None:
bragg_preprocessor_file_v2_write(output_dictionary, fileout)
# with open(fileout,"w") as f:
# f.write(txt)
# print("File written to disk: %s" % fileout)
return output_dictionary
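# Hedged usage sketch (illustrative only): build the preprocessor dictionary for
# Si(111) over 5-15 keV without writing a file; the resulting dictionary is what
# crystal_fh() below expects as its first argument. Requires a working xraylib
# installation.
def _example_bragg_calc_si111():
    return bragg_calc(descriptor="Si", hh=1, kk=1, ll=1, temper=1.0,
                      emin=5000.0, emax=15000.0, estep=100.0, fileout=None)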
#
#
#
def crystal_fh(input_dictionary,phot_in,theta=None,forceratio=0):
"""
:param input_dictionary: as resulting from bragg_calc()
:param phot_in: photon energy in eV
:param theta: incident angle (half of scattering angle) in rad
:return: a dictionary with structure factor
"""
# outfil = input_dictionary["outfil"]
# fract = input_dictionary["fract"]
rn = input_dictionary["rn"]
dspacing = numpy.array(input_dictionary["dspacing"])
nbatom = numpy.array(input_dictionary["nbatom"])
atnum = numpy.array(input_dictionary["atnum"])
temper = numpy.array(input_dictionary["temper"])
G_0 = numpy.array(input_dictionary["G_0"])
G = numpy.array(input_dictionary["G"])
G_BAR = numpy.array(input_dictionary["G_BAR"])
f0coeff = numpy.array(input_dictionary["f0coeff"])
npoint = numpy.array(input_dictionary["npoint"])
energy = numpy.array(input_dictionary["energy"])
fp = numpy.array(input_dictionary["f1"])
fpp = numpy.array(input_dictionary["f2"])
fraction = numpy.array(input_dictionary["fraction"])
phot_in = numpy.array(phot_in,dtype=float).reshape(-1)
toangstroms = codata.h * codata.c / codata.e * 1e10
itheta = numpy.zeros_like(phot_in)
for i,phot in enumerate(phot_in):
if theta is None:
itheta[i] = numpy.arcsin(toangstroms*1e-8/phot/2/dspacing)
else:
itheta[i] = theta
# print("energy= %g eV, theta = %15.13g deg"%(phot,itheta[i]*180/numpy.pi))
if phot < energy[0] or phot > energy[-1]:
raise Exception("Photon energy %g eV outside of valid limits [%g,%g]"%(phot,energy[0],energy[-1]))
if forceratio == 0:
ratio = numpy.sin(itheta[i]) / (toangstroms / phot)
else:
ratio = 1 / (2 * dspacing * 1e8)
# print("Ratio: ",ratio)
F0 = numpy.zeros(nbatom)
F000 = numpy.zeros(nbatom)
for j in range(nbatom):
#icentral = int(f0coeff.shape[1]/2)
#F0[j] = f0coeff[j,icentral]
icentral = int(len(f0coeff[j])/2)
F0[j] = f0coeff[j][icentral]
# F000[j] = F0[j]
for i in range(icentral):
#F0[j] += f0coeff[j,i] * numpy.exp(-1.0*f0coeff[j,i+icentral+1]*ratio**2)
F0[j] += f0coeff[j][i] * numpy.exp(-1.0*f0coeff[j][i+icentral+1]*ratio**2)
#srio F000[j] += f0coeff[j][i] #actual number of electrons carried by each atom, <NAME>, <EMAIL>
F000[j] = atnum[j] # srio
# ;C
# ;C Interpolate for the atomic scattering factor.
# ;C
for j,ienergy in enumerate(energy):
if ienergy > phot:
break
nener = j - 1
F1 = numpy.zeros(nbatom,dtype=float)
F2 = numpy.zeros(nbatom,dtype=float)
F = numpy.zeros(nbatom,dtype=complex)
for j in range(nbatom):
F1[j] = fp[j,nener] + (fp[j,nener+1] - fp[j,nener]) * \
(phot - energy[nener]) / (energy[nener+1] - energy[nener])
F2[j] = fpp[j,nener] + (fpp[j,nener+1] - fpp[j,nener]) * \
(phot - energy[nener]) / (energy[nener+1] - energy[nener])
r_lam0 = toangstroms * 1e-8 / phot
for j in range(nbatom):
F[j] = F0[j] + F1[j] + 1j * F2[j]
# print("F",F)
F_0 = 0.0 + 0.0j
FH = 0.0 + 0.0j
FH_BAR = 0.0 + 0.0j
FHr = 0.0 + 0.0j
FHi = 0.0 + 0.0j
FH_BARr = 0.0 + 0.0j
FH_BARi = 0.0 + 0.0j
TEMPER_AVE = 1.0
for j in range(nbatom):
FH += fraction[j] * (G[j] * F[j] * 1.0) * temper[j]
FHr += fraction[j] * (G[j] * (F0[j] + F1[j])* 1.0) * temper[j]
FHi += fraction[j] * (G[j] * F2[j] * 1.0) * temper[j]
FN = F000[j] + F1[j] + 1j * F2[j]
F_0 += fraction[j] * (G_0[j] * FN * 1.0)
# TEMPER_AVE *= (temper[j])**(G_0[j]/(G_0.sum()))
FH_BAR += fraction[j] * ((G_BAR[j] * F[j] * 1.0)) * temper[j]
FH_BARr += fraction[j] * ((G_BAR[j] * (F0[j] + F1[j]) *1.0)) * temper[j]
FH_BARi += fraction[j] * ((G_BAR[j] * F2[j] * 1.0)) * temper[j]
# print("TEMPER_AVE: ",TEMPER_AVE)
# ;C
# ;C multiply by the average temperature factor
# ;C
# FH *= TEMPER_AVE
# FHr *= TEMPER_AVE
# FHi *= TEMPER_AVE
# FH_BAR *= TEMPER_AVE
# FH_BARr *= TEMPER_AVE
# FH_BARi *= TEMPER_AVE
STRUCT = numpy.sqrt(FH * FH_BAR)
# ;C
# ;C PSI_CONJ = F*( note: PSI_HBAR is PSI at -H position and is
        # ;C proportional to fh_bar but PSI_CONJ is the complex conjugate of PSI_H)
# ;C
psi_over_f = rn * r_lam0**2 / numpy.pi
psi_h = rn * r_lam0**2 / numpy.pi * FH
psi_hr = rn * r_lam0**2 / numpy.pi * FHr
psi_hi = rn * r_lam0**2 / numpy.pi * FHi
psi_hbar = rn * r_lam0**2 / numpy.pi * FH_BAR
psi_hbarr = rn * r_lam0**2 / numpy.pi * FH_BARr
psi_hbari = rn * r_lam0**2 / numpy.pi * FH_BARi
psi_0 = rn * r_lam0**2 / numpy.pi * F_0
psi_conj = rn * r_lam0**2 / numpy.pi * FH.conjugate()
# ;
# ; Darwin width
# ;
# print(rn,r_lam0,STRUCT,itheta)
        ssvar = rn * (r_lam0**2) * STRUCT / numpy.pi / numpy.sin(2.0*itheta)
# -*- coding: utf-8 -*-
"""
Created on Thu May 18 11:52:51 2017
@author: student
"""
import pandas as pd
import numpy as np
import utm
import collections
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
from terrain import cropTerrainBB
import glob
import os
from PIL import Image
def loadCameras(directory_images,directory_metadata,metaDataType,terrain):
'''
loadCameras loads camera locations and orientations from a set of images and matching metadata
This is a variation specifically for the Medina Wasl images and metadata
INPUTS
directory_images - Location of folder holding images
directory_metadata - Location of folder holding matching mark_1_pva metadata in csv format
OUTPUTS
cn, ce, ce - camera north east and down positions (m)
cor_n, cor_e, cor_d - camera center orientation vector, or pointing direction (m)
cup_n, cup_e, cup_d - camera up vector, or the vector pointing out the top of the camera (m)
alphax - camera horizontal field of view (degrees)
alphay - camera vertical field of view (degrees)
df_all - camera metadata read from mark_1_pva.csv
'''
# Load metadata
if(metaDataType=='Muscat'):
df_all = readMetaDataMuscat(directory_images,directory_metadata)
elif(metaDataType=='Medina'):
df_all = readMetaDataMedina(directory_images,directory_metadata)
else:
print('Invalid Metadata Type.')
sys.exit()
# Convert lat/lon to UTM coordinates
lat = df_all['field.lat'].values
lon = df_all['field.lon'].values
cn = []
ce = []
for row in np.c_[lat,lon]:
east,north,zone,zone_letter = utmConversion(row)
cn.append(north)
ce.append(east)
cn = np.asarray(cn)
ce = np.asarray(ce)
cd = df_all['field.height'].values
if(metaDataType=='Muscat'):
# Load sensor parameters from parameter file
SensorYaw, SensorPitch, SensorRoll, ImageHeight, ImageWidth, FocalLength = getSensorParamsMuscat(directory_images,directory_metadata)
df_all['sensor_yaw'] = SensorYaw
df_all['sensor_pitch'] = SensorPitch
df_all['sensor_roll'] = SensorRoll
df_all['focal_length'] = FocalLength
df_all['ImageHeight'] = ImageHeight
df_all['ImageWidth'] = ImageWidth
# Get field of view angles
alphax_val = np.rad2deg(2*np.arctan(ImageWidth/(2*FocalLength)))
alphay_val = np.rad2deg(2*np.arctan(ImageHeight/(2*FocalLength)))
alphax = np.ones(len(df_all))*alphax_val
alphay = np.ones(len(df_all))*alphay_val
elif(metaDataType=='Medina'):
ImageHeight, ImageWidth = getSensorParamsMedina(directory_images)
df_all['ImageHeight'] = ImageHeight
df_all['ImageWidth'] = ImageWidth
alphax = df_all['HorizontalFOV']
alphay = df_all['VerticalFOV']
#TODO: Check on focal length for Medina data
# Loop through all cameras and compute their orientations
cor_n = np.zeros(len(df_all))
cor_e = np.zeros(len(df_all))
cor_d = np.zeros(len(df_all))
cup_n = np.zeros(len(df_all))
cup_e = np.zeros(len(df_all))
cup_d = np.zeros(len(df_all))
if(metaDataType=='Muscat'):
for i,row in enumerate(df_all['field.azimuth'].values):
Yaw = np.deg2rad(df_all['field.azimuth'][i])
Pitch = np.deg2rad(df_all['field.pitch'][i])
Roll = np.deg2rad(df_all['field.roll'][i])
SensorYaw = df_all['sensor_yaw'][i]
SensorPitch = df_all['sensor_pitch'][i]
SensorRoll = df_all['sensor_roll'][i]
ImageHeight = df_all['ImageHeight'][i]
ImageWidth = df_all['ImageWidth'][i]
FocalLength = df_all['focal_length'][i]
projectionContext = collections.namedtuple('projectionContext',' \
Yaw, Pitch, Roll, SensorYaw, SensorPitch, SensorRoll, ImageHeight, \
ImageWidth,FocalLength')
pc = projectionContext(Yaw,Pitch,Roll,
SensorYaw,SensorPitch,SensorRoll,ImageHeight,
ImageWidth,FocalLength)
cor_n[i],cor_e[i],cor_d[i],cup_n[i],cup_e[i],cup_d[i] = cameraOrientations(pc)
elif(metaDataType=='Medina'):
# Get average height of ground in flight area
nn_cropped,ee_cropped,dd_cropped = cropTerrainBB(terrain, cn, ce, 0)
#TODO: This should probably be on a per image basis
ground_z = np.mean(dd_cropped)
for i,row in df_all.iterrows():
# Get four corners
tl = row[['image corners tl_latitude','tl_longitude']].values
tr = row[['tr_latitude','tr_longitude']].values
bl = row[['bl_latitude','bl_longitude']].values
br = row[['br_latitude','br_longitude']].values
# Convert four corners to UTM
tle,tln,z,zl = utmConversion(tl)
tre,trn,z,zl = utmConversion(tr)
ble,bln,z,zl = utmConversion(bl)
bre,brn,z,zl = utmConversion(br)
# Get camera location
cam = np.r_[cn[i],ce[i],cd[i]]
# Get image center point
center = np.r_[np.mean([tln,trn,bln,brn]),np.mean([tle,tre,ble,bre]),ground_z]
# Get point on side of image
side = np.r_[np.mean([trn,brn]),np.mean([tre,bre]),ground_z]
# Vector from center of image to camera location
cor_vector = center - cam
cor_vector = cor_vector/np.linalg.norm(cor_vector)
# Vector from side of image to camera location
cside_vector = side - cam
cside_vector = cside_vector/np.linalg.norm(cside_vector)
# Get camera up vector using cross product of side and center vectors
cup_vector = np.cross(cor_vector,cside_vector)
if(cup_vector[2]<0): # Make sure we get the up vector and not the down vector
cup_vector = np.cross(cside_vector,cor_vector)
cor_n[i],cor_e[i],cor_d[i] = cor_vector
cup_n[i],cup_e[i],cup_d[i] = cup_vector
else:
print('Invalid MetaData Type')
sys.exit()
# Pack camera information
cameras = pd.DataFrame({'cn':cn,
'ce':ce,
'cd':cd,
'cor_n':cor_n,
'cor_e':cor_e,
'cor_d':cor_d,
'cup_n':cup_n,
'cup_e':cup_e,
'cup_d':cup_d,
'alphax':alphax,
'alphay':alphay})
return cameras,df_all
def utmConversion(row):
'''
Converts from lat lon coordinates to UTM coordinates
'''
east, north, zone, zone_letter = utm.from_latlon(row[0],row[1])
return east, north, zone, zone_letter
def cameraOrientations(pc,plotCorners=False):
'''
Applies rotation matrices to calculate the vector along which a camera is pointing
'''
# form the rotation matrix for the platform roll, pitch, yaw
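# pr, pp and py are rotations about the x-, y- and z-axis, respectively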
pr = np.array([[1, 0, 0],
[0, np.cos(pc.Roll), np.sin(pc.Roll)],
[0, -np.sin(pc.Roll), np.cos(pc.Roll)]])
pp = np.array([[np.cos(pc.Pitch), 0, -np.sin(pc.Pitch)],
[0, 1, 0],
[np.sin(pc.Pitch), 0, np.cos(pc.Pitch)]])
py = np.array([[np.cos(pc.Yaw), np.sin(pc.Yaw), 0],
[-np.sin(pc.Yaw), np.cos(pc.Yaw), 0],
[0, 0, 1]])
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time
RM_train=pd.read_csv('./input/data_train.csv')
R_test=pd.read_csv('./input/data_target_users_test.csv')
URM=pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')
##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
userList=np.array(userList,dtype=np.int64)
itemList = list(itemList)
itemList=np.array(itemList,dtype=np.int64)
ratingList = list(ratingList) #not needed
ratingList=np.array(ratingList,dtype=np.int64) #not needed
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
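# convert to CSR so that per-user (row) slicing is fast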
URM_all = URM_all.tocsr()
#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)
itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm,dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm,dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm,dtype=np.float64)
ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))
#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]
#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample
URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage = 0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)
evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
### hybrid recommender
### Usinng TF IDF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
items_per_feature = np.ediff1d(ICM_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))
from scipy.sparse import diags
diags(IDF)
ICM_idf = ICM_all.copy()
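# left-multiplying by the sparse diagonal matrix scales each row of the ICM by its IDF weight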
ICM_idf = diags(IDF)*ICM_idf
############## top pop
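# item popularity = number of interactions per item (non-zeros per column of the URM)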
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
# coding=utf8
# --------------------------------------------------------
# Scene Graph Generation by Iterative Message Passing
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import argparse, json, string
from collections import Counter
import math
import os
from math import floor
import h5py as h5
import numpy as np
import pprint
import xml.etree.ElementTree as ET
"""
A script for generating an hdf5 ROIDB from the VisualGenome dataset
"""
def preprocess_object_labels(data, alias_dict={}):
for img in data:
for obj in img['objects']:
obj['ids'] = [obj['object_id']]
names = []
for name in obj['names']:
label = sentence_preprocess(name)
if label in alias_dict:
label = alias_dict[label]
names.append(label)
obj['names'] = names
def preprocess_predicates(data, alias_dict={}):
for img in data:
for relation in img['relationships']:
predicate = sentence_preprocess(relation['predicate'])
if predicate in alias_dict:
predicate = alias_dict[predicate]
relation['predicate'] = predicate
def extract_object_token(data, obj_list=[], verbose=True):
""" Builds a set that contains the object names. Filters infrequent tokens. """
token_counter = Counter()
for img in data:
for region in img['objects']:
for name in region['names']:
if not obj_list or name in obj_list:
token_counter.update([name])
tokens = set()
# pick top N tokens
token_counter_return = {}
for token, count in token_counter.most_common():
tokens.add(token)
token_counter_return[token] = count
if verbose:
print(('Keeping %d / %d objects'
% (len(tokens), len(token_counter))))
return tokens, token_counter_return
def extract_predicate_token(data, pred_list=[], verbose=True):
""" Builds a set that contains the relationship predicates. Filters infrequent tokens. """
token_counter = Counter()
total = 0
for img in data:
for relation in img['relationships']:
predicate = relation['predicate']
if not pred_list or predicate in pred_list:
token_counter.update([predicate])
total += 1
tokens = set()
token_counter_return = {}
for token, count in token_counter.most_common():
tokens.add(token)
token_counter_return[token] = count
if verbose:
print(('Keeping %d / %d predicates with enough instances'
% (len(tokens), len(token_counter))))
return tokens, token_counter_return
def merge_duplicate_boxes(data):
def IoU(b1, b2):
if b1[2] <= b2[0] or \
b1[3] <= b2[1] or \
b1[0] >= b2[2] or \
b1[1] >= b2[3]:
return 0
b1b2 = np.vstack([b1,b2])
minc = np.min(b1b2, 0)
maxc = np.max(b1b2, 0)
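# note: 'union_area' below is the area of the box enclosing b1 and b2, not the exact union area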
union_area = (maxc[2]-minc[0])*(maxc[3]-minc[1])
int_area = (minc[2]-maxc[0])*(minc[3]-maxc[1])
return float(int_area)/float(union_area)
def to_x1y1x2y2(obj):
x1 = obj['x']
y1 = obj['y']
x2 = obj['x'] + obj['w']
y2 = obj['y'] + obj['h']
return np.array([x1, y1, x2, y2], dtype=np.int32)
def inside(b1, b2):
return b1[0] >= b2[0] and b1[1] >= b2[1] \
and b1[2] <= b2[2] and b1[3] <= b2[3]
def overlap(obj1, obj2):
b1 = to_x1y1x2y2(obj1)
b2 = to_x1y1x2y2(obj2)
iou = IoU(b1, b2)
if all(b1 == b2) or iou > 0.9: # consider as the same box
return 1
elif (inside(b1, b2) or inside(b2, b1))\
and obj1['names'][0] == obj2['names'][0]: # same object inside the other
return 2
elif iou > 0.6 and obj1['names'][0] == obj2['names'][0]: # multiple overlapping same object
return 3
else:
return 0 # no overlap
num_merged = {1:0, 2:0, 3:0}
print('merging boxes..')
for img in data:
# mark objects to be merged and save their ids
objs = img['objects']
num_obj = len(objs)
for i in range(num_obj):
if 'M_TYPE' in objs[i]: # has been merged
continue
merged_objs = [] # circular refs, but fine
for j in range(i+1, num_obj):
if 'M_TYPE' in objs[j]: # has been merged
continue
overlap_type = overlap(objs[i], objs[j])
if overlap_type > 0:
objs[j]['M_TYPE'] = overlap_type
merged_objs.append(objs[j])
objs[i]['mobjs'] = merged_objs
# merge boxes
filtered_objs = []
merged_num_obj = 0
for obj in objs:
if 'M_TYPE' not in obj:
ids = [obj['object_id']]
dims = [to_x1y1x2y2(obj)]
prominent_type = 1
for mo in obj['mobjs']:
ids.append(mo['object_id'])
obj['names'].extend(mo['names'])
dims.append(to_x1y1x2y2(mo))
if mo['M_TYPE'] > prominent_type:
prominent_type = mo['M_TYPE']
merged_num_obj += len(ids)
obj['ids'] = ids
mdims = np.zeros(4)
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 08:40:18 2020
@author: xie(508)
"""
# Electronic band structures and optical properties of type-II superlattice photodetectors with interfacial effect. Optics Express, Vol. 20, No. 2 (2012). PengFei Qiao.
import re
import sys
import csv
import numpy as np
#########################################
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key, defValue = None):
try:
return _global_dict[key]
except KeyError:
return key+" Error"
def MaterParam(MatersCsv):
Maters = {}
Params = {}
material = ""
with open (MatersCsv,'r') as csvfile:
Info = csv.reader(csvfile,delimiter = ",")
for lines in Info:
if len(lines) == 1:
if len(material) != 0:
Maters[material[0]] = Params
Params = {}
material = re.findall(r'[A-z]+',lines[0])
elif len(lines) == 0:
continue
else:
for item in lines:
Temp_0 = re.split(r'\s+',item)
for i,Temp_1 in enumerate(Temp_0):
if len(Temp_1) == 0:
del Temp_0[i]
if len(Temp_0) == 2:
Params[Temp_0[0]] = float(Temp_0[1])
return Maters
#########################################
global a0; global hbar; global m0; global q0;
q0 = 1.602176462e-19 #electron charge(C); unit energy in eV(J)
hbar = 1.05457266e-34/q0 # reduced Planck const(eV*s)
m0 = (9.10956e-31)*(1e-18)/q0 #kg;ev*s^2/nm^2
def _initLoop():
global _global_dictloop
_global_dictloop = {}
def set_valueloop(key,value):
_global_dictloop[key] = value
def get_valueloop(key,defValue = None):
try:
return _global_dictloop[key]
except KeyError:
return key+" Error"
def ShowPara(material):
ac = get_value('ac_'+material); av = get_value("av_"+material)
b = get_value("b_"+material); C11 = get_value("C11_"+material)
C12 = get_value("C12_"+material); Eg0 = get_value("Eg0_"+material)
Eg77 = get_value("Eg77_"+material); a = get_value("Lattice_"+material)
mc = get_value("mc_"+material); mcc = get_value("mcc_"+material)
r1 = get_value("r1_"+material); r2 = get_value("r2_"+material)
r3 = get_value("r3_"+material); r1c = get_value("r1c_"+material)
r2c = get_value("r2c_"+material); r3c = get_value("r3c_"+material)
Ep = get_value("Ep_"+material); spin = get_value("spin_"+material)
VBO = get_value("VBO_"+material);
Ev = get_value('Ev_'+material); Ec = get_value("Ec_"+material)
Ae = get_value("Ae_"+material); At = get_value("At_"+material)
Pe = get_value("Pe_"+material); Pt = get_value("Pt_"+material)
Qe = get_value("Qe_"+material); Qt = get_value("Qt_"+material)
Rp = get_value("Rp_"+material); Sp = get_value("Sp_"+material)
Vp = get_value("Vp_"+material); U = get_value("U_"+material)
spin = get_value("spin_"+material); Pcv = get_value('Pcv_'+material)
HamBulk = get_value("HamBulk_"+material)
BulkH = get_value("BulkH_"+material)
Para_bulk = {}
Para_bulk['ac'] = ac; Para_bulk['av'] = av; Para_bulk['b'] = b
Para_bulk['C11'] = C11; Para_bulk['C12'] = C12; Para_bulk['Eg0'] = Eg0
Para_bulk['Eg77'] = Eg77; Para_bulk['a'] = a; Para_bulk['mc'] = mc;
Para_bulk['mcc'] = mcc; Para_bulk['r1'] = r1; Para_bulk['r2'] = r2;
Para_bulk['r3'] = r3; Para_bulk['r1c'] = r1c; Para_bulk['r2c'] = r2c;
Para_bulk['r3c'] = r3c; Para_bulk['Ep'] = Ep; Para_bulk['spin'] = spin;
Para_bulk['VBO'] = VBO;
return Para_bulk
def MatSetVal(material):
# =============================================================================
ac = get_value('ac_'+material); av = get_value("av_"+material)
b = get_value("b_"+material); C11 = get_value("C11_"+material)
C12 = get_value("C12_"+material); Eg0 = get_value("Eg0_"+material)
Eg77 = get_value("Eg77_"+material); a = get_value("Lattice_"+material)
mc = get_value("mc_"+material); mcc = get_value("mcc_"+material)
r1 = get_value("r1_"+material); r2 = get_value("r2_"+material)
r3 = get_value("r3_"+material); r1c = get_value("r1c_"+material)
r2c = get_value("r2c_"+material); r3c = get_value("r3c_"+material)
Ep = get_value("Ep_"+material); spin = get_value("spin_"+material)
VBO = get_value("VBO_"+material); a0 = get_value("Lattice_GaSb")
# =============================================================================
Ev = VBO
Ec = Ev + Eg0
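# biaxial strain from the lattice mismatch to the GaSb substrate lattice constant a0; ezz follows from the elastic constants C11, C12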
exx = (a0-a)/a; eyy = exx; ezz = (-2*C12/C11)*exx
Ae = ac*(exx+eyy+ezz); At = hbar*hbar/(2*mcc)
Pe = -av*(exx+eyy+ezz); Pt = hbar*hbar*r1c/(2*m0)
Qe = -b/2*(exx+eyy-2*ezz); Qt = hbar*hbar*r2c/(2*m0)
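# Pcv is the Kane momentum matrix element derived from the Kane energy Ep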
Pcv = np.sqrt(Ep*hbar*hbar/(2*m0))
Rp = -(hbar*hbar/(2*m0))*np.sqrt(3)*((r2c+r3c)/2)
Sp = (hbar*hbar/(2*m0))*2*np.sqrt(3)*r3c
Vp = Pcv/np.sqrt(6)
U = Pcv/np.sqrt(3)
# =============================================================================
set_value('Ev_'+material,Ev); set_value('Ec_'+material,Ec)
set_value('exx_'+material,exx); set_value('eyy_'+material,eyy)
set_value('ezz_'+material,ezz); set_value('Ae_'+material,Ae)
set_value('At_'+material,At); set_value('Pe_'+material,Pe)
set_value('Pt_'+material,Pt); set_value('Qe_'+material,Qe)
set_value('Qt_'+material,Qt); set_value('Rp_'+material,Rp)
set_value('Sp_'+material,Sp); set_value('Vp_'+material,Vp)
set_value('U_'+material,U); set_value('Pcv_'+material,Pcv)
# =============================================================================
def Ham(kt,material):
# =============================================================================
Ev = get_value('Ev_'+material); Ec = get_value("Ec_"+material)
Ae = get_value("Ae_"+material); At = get_value("At_"+material)
Pe = get_value("Pe_"+material); Pt = get_value("Pt_"+material)
Qe = get_value("Qe_"+material); Qt = get_value("Qt_"+material)
Rp = get_value("Rp_"+material); Sp = get_value("Sp_"+material);
Vp = get_value("Vp_"+material); U = get_value("U_"+material);
spin = get_value("spin_"+material)
# =============================================================================
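# 4x4 k.p block: the diagonal of MatrixU0 corresponds to the conduction, heavy-hole, light-hole and split-off bands; MatrixU0 collects the terms that depend only on the in-plane wavevector kt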
MatrixU0 = np.zeros((4,4),dtype=complex)
MatrixU0[0] = [Ec+Ae+At*kt*kt, -np.sqrt(3)*Vp*kt, -Vp*kt, -np.sqrt(2)*Vp*kt]
MatrixU0[1] = [np.conj(MatrixU0[0,1]), Ev-Pe-Pt*kt*kt-Qe-Qt*kt*kt, Rp*kt*kt, np.sqrt(2)*Rp*kt*kt]
MatrixU0[2] = [np.conj(MatrixU0[0,2]), np.conj(MatrixU0[1,2]), Ev-Pe-Pt*kt*kt+Qe+Qt*kt*kt, -np.sqrt(2)*(Qe+Qt*kt*kt)]
MatrixU0[3] = [np.conj(MatrixU0[0,3]), np.conj(MatrixU0[1,3]), np.conj(MatrixU0[2,3]), Ev-Pe-Pt*kt*kt-spin]
MatrixU1 = np.zeros((4,4),dtype=complex)
MatrixU1[0] = [0, 0, complex(0,np.sqrt(2)*U), complex(0,-U)]
MatrixU1[1] = [np.conj(MatrixU1[0,1]), 0, complex(0,Sp*kt), complex(0,-Sp*kt/np.sqrt(2))]
MatrixU1[2] = [np.conj(MatrixU1[0,2]), np.conj(MatrixU1[1,2]), 0, complex(0,-Sp*kt*np.sqrt(3/2))]
MatrixU1[3] = [np.conj(MatrixU1[0,3]), np.conj(MatrixU1[1,3]), np.conj(MatrixU1[2,3]), 0]  # last diagonal entry assumed 0, matching the zero diagonal of the other MatrixU1 rows
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = ['Xray', 'XrayKin', 'XrayDyn', 'XrayDynMag']
__docformat__ = 'restructuredtext'
from .simulation import Simulation
from ..structures.layers import AmorphousLayer, UnitCell
from .. import u, Q_
from ..helpers import make_hash_md5, m_power_x, m_times_n, finderb
import numpy as np
import scipy.constants as constants
from time import time
from os import path
from tqdm.notebook import trange
r_0 = constants.physical_constants['classical electron radius'][0]
class Xray(Simulation):
r"""Xray
Base class for all X-ray scattering simulations.
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
self._energy = np.array([])
self._wl = np.array([])
self._k = np.array([])
self._theta = np.zeros([1, 1])
self._qz = np.zeros([1, 1])
self.polarizations = {0: 'unpolarized',
1: 'circ +',
2: 'circ -',
3: 'sigma',
4: 'pi'}
self.pol_in_state = 3 # sigma
self.pol_out_state = 0 # no-analyzer
self.pol_in = None
self.pol_out = None
self.set_polarization(self.pol_in_state, self.pol_out_state)
def __str__(self, output=[]):
"""String representation of this class"""
output = [['energy', self.energy[0] if np.size(self.energy) == 1 else
'{:f} .. {:f}'.format(np.min(self.energy), np.max(self.energy))],
['wavelength', self.wl[0] if np.size(self.wl) == 1 else
'{:f} .. {:f}'.format(np.min(self.wl), np.max(self.wl))],
['wavenumber', self.k[0] if np.size(self.k) == 1 else
'{:f} .. {:f}'.format(np.min(self.k), np.max(self.k))],
['theta', self.theta[0] if np.size(self.theta) == 1 else
'{:f} .. {:f}'.format(np.min(self.theta), np.max(self.theta))],
['q_z', self.qz[0] if np.size(self.qz) == 1 else
'{:f} .. {:f}'.format(np.min(self.qz), np.max(self.qz))],
['incoming polarization', self.polarizations[self.pol_in_state]],
['analyzer polarization', self.polarizations[self.pol_out_state]],
] + output
return super().__str__(output)
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Must be overwritten by child classes.
Args:
pol_in_state (int): incoming polarization state id.
"""
raise NotImplementedError
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
Must be overwritten by child classes.
Args:
pol_out_state (int): outgoing polarization state id.
"""
raise NotImplementedError
def set_polarization(self, pol_in_state, pol_out_state):
"""set_polarization
Sets the incoming and analyzer (outgoing) polarization.
Args:
pol_in_state (int): incoming polarization state id.
pol_out_state (int): outgoing polarization state id.
"""
self.set_incoming_polarization(pol_in_state)
self.set_outgoing_polarization(pol_out_state)
def get_hash(self, strain_vectors, **kwargs):
"""get_hash
Calculates an unique hash given by the energy :math:`E`,
:math:`q_z` range, polarization states and the ``strain_vectors`` as
well as the sample structure hash for relevant x-ray parameters.
Optionally, part of the strain_map is used.
Args:
strain_vectors (dict{ndarray[float]}): reduced strains per unique
layer.
**kwargs (ndarray[float]): spatio-temporal strain profile.
Returns:
hash (str): unique hash.
"""
param = [self.pol_in_state, self.pol_out_state, self._qz, self._energy, strain_vectors]
if 'strain_map' in kwargs:
strain_map = kwargs.get('strain_map')
if np.size(strain_map) > 1e6:
strain_map = strain_map.flatten()[0:1000000]
param.append(strain_map)
return self.S.get_hash(types='xray') + '_' + make_hash_md5(param)
def get_polarization_factor(self, theta):
r"""get_polarization_factor
Calculates the polarization factor :math:`P(\vartheta)` for a given
incident angle :math:`\vartheta` for the case of `s`-polarization
(pol = 0), or `p`-polarization (pol = 1), or unpolarized X-rays
(pol = 0.5):
.. math::
P(\vartheta) = \sqrt{(1-\mbox{pol}) + \mbox{pol} \cdot \cos(2\vartheta)}
Args:
theta (ndarray[float]): incidence angle.
Returns:
P (ndarray[float]): polarization factor.
"""
return np.sqrt((1-self.pol_in) + self.pol_in*np.cos(2*theta)**2)
def update_experiment(self, caller):
r"""update_experiment
Recalculate energy, wavelength, and wavevector as well as theta
and the scattering vector in case any of these has changed.
.. math::
\lambda & = \frac{hc}{E} \\
E & = \frac{hc}{\lambda} \\
k & = \frac{2\pi}{\lambda} \\
\vartheta & = \arcsin{\frac{\lambda q_z}{4\pi}} \\
q_z & = 2k \sin{\vartheta}
Args:
caller (str): name of calling method.
"""
from scipy import constants
if caller != 'energy':
if caller == 'wl': # calc energy from wavelength
self._energy = Q_((constants.h*constants.c)/self._wl, 'J').to('eV').magnitude
elif caller == 'k': # calc energy von wavevector
self._energy = \
Q_((constants.h*constants.c)/(2*np.pi/self._k), 'J').to('eV').magnitude
if caller != 'wl':
if caller == 'energy': # calc wavelength from energy
self._wl = (constants.h*constants.c)/self.energy.to('J').magnitude
elif caller == 'k': # calc wavelength from wavevector
self._wl = 2*np.pi/self._k
if caller != 'k':
if caller == 'energy': # calc wavevector from energy
self._k = 2*np.pi/self._wl
elif caller == 'wl': # calc wavevector from wavelength
self._k = 2*np.pi/self._wl
if caller != 'theta':
self._theta = np.arcsin(np.outer(self._wl, self._qz[0, :])/np.pi/4)
if caller != 'qz':
self._qz = np.outer(2*self._k, np.sin(self._theta[0, :]))
@property
def energy(self):
return Q_(self._energy, u.eV)
@energy.setter
def energy(self, energy):
self._energy = np.array(energy.to('eV').magnitude, ndmin=1)
self.update_experiment('energy')
@property
def wl(self):
return Q_(self._wl, u.m).to('nm')
@wl.setter
def wl(self, wl):
self._wl = np.array(wl.to_base_units().magnitude, ndmin=1)
self.update_experiment('wl')
@property
def k(self):
return Q_(self._k, 1/u.m).to('1/nm')
@k.setter
def k(self, k):
self._k = np.array(k.to_base_units().magnitude, ndmin=1)
self.update_experiment('k')
@property
def theta(self):
return Q_(self._theta, u.rad).to('deg')
@theta.setter
def theta(self, theta):
self._theta = np.array(theta.to_base_units().magnitude, ndmin=1)
if self._theta.ndim < 2:
self._theta = np.tile(self._theta, (len(self._energy), 1))
self.update_experiment('theta')
@property
def qz(self):
return Q_(self._qz, 1/u.m).to('1/nm')
@qz.setter
def qz(self, qz):
self._qz = np.array(qz.to_base_units().magnitude, ndmin=1)
if self._qz.ndim < 2:
self._qz = np.tile(self._qz, (len(self._energy), 1))
self.update_experiment('qz')
class XrayKin(Xray):
r"""XrayKin
Kinetic X-ray scattering simulations.
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
References:
.. [9] <NAME> (1990). *X-ray diffraction*.
New York: Dover Publications
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
def __str__(self):
"""String representation of this class"""
class_str = 'Kinematical X-Ray Diffraction simulation properties:\n\n'
class_str += super().__str__()
return class_str
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Sets the incoming polarization factor for sigma, pi, and unpolarized
polarization.
Args:
pol_in_state (int): incoming polarization state id.
"""
self.pol_in_state = pol_in_state
if (self.pol_in_state == 1): # circ +
self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 2): # circ-
self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 3): # sigma
self.pol_in = 0
elif (self.pol_in_state == 4): # pi
self.pol_in = 1
else: # unpolarized
self.pol_in_state = 0
self.pol_in = 0.5
self.disp_message('incoming polarizations set to: {:s}'.format(
self.polarizations[self.pol_in_state]))
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
For kinematical X-ray simulation only "no analyzer polarization" is allowed.
Args:
pol_out_state (int): outgoing polarization state id.
"""
self.pol_out_state = pol_out_state
if self.pol_out_state == 0:
self.disp_message('analyzer polarizations set to: {:s}'.format(
self.polarizations[self.pol_out_state]))
else:
self.disp_message('XrayKin only allows NO analyzer polarization')
self.set_outgoing_polarization(0)
@u.wraps(None, (None, 'eV', 'm**-1', None), strict=False)
def get_uc_atomic_form_factors(self, energy, qz, uc):
""" get_uc_atomic_form_factors
Returns the energy- and angle-dependent atomic form factors
:math:`f(q_z, E)` of all atoms in the unit cell as a vector.
Args:
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
uc (UnitCell): unit cell object.
Returns:
f (ndarray[complex]): unit cell atomic form factors.
"""
if (not np.isscalar(energy)) and (not isinstance(energy, object)):
raise TypeError('Only scalars or Quantities are allowed for the energy!')
f = np.zeros([uc.num_atoms, len(qz)], dtype=complex)
for i in range(uc.num_atoms):
f[i, :] = uc.atoms[i][0].get_cm_atomic_form_factor(energy, qz)
return f
@u.wraps(None, (None, 'eV', 'm**-1', None, None), strict=False)
def get_uc_structure_factor(self, energy, qz, uc, strain=0):
r"""get_uc_structure_factor
Calculates the energy-, angle-, and strain-dependent structure factor
:math:`S(E,q_z,\epsilon)` of the unit cell:
.. math::
S(E,q_z,\epsilon) = \sum_i^N f_i \, \exp(-i q_z z_i(\epsilon))
Args:
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
uc (UnitCell): unit cell object.
strain (float, optional): strain of the unit cell 0 .. 1.
Defaults to 0.
Returns:
S (ndarray[complex]): unit cell structure factor.
"""
if (not np.isscalar(energy)) and (not isinstance(energy, object)):
raise TypeError('Only scalars or Quantities for the energy are allowed!')
if np.isscalar(qz):
qz = np.array([qz])
S = np.sum(self.get_uc_atomic_form_factors(energy, qz, uc)
* np.exp(1j * uc._c_axis
* np.outer(uc.get_atom_positions(strain), qz)), 0)
return S
def homogeneous_reflectivity(self, strains=0):
r"""homogeneous_reflectivity
Calculates the reflectivity :math:`R = E_p^t\,(E_p^t)^*` of a
homogeneous sample structure as well as the reflected field
:math:`E_p^N` of all substructures.
Args:
strains (ndarray[float], optional): strains of each sub-structure
0 .. 1. Defaults to 0.
Returns:
(tuple):
- *R (ndarray[complex])* - homogeneous reflectivity.
- *A (ndarray[complex])* - reflected fields of sub-structures.
"""
if np.isscalar(strains) and strains == 0:
strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
t1 = time()
self.disp_message('Calculating _homogenous_reflectivity_ ...')
# get the reflected field of the structure for each energy
R = np.zeros_like(self._qz)
for i, energy in enumerate(self._energy):
qz = self._qz[i, :]
theta = self._theta[i, :]
Ept, A = self.homogeneous_reflected_field(self.S, energy, qz, theta, strains)
# calculate the real reflectivity from Ef
R[i, :] = np.real(Ept*np.conj(Ept))
self.disp_message('Elapsed time for _homogenous_reflectivity_: {:f} s'.format(time()-t1))
return R, A
@u.wraps((None, None), (None, None, 'eV', 'm**-1', 'rad', None), strict=False)
def homogeneous_reflected_field(self, S, energy, qz, theta, strains=0):
r"""homogeneous_reflected_field
Calculates the reflected field :math:`E_p^t` of the whole sample
structure as well as for each sub-structure (:math:`E_p^N`). The
reflected wave field :math:`E_p` from a single layer of unit cells at
the detector is calculated according to Ref. [9]_:
.. math::
E_p = \frac{i}{\varepsilon_0}\frac{e^2}{m_e c_0^2}
\frac{P(\vartheta) S(E,q_z,\epsilon)}{A q_z}
For the case of :math:`N` similar planes of unit cells one can write:
.. math::
E_p^N = \sum_{n=0}^{N-1} E_p \exp(i q_z z n )
where :math:`z` is the distance between the planes (c-axis). The above
equation can be simplified to:
.. math::
E_p^N = E_p \psi(q_z,z,N)
introducing the interference function
.. math::
\psi(q_z,z,N) & = \sum_{n=0}^{N-1} \exp(i q_z z n) \\
& = \frac{1- \exp(i q_z z N)}{1- \exp(i q_z z)}
The total reflected wave field of all :math:`i = 1\ldots M` homogeneous
layers (:math:`E_p^t`) is the phase-correct summation of all individual
:math:`E_p^{N,i}`:
.. math::
E_p^t = \sum_{i=1}^M E_p^{N,i} \exp(i q_z Z_i)
where :math:`Z_i = \sum_{j=1}^{i-1} N_j z_j` is the distance of the
:math:`i`-th layer from the surface.
Args:
S (Structure, UnitCell): structure or sub-structure to calculate on.
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
theta (ndarray[float, Quantity]): scattering incidence angle.
strains (ndarray[float], optional): strains of each sub-structure
0 .. 1. Defaults to 0.
Returns:
(tuple):
- *Ept (ndarray[complex])* - reflected field.
- *A (ndarray[complex])* - reflected fields of substructures.
"""
# if no strains are given we assume no strain (1)
if np.isscalar(strains) and strains == 0:
strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
N = len(qz) # nb of qz
Ept = np.zeros([1, N]) # total reflected field
Z = 0 # total length of the substructure from the surface
A = list([0, 2]) # cell matrix of reflected fields EpN of substructures
strainCounter = 0 # this is the index of the strain vector if applied
# traverse substructures
for sub_structures in S.sub_structures:
if isinstance(sub_structures[0], UnitCell):
# the substructure is an unit cell and we can calculate
# Ep directly
Ep = self.get_Ep(energy, qz, theta, sub_structures[0], strains[strainCounter])
z = sub_structures[0]._c_axis
strainCounter = strainCounter+1
elif isinstance(sub_structures[0], AmorphousLayer):
raise ValueError('The substructure cannot be an AmorphousLayer!')
else:
# the substructure is a structure, so we do a recursive
# call of this method
d = sub_structures[0].get_number_of_sub_structures()
Ep, temp = self.homogeneous_reflected_field(
sub_structures[0], energy, qz, theta,
strains[strainCounter:(strainCounter + d)])
z = sub_structures[0].get_length().magnitude
strainCounter = strainCounter + d
A.append([temp, [sub_structures[0].name + ' substructures']])
A.append([Ep, '{:d}x {:s}'.format(1, sub_structures[0].name)])
# calculate the interference function for N repetitions of
# the substructure with the length z
psi = self.get_interference_function(qz, z, sub_structures[1])
# calculate the reflected field for N repetitions of
# the substructure with the length z
EpN = Ep * psi
# remember the result
A.append([EpN, '{:d}x {:s}'.format(sub_structures[1], sub_structures[0].name)])
# add the reflected field of the current substructure
# phase-correct to the already calculated substructures
Ept = Ept+(EpN*np.exp(1j*qz*Z))
# update the total length $Z$ of the already calculated
# substructures
Z = Z + z*sub_structures[1]
# add static substrate to kinXRD
if S.substrate != []:
temp, temp2 = self.homogeneous_reflected_field(S.substrate, energy, qz, theta)
A.append([temp2, 'static substrate'])
Ept = Ept+(temp*np.exp(1j*qz*Z))
return Ept, A
@u.wraps(None, (None, 'm**-1', 'm', None), strict=False)
def get_interference_function(self, qz, z, N):
r"""get_interference_function
Calculates the interference function for :math:`N` repetitions of the
structure with the length :math:`z`:
.. math::
\psi(q_z,z,N) & = \sum_{n=0}^{N-1} \exp(i q_z z n) \\
& = \frac{1- \exp(i q_z z N)}{1- \exp(i q_z z)}
Args:
qz (ndarray[float, Quantity]): scattering vectors.
z (float): thickness/length of the structure.
N (int): repetitions of the structure.
Returns:
psi (ndarray[complex]): interference function.
"""
psi = (1-np.exp(1j*qz*z*N)) / (1 - np.exp(1j*qz*z))
return psi
@u.wraps(None, (None, 'eV', 'm**-1', 'rad', None, None), strict=False)
def get_Ep(self, energy, qz, theta, uc, strain):
r"""get_Ep
Calculates the reflected field :math:`E_p` for one unit cell
with a given strain :math:`\epsilon`:
.. math::
E_p = \frac{i}{\varepsilon_0} \frac{e^2}{m_e c_0^2}
\frac{P S(E,q_z,\epsilon)}{A q_z}
with :math:`e` as electron charge, :math:`m_e` as electron
mass, :math:`c_0` as vacuum light velocity,
:math:`\varepsilon_0` as vacuum permittivity,
:math:`P` as polarization factor and :math:`S(E,q_z,\sigma)`
as energy-, angle-, and strain-dependent unit cell structure
factor.
Args:
energy (float, Quantity): photon energy.
qz (ndarray[float, Quantity]): scattering vectors.
theta (ndarray[float, Quantity]): scattering incidence angle.
uc (UnitCell): unit cell object.
strain (float, optional): strain of the unit cell 0 .. 1.
Defaults to 0.
Returns:
Ep (ndarray[complex]): reflected field.
"""
import scipy.constants as c
Ep = 1j/c.epsilon_0*c.elementary_charge**2/c.electron_mass/c.c**2 \
* (self.get_polarization_factor(theta)
* self.get_uc_structure_factor(energy, qz, uc, strain)
/ uc._area) / qz
return Ep
class XrayDyn(Xray):
r"""XrayDyn
Dynamical X-ray scattering simulations.
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
last_atom_ref_trans_matrices (list): remember last result of
atom ref_trans_matrices to speed up calculation.
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
self.last_atom_ref_trans_matrices = {'atom_ids': [],
'hashes': [],
'H': []}
def __str__(self):
"""String representation of this class"""
class_str = 'Dynamical X-Ray Diffraction simulation properties:\n\n'
class_str += super().__str__()
return class_str
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Sets the incoming polarization factor for sigma, pi, and unpolarized
polarization.
Args:
pol_in_state (int): incoming polarization state id.
"""
self.pol_in_state = pol_in_state
if (self.pol_in_state == 1): # circ +
self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 2): # circ-
self.disp_message('incoming polarization {:s} not implemented'.format(
self.polarizations[self.pol_in_state]))
self.set_incoming_polarization(3)
return
elif (self.pol_in_state == 3): # sigma
self.pol_in = 0
elif (self.pol_in_state == 4): # pi
self.pol_in = 1
else: # unpolarized
self.pol_in_state = 0
self.pol_in = 0.5
self.disp_message('incoming polarizations set to: {:s}'.format(
self.polarizations[self.pol_in_state]))
def set_outgoing_polarization(self, pol_out_state):
"""set_outgoing_polarization
For dynamical X-ray simulation only "no analyzer polarization" is allowed.
Args:
pol_out_state (int): outgoing polarization state id.
"""
self.pol_out_state = pol_out_state
if self.pol_out_state == 0:
self.disp_message('analyzer polarizations set to: {:s}'.format(
self.polarizations[self.pol_out_state]))
else:
self.disp_message('XrayDyn only allows NO analyzer polarization')
self.set_outgoing_polarization(0)
def homogeneous_reflectivity(self, *args):
r"""homogeneous_reflectivity
Calculates the reflectivity :math:`R` of the whole sample structure
and the reflectivity-transmission matrices :math:`M_{RT}` for
each substructure. The reflectivity of the :math:`2\times 2`
matrices for each :math:`q_z` is calculates as follow:
.. math:: R = \left|M_{RT}^t(0,1)/M_{RT}^t(1,1)\right|^2
Args:
*args (ndarray[float], optional): strains for each substructure.
Returns:
(tuple):
- *R (ndarray[float])* - homogeneous reflectivity.
- *A (ndarray[complex])* - reflectivity-transmission matrices of
sub-structures.
"""
# if no strains are given we assume no strain
if len(args) == 0:
strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
else:
strains = args[0]
t1 = time()
self.disp_message('Calculating _homogenous_reflectivity_ ...')
# get the reflectivity-transmission matrix of the structure
RT, A = self.homogeneous_ref_trans_matrix(self.S, strains)
# calculate the real reflectivity from the RT matrix
R = self.calc_reflectivity_from_matrix(RT)
self.disp_message('Elapsed time for _homogenous_reflectivity_: {:f} s'.format(time()-t1))
return R, A
def homogeneous_ref_trans_matrix(self, S, *args):
r"""homogeneous_ref_trans_matrix
Calculates the reflectivity-transmission matrices :math:`M_{RT}` of
the whole sample structure as well as for each sub-structure.
The reflectivity-transmission matrix of a single unit cell is
calculated from the reflection-transmission matrices :math:`H_i`
of each atom and the phase matrices between the atoms :math:`L_i`:
.. math:: M_{RT} = \prod_i H_i \ L_i
For :math:`N` similar layers of unit cells one can calculate the
:math:`N`-th power of the unit cell :math:`\left(M_{RT}\right)^N`.
The reflection-transmission matrix for the whole sample
:math:`M_{RT}^t` consisting of :math:`j = 1\ldots M`
sub-structures is then again:
.. math:: M_{RT}^t = \prod_{j=1}^M \left(M_{RT^,j}\right)^{N_j}
Args:
S (Structure, UnitCell): structure or sub-structure to calculate on.
*args (ndarray[float], optional): strains for each substructure.
Returns:
(tuple):
- *RT (ndarray[complex])* - reflectivity-transmission matrix.
- *A (ndarray[complex])* - reflectivity-transmission matrices of
sub-structures.
"""
# if no strains are given we assume no strain (1)
if len(args) == 0:
strains = np.zeros([S.get_number_of_sub_structures(), 1])
else:
strains = args[0]
# initialize
RT = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :],
(np.size(self._qz, 0), np.size(self._qz, 1), 1, 1)) # ref_trans_matrix
A = [] # list of ref_trans_matrices of substructures
strainCounter = 0
# traverse substructures
for sub_structure in S.sub_structures:
if isinstance(sub_structure[0], UnitCell):
# the sub_structure is an unitCell
# calculate the ref-trans matrices for N unitCells
temp = m_power_x(self.get_uc_ref_trans_matrix(
sub_structure[0], strains[strainCounter]),
sub_structure[1])
strainCounter += 1
# remember the result
A.append([temp, '{:d}x {:s}'.format(sub_structure[1], sub_structure[0].name)])
elif isinstance(sub_structure[0], AmorphousLayer):
raise ValueError('The substructure cannot be an AmorphousLayer!')
else:
# its a structure
# make a recursive call
temp, temp2 = self.homogeneous_ref_trans_matrix(
sub_structure[0],
strains[strainCounter:(strainCounter
+ sub_structure[0].get_number_of_sub_structures())])
A.append([temp2, sub_structure[0].name + ' substructures'])
strainCounter = strainCounter+sub_structure[0].get_number_of_sub_structures()
A.append([temp, '{:d}x {:s}'.format(1, sub_structure[0].name)])
# calculate the ref-trans matrices for N sub structures
temp = m_power_x(temp, sub_structure[1])
A.append([temp, '{:d}x {:s}'.format(sub_structure[1], sub_structure[0].name)])
# multiply it to the output
RT = m_times_n(RT, temp)
# if a substrate is included add it at the end
if S.substrate != []:
temp, temp2 = self.homogeneous_ref_trans_matrix(S.substrate)
A.append([temp2, 'static substrate'])
RT = m_times_n(RT, temp)
return RT, A
def inhomogeneous_reflectivity(self, strain_map, strain_vectors, **kwargs):
"""inhomogeneous_reflectivity
Returns the reflectivity of an inhomogeneously strained sample
structure for a given ``strain_map`` in position and time, as well
as for a given set of possible strains for each unit cell in the
sample structure (``strain_vectors``).
If no reflectivity is saved in the cache it is calculated.
Depending on the given ``calc_type`` the corresponding
sub-routines for the reflectivity computation are called:
* ``parallel`` parallelization over the time steps utilizing
`Dask <https://dask.org/>`_
* ``distributed`` not implemented in Python, but should be possible
with `Dask <https://dask.org/>`_ as well
* ``sequential`` no parallelization at all
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
**kwargs:
- *calc_type (str)* - type of calculation.
- *dask_client (Dask.Client)* - Dask client.
- *job (Dask.job)* - Dask job.
- *num_workers (int)* - Dask number of workers.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
# create a hash of all simulation parameters
filename = 'inhomogeneous_reflectivity_dyn_' \
+ self.get_hash(strain_vectors, strain_map=strain_map) \
+ '.npz'
full_filename = path.abspath(path.join(self.cache_dir, filename))
# check if we find some corresponding data in the cache dir
if path.exists(full_filename) and not self.force_recalc:
# found something so load it
tmp = np.load(full_filename)
R = tmp['R']
self.disp_message('_inhomogeneous_reflectivity_ loaded from file:\n\t' + filename)
else:
t1 = time()
self.disp_message('Calculating _inhomogeneous_reflectivity_ ...')
# parse the input arguments
if not isinstance(strain_map, np.ndarray):
raise TypeError('strain_map must be a numpy ndarray!')
if not isinstance(strain_vectors, list):
raise TypeError('strain_vectors must be a list!')
dask_client = kwargs.get('dask_client', [])
calc_type = kwargs.get('calc_type', 'sequential')
if calc_type not in ['parallel', 'sequential', 'distributed']:
raise TypeError('calc_type must be either _parallel_, '
'_sequential_, or _distributed_!')
job = kwargs.get('job')
num_workers = kwargs.get('num_workers', 1)
# All ref-trans matrices for all unique unitCells and for all
# possible strains, given by strainVectors, are calculated in
# advance.
RTM = self.get_all_ref_trans_matrices(strain_vectors)
# select the type of computation
if calc_type == 'parallel':
R = self.parallel_inhomogeneous_reflectivity(strain_map,
strain_vectors,
RTM,
dask_client)
elif calc_type == 'distributed':
R = self.distributed_inhomogeneous_reflectivity(strain_map,
strain_vectors,
job,
num_workers,
RTM)
else: # sequential
R = self.sequential_inhomogeneous_reflectivity(strain_map,
strain_vectors,
RTM)
self.disp_message('Elapsed time for _inhomogeneous_reflectivity_:'
' {:f} s'.format(time()-t1))
self.save(full_filename, {'R': R}, '_inhomogeneous_reflectivity_')
return R
def sequential_inhomogeneous_reflectivity(self, strain_map, strain_vectors, RTM):
"""sequential_inhomogeneous_reflectivity
Returns the reflectivity of an inhomogeneously strained sample structure
for a given ``strain_map`` in position and time, as well as for a given
set of possible strains for each unit cell in the sample structure
(``strain_vectors``). The function calculates the results sequentially
without parallelization.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
# initialize
M = np.size(strain_map, 0) # delay steps
R = np.zeros([M, np.size(self._qz, 0), np.size(self._qz, 1)])
if self.progress_bar:
iterator = trange(M, desc='Progress', leave=True)
else:
iterator = range(M)
# get the inhomogeneous reflectivity of the sample
# structure for each time step of the strain map
for i in iterator:
R[i, :, :] = self.calc_inhomogeneous_reflectivity(strain_map[i, :],
strain_vectors,
RTM)
return R
def parallel_inhomogeneous_reflectivity(self, strain_map, strain_vectors,
RTM, dask_client):
"""parallel_inhomogeneous_reflectivity
Returns the reflectivity of an inhomogeneously strained sample structure
for a given ``strain_map`` in position and time, as well as for a given
set of possible strains for each unit cell in the sample structure
(``strain_vectors``). The function parallelizes the calculation over the
time steps, since the results do not depend on each other.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
dask_client (Dask.Client): Dask client.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
if not dask_client:
raise ValueError('no dask client set')
from dask import delayed # to allow parallel computation
# initialize
res = []
M = np.size(strain_map, 0) # delay steps
N = np.size(self._qz, 0) # energy steps
K = np.size(self._qz, 1) # qz steps
R = np.zeros([M, N, K])
uc_indices, _, _ = self.S.get_layer_vectors()
# init unity matrix for matrix multiplication
RTU = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :], (N, K, 1, 1))
# make RTM available for all works
remote_RTM = dask_client.scatter(RTM)
remote_RTU = dask_client.scatter(RTU)
remote_uc_indices = dask_client.scatter(uc_indices)
remote_strain_vectors = dask_client.scatter(strain_vectors)
# precalculate the substrate ref_trans_matrix if present
if self.S.substrate != []:
RTS, _ = self.homogeneous_ref_trans_matrix(self.S.substrate)
else:
RTS = RTU
# create dask.delayed tasks for all delay steps
for i in range(M):
RT = delayed(XrayDyn.calc_inhomogeneous_ref_trans_matrix)(
remote_uc_indices,
remote_RTU,
strain_map[i, :],
remote_strain_vectors,
remote_RTM)
RT = delayed(m_times_n)(RT, RTS)
Ri = delayed(XrayDyn.calc_reflectivity_from_matrix)(RT)
res.append(Ri)
# compute results
res = dask_client.compute(res, sync=True)
# reorder results to reflectivity matrix
for i in range(M):
R[i, :, :] = res[i]
return R
def distributed_inhomogeneous_reflectivity(self, strain_map, strain_vectors, RTM,
job, num_worker):
"""distributed_inhomogeneous_reflectivity
This is a stub. Not yet implemented in python.
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
job (Dask.job): Dask job.
num_workers (int): Dask number of workers.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
raise NotImplementedError
def calc_inhomogeneous_reflectivity(self, strains, strain_vectors, RTM):
r"""calc_inhomogeneous_reflectivity
Calculates the reflectivity of a inhomogeneous sample structure for
given ``strain_vectors`` for a single time step. Similar to the
homogeneous sample structure, the reflectivity of an unit cell is
calculated from the reflection-transmission matrices :math:`H_i` of
each atom and the phase matrices between the atoms :math:`L_i` in the
unit cell:
.. math:: M_{RT} = \prod_i H_i \ L_i
Since all layers are generally inhomogeneously strained we have to
traverse all individual unit cells (:math:`j = 1\ldots M`) in the
sample to calculate the total reflection-transmission matrix
:math:`M_{RT}^t`:
.. math:: M_{RT}^t = \prod_{j=1}^M M_{RT,j}
The reflectivity of the :math:`2\times 2` matrices for each :math:`q_z`
is calculates as follow:
.. math:: R = \left|M_{RT}^t(1,2)/M_{RT}^t(2,2)\right|^2
Args:
strain_map (ndarray[float]): spatio-temporal strain profile.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
Returns:
R (ndarray[float]): inhomogeneous reflectivity.
"""
# initialize ref_trans_matrix
N = np.shape(self._qz)[1] # number of q_z
M = np.shape(self._qz)[0] # number of energies
uc_indices, _, _ = self.S.get_layer_vectors()
# initialize ref_trans_matrix
RTU = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :], (M, N, 1, 1))
RT = XrayDyn.calc_inhomogeneous_ref_trans_matrix(uc_indices,
RTU,
strains,
strain_vectors,
RTM)
# if a substrate is included add it at the end
if self.S.substrate != []:
RTS, _ = self.homogeneous_ref_trans_matrix(self.S.substrate)
RT = m_times_n(RT, RTS)
# calculate reflectivity from ref-trans matrix
R = self.calc_reflectivity_from_matrix(RT)
return R
@staticmethod
def calc_inhomogeneous_ref_trans_matrix(uc_indices, RT, strains,
strain_vectors, RTM):
r"""calc_inhomogeneous_ref_trans_matrix
Sub-function of :meth:`calc_inhomogeneous_reflectivity` and for
parallel computing (needs to be static) only for calculating the
total reflection-transmission matrix :math:`M_{RT}^t`:
.. math:: M_{RT}^t = \prod_{j=1}^M M_{RT,j}
Args:
uc_indices (ndarray[float]): unit cell indices.
RT (ndarray[complex]): reflection-transmission matrix.
strains (ndarray[float]): spatial strain profile for single time
step.
strain_vectors (list[ndarray[float]]): reduced strains per unique
layer.
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
Returns:
RT (ndarray[complex]): reflection-transmission matrix.
"""
# traverse all unit cells in the sample structure
for i, uc_index in enumerate(uc_indices):
# Find the ref-trans matrix in the RTM cell array for the
# current unit_cell ID and applied strain. Use the
# ``finderb`` helper to look up the closest precalculated strain value.
strain_index = finderb(strains[i], strain_vectors[int(uc_index)])[0]
temp = RTM[int(uc_index)][strain_index]
if temp is not None:
RT = m_times_n(RT, temp)
else:
raise ValueError('RTM not found')
return RT
def get_all_ref_trans_matrices(self, *args):
"""get_all_ref_trans_matrices
Returns a list of all reflection-transmission matrices for each
unique unit cell in the sample structure for a given set of applied
strains for each unique unit cell given by the ``strain_vectors``
input. If this data was saved on disk before, it is loaded, otherwise
it is calculated.
Args:
args (list[ndarray[float]], optional): reduced strains per unique
layer.
Returns:
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
"""
if len(args) == 0:
strain_vectors = [np.array([1])]*self.S.get_number_of_unique_layers()
else:
strain_vectors = args[0]
# create a hash of all simulation parameters
filename = 'all_ref_trans_matrices_dyn_' \
+ self.get_hash(strain_vectors) + '.npz'
full_filename = path.abspath(path.join(self.cache_dir, filename))
# check if we find some corresponding data in the cache dir
if path.exists(full_filename) and not self.force_recalc:
# found something so load it
tmp = np.load(full_filename)
RTM = tmp['RTM']
self.disp_message('_all_ref_trans_matrices_dyn_ loaded from file:\n\t' + filename)
else:
# nothing found so calculate it and save it
RTM = self.calc_all_ref_trans_matrices(strain_vectors)
self.save(full_filename, {'RTM': RTM}, '_all_ref_trans_matrices_dyn_')
return RTM
def calc_all_ref_trans_matrices(self, *args):
"""calc_all_ref_trans_matrices
Calculates a list of all reflection-transmission matrices for each
unique unit cell in the sample structure for a given set of applied
strains to each unique unit cell given by the ``strain_vectors`` input.
Args::
args (list[ndarray[float]], optional): reduced strains per unique
layer.
Returns:
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
"""
t1 = time()
self.disp_message('Calculate all _ref_trans_matrices_ ...')
# initialize
uc_ids, uc_handles = self.S.get_unique_layers()
# if no strain_vectors are given we just do it for no strain (1)
if len(args) == 0:
strain_vectors = [np.array([1])]*len(uc_ids)
else:
strain_vectors = args[0]
# check if there are strains for each unique unitCell
if len(strain_vectors) != len(uc_ids):
raise TypeError('The strain vector has not the same size '
'as number of unique unit cells')
# initialize ref_trans_matrices
RTM = []
# traverse all unique unit_cells
for i, uc in enumerate(uc_handles):
# traverse all strains in the strain_vector for this unique
# unit_cell
if not isinstance(uc, UnitCell):
raise ValueError('All layers must be UnitCells!')
temp = []
for strain in strain_vectors[i]:
temp.append(self.get_uc_ref_trans_matrix(uc, strain))
RTM.append(temp)
self.disp_message('Elapsed time for _ref_trans_matrices_: {:f} s'.format(time()-t1))
return RTM
def get_uc_ref_trans_matrix(self, uc, *args):
r"""get_uc_ref_trans_matrix
Returns the reflection-transmission matrix of a unit cell:
.. math:: M_{RT} = \prod_i H_i \ L_i
where :math:`H_i` and :math:`L_i` are the atomic reflection-
transmission matrix and the phase matrix for the atomic distances,
respectively.
Args:
uc (UnitCell): unit cell object.
args (float, optional): strain of unit cell.
Returns:
RTM (list[ndarray[complex]]): reflection-transmission matrices for
all given strains per unique layer.
"""
if len(args) == 0:
strain = 0 # set the default strain to 0
else:
strain = args[0]
M = len(self._energy) # number of energies
N = np.shape(self._qz)[1] # number of q_z
K = uc.num_atoms # number of atoms
# initialize matrices
RTM = np.tile(np.eye(2, 2)[np.newaxis, np.newaxis, :, :], (M, N, 1, 1))
# traverse all atoms of the unit cell
for i in range(K):
# Calculate the relative distance between the atoms.
# The relative position is calculated by the function handle
# stored in the atoms list as 2nd element. This
# function returns a relative position dependent on the
# applied strain.
if i == (K-1): # its the last atom
del_dist = (strain+1)-uc.atoms[i][1](strain)
else:
del_dist = uc.atoms[i+1][1](strain)-uc.atoms[i][1](strain)
# get the reflection-transmission matrix and phase matrix
# from all atoms in the unit cell and multiply them
# together
RTM = m_times_n(RTM,
self.get_atom_ref_trans_matrix(uc.atoms[i][0],
uc._area,
uc._deb_wal_fac))
RTM = m_times_n(RTM,
self.get_atom_phase_matrix(del_dist*uc._c_axis))
return RTM
def get_atom_ref_trans_matrix(self, atom, area, deb_wal_fac):
r"""get_atom_ref_trans_matrix
Calculates the reflection-transmission matrix of an atom from dynamical
x-ray theory:
.. math::
H = \frac{1}{\tau} \begin{bmatrix}
\left(\tau^2 - \rho^2\right) & \rho \\
-\rho & 1
\end{bmatrix}
Args:
atom (Atom, AtomMixed): atom or mixed atom
area (float): area of the unit cell [m²]
deb_wal_fac (float): Debye-Waller factor for unit cell
Returns:
H (ndarray[complex]): reflection-transmission matrix
"""
# check for already calculated data
_hash = make_hash_md5([self._energy, self._qz, self.pol_in_state, self.pol_out_state,
area, deb_wal_fac])
try:
index = self.last_atom_ref_trans_matrices['atom_ids'].index(atom.id)
except ValueError:
index = -1
if (index >= 0) and (_hash == self.last_atom_ref_trans_matrices['hashes'][index]):
# These are the same X-ray parameters as last time so we
# can use the same matrix again for this atom
H = self.last_atom_ref_trans_matrices['H'][index]
else:
# These are new parameters so we have to calculate.
# Get the reflection-transmission-factors
rho = self.get_atom_reflection_factor(atom, area, deb_wal_fac)
tau = self.get_atom_transmission_factor(atom, area, deb_wal_fac)
# calculate the reflection-transmission matrix
H = np.zeros([np.shape(self._qz)[0], np.shape(self._qz)[1], 2, 2], dtype=np.cfloat)
H[:, :, 0, 0] = (1/tau)*(tau**2-rho**2)
H[:, :, 0, 1] = (1/tau)*(rho)
H[:, :, 1, 0] = (1/tau)*(-rho)
H[:, :, 1, 1] = (1/tau)
# remember this matrix for next use with the same
# parameters for this atom
if index >= 0:
self.last_atom_ref_trans_matrices['atom_ids'][index] = atom.id
self.last_atom_ref_trans_matrices['hashes'][index] = _hash
self.last_atom_ref_trans_matrices['H'][index] = H
else:
self.last_atom_ref_trans_matrices['atom_ids'].append(atom.id)
self.last_atom_ref_trans_matrices['hashes'].append(_hash)
self.last_atom_ref_trans_matrices['H'].append(H)
return H
def get_atom_reflection_factor(self, atom, area, deb_wal_fac):
r"""get_atom_reflection_factor
Calculates the reflection factor from dynamical x-ray theory:
.. math:: \rho = \frac{-i 4 \pi \ r_e \ f(E,q_z) \ P(\theta)
\exp(-M)}{q_z \ A}
- :math:`r_e` is the electron radius
- :math:`f(E,q_z)` is the energy and angle dispersive atomic
form factor
        - :math:`P(\theta)` is the polarization factor
- :math:`A` is the area in :math:`x-y` plane on which the atom
is placed
        - :math:`M = 0.5(\mbox{dbf} \ q_z)^2` where
:math:`\mbox{dbf}^2 = \langle u^2\rangle` is the average
thermal vibration of the atoms - Debye-Waller factor
Args:
atom (Atom, AtomMixed): atom or mixed atom
area (float): area of the unit cell [m²]
deb_wal_fac (float): Debye-Waller factor for unit cell
Returns:
rho (complex): reflection factor
"""
rho = (-4j*np.pi*r_0
* atom.get_cm_atomic_form_factor(self._energy, self._qz)
* self.get_polarization_factor(self._theta)
* np.exp(-0.5*(deb_wal_fac*self._qz)**2))/(self._qz*area)
return rho
def get_atom_transmission_factor(self, atom, area, deb_wal_fac):
r"""get_atom_transmission_factor
Calculates the transmission factor from dynamical x-ray theory:
.. math:: \tau = 1 - \frac{i 4 \pi r_e f(E,0) \exp(-M)}{q_z A}
- :math:`r_e` is the electron radius
- :math:`f(E,0)` is the energy dispersive atomic form factor
(no angle correction)
- :math:`A` is the area in :math:`x-y` plane on which the atom
is placed
- :math:`M = 0.5(\mbox{dbf} \ q_z)^2` where
:math:`\mbox{dbf}^2 = \langle u^2\rangle` is the average
thermal vibration of the atoms - Debye-Waller factor
Args:
atom (Atom, AtomMixed): atom or mixed atom
area (float): area of the unit cell [m²]
deb_wal_fac (float): Debye-Waller factor for unit cell
Returns:
tau (complex): transmission factor
"""
tau = 1 - (4j*np.pi*r_0
* atom.get_cm_atomic_form_factor(self._energy, np.zeros_like(self._qz))
* np.exp(-0.5*(deb_wal_fac*self._qz)**2))/(self._qz*area)
return tau
def get_atom_phase_matrix(self, distance):
r"""get_atom_phase_matrix
Calculates the phase matrix from dynamical x-ray theory:
.. math::
L = \begin{bmatrix}
\exp(i \phi) & 0 \\
0 & \exp(-i \phi)
\end{bmatrix}
Args:
distance (float): distance between atomic planes
Returns:
L (ndarray[complex]): phase matrix
"""
phi = self.get_atom_phase_factor(distance)
L = np.zeros([np.shape(self._qz)[0], np.shape(self._qz)[1], 2, 2], dtype=np.cfloat)
L[:, :, 0, 0] = np.exp(1j*phi)
L[:, :, 1, 1] = np.exp(-1j*phi)
return L
def get_atom_phase_factor(self, distance):
r"""get_atom_phase_factor
Calculates the phase factor :math:`\phi` for a distance :math:`d`
from dynamical x-ray theory:
.. math:: \phi = \frac{d \ q_z}{2}
Args:
distance (float): distance between atomic planes
Returns:
phi (float): phase factor
"""
phi = distance * self._qz/2
return phi
@staticmethod
def calc_reflectivity_from_matrix(M):
r"""calc_reflectivity_from_matrix
        Calculates the reflectivity from a :math:`2\times2` matrix of
transmission and reflectivity factors:
.. math:: R = \left|M(0,1)/M(1,1)\right|^2
Args:
M (ndarray[complex]): reflection-transmission matrix
Returns:
R (ndarray[float]): reflectivity
"""
return np.abs(M[:, :, 0, 1]/M[:, :, 1, 1])**2
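# Illustrative sketch (not part of the original class): the same reflectivity formula
# R = |M[0,1] / M[1,1]|^2 applied to a dummy stack of 2x2 matrices, just to show the
# expected (n_energies, n_qz, 2, 2) -> (n_energies, n_qz) shapes. The random matrices
# below are placeholders, not physical reflection-transmission matrices.
def _demo_calc_reflectivity_from_matrix():
    M = np.random.rand(5, 11, 2, 2) + 1j*np.random.rand(5, 11, 2, 2)
    R = np.abs(M[:, :, 0, 1]/M[:, :, 1, 1])**2
    assert R.shape == (5, 11)
    return R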
class XrayDynMag(Xray):
r"""XrayDynMag
Dynamical magnetic X-ray scattering simulations.
    Adapted from Elzo et al. [10]_ and initially realized in `Project Dyna
<http://dyna.neel.cnrs.fr>`_.
Original copyright notice:
*Copyright Institut Neel, CNRS, Grenoble, France*
**Project Collaborators:**
- <NAME>, <EMAIL>
- <NAME> (PhD, 2009-2012)
- <NAME> Sextants beamline, Synchrotron Soleil,
<EMAIL>
- <NAME> (PhD, 2010-2013) now at `LCPMR CNRS, Paris
<https://lcpmr.cnrs.fr/content/emmanuelle-jal>`_
- <NAME>, <EMAIL>
- <NAME> - Padraic Shaffer’s group - Berkeley Nat. Lab.
**Questions to:**
- <NAME>, <EMAIL>
Args:
S (Structure): sample to do simulations with.
force_recalc (boolean): force recalculation of results.
Keyword Args:
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
Attributes:
S (Structure): sample structure to calculate simulations on.
force_recalc (boolean): force recalculation of results.
save_data (boolean): true to save simulation results.
cache_dir (str): path to cached data.
disp_messages (boolean): true to display messages from within the
simulations.
progress_bar (boolean): enable tqdm progress bar.
energy (ndarray[float]): photon energies :math:`E` of scattering light
wl (ndarray[float]): wavelengths :math:`\lambda` of scattering light
k (ndarray[float]): wavenumber :math:`k` of scattering light
theta (ndarray[float]): incidence angles :math:`\theta` of scattering
light
qz (ndarray[float]): scattering vector :math:`q_z` of scattering light
polarizations (dict): polarization states and according names.
pol_in_state (int): incoming polarization state as defined in
polarizations dict.
pol_out_state (int): outgoing polarization state as defined in
polarizations dict.
pol_in (float): incoming polarization factor (can be a complex ndarray).
pol_out (float): outgoing polarization factor (can be a complex ndarray).
        last_atom_ref_trans_matrices (dict): remember last result of
atom ref_trans_matrices to speed up calculation.
References:
.. [10] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Ramos, <NAME>, <NAME> & <NAME>, *X-ray
resonant magnetic reflectivity of stratified magnetic structures:
Eigenwave formalism and application to a W/Fe/W trilayer*,
`<NAME>. Mater. 324, 105 (2012).
<http://www.doi.org/10.1016/j.jmmm.2011.07.019>`_
"""
def __init__(self, S, force_recalc, **kwargs):
super().__init__(S, force_recalc, **kwargs)
self.last_atom_ref_trans_matrices = {'atom_ids': [],
'hashes': [],
'A': [],
'A_phi': [],
'P': [],
'P_phi': [],
'A_inv': [],
'A_inv_phi': [],
'k_z': []}
def __str__(self):
"""String representation of this class"""
class_str = 'Dynamical Magnetic X-Ray Diffraction simulation properties:\n\n'
class_str += super().__str__()
return class_str
def get_hash(self, **kwargs):
"""get_hash
        Calculates a unique hash given by the energy :math:`E`, :math:`q_z`
range, polarization states as well as the sample structure hash for
relevant x-ray and magnetic parameters. Optionally, part of the
``strain_map`` and ``magnetization_map`` are used.
Args:
**kwargs (ndarray[float]): spatio-temporal strain and magnetization
profile.
Returns:
hash (str): unique hash.
"""
param = [self.pol_in_state, self.pol_out_state, self._qz, self._energy]
if 'strain_map' in kwargs:
strain_map = kwargs.get('strain_map')
if np.size(strain_map) > 1e6:
strain_map = strain_map.flatten()[0:1000000]
param.append(strain_map)
if 'magnetization_map' in kwargs:
magnetization_map = kwargs.get('magnetization_map')
if np.size(magnetization_map) > 1e6:
magnetization_map = magnetization_map.flatten()[0:1000000]
param.append(magnetization_map)
return self.S.get_hash(types=['xray', 'magnetic']) + '_' + make_hash_md5(param)
def set_incoming_polarization(self, pol_in_state):
"""set_incoming_polarization
Sets the incoming polarization factor for circular +, circular -, sigma,
pi, and unpolarized polarization.
Args:
pol_in_state (int): incoming polarization state id.
"""
self.pol_in_state = pol_in_state
if (self.pol_in_state == 1): # circ +
            self.pol_in = np.array([-np.sqrt(.5)
""" Calculating SNR of the GC candidates """
import os
import numpy as np
import astropy.units as u
import astropy.constants as const
from astropy.table import Table, vstack
import matplotlib.pyplot as plt
from spectres import spectres
from tqdm import tqdm
import context
import misc
def der_snr(flux, axis=1, full_output=False):
""" Calculates the S/N ratio of a spectra.
Translated from the IDL routine der_snr.pro """
signal = np.nanmean(flux, axis=axis)
    noise = 1.482602 / np.sqrt(6.)
import numpy as np
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
def anglescan(S, phi, theta, z_profile, wind=True, debug=True, trace=False):
# switched positions (Jun 2019)
# This function should be called for every station
# Original Author: <NAME>
# Theta is vertical
# phi is horizontal
""" Finds an optimal ray from the source of the Supracenter to the detector, by making a guess,
and checking for the angle of minimum error. This is repeated with better guesses by taking angles around the best angle of the previous step.
    Arguments:
        S: [array] 3-D local coordinates of the source of the sound. Given as: [x, y, Elevation]
        phi: [float] initial azimuthal angle of the ray, measured clockwise from North (degrees)
        theta: [float] initial takeoff (zenith) angle of the ray (degrees)
        z_profile: [array] The given atmospheric profile between the source and the detector. Given as: [Height, Sound Speed, Wind Speed, Wind Direction] for different values of height
    Keyword arguments:
        wind: [bool] False - disable winds, True - enable winds
        debug: [bool] enable debug output
        trace: [bool] if True, also record the ray path for tracing
Returns:
t_arrival: [float] Direct arrival time between source and detector
Azimuth: [float] The initial azimuthal angle for the source to the detector in degrees
Takeoff: [float] The initial takeoff angle from the source to the detector in degrees
See diagram on pg 34 of SUPRACENTER for more information
"""
# Azimuths and Wind directions are measured as angles from north, and increasing clockwise to the East
phi = (phi - 90)%360
# Flip coordinate system horizontally
phi = (360 - phi)%360
phi = np.radians(phi)
theta = np.radians(theta)
# Switch to turn off winds
if not wind:
z_profile[:, 2] = 0
z_profile[:, 1] = 330
# The number of layers in the integration region
n_layers = len(z_profile)
# Slowness, as defined in SUPRACENTER on pg 35, s = 1/c
s = 1.0/z_profile[0:n_layers, 1]
# Elevation for that layer
z = z_profile[0:n_layers, 0]
# Component of wind vector in the direction of phi and phi + pi/2 respectively
u = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi)
v = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi+np.pi/2) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi+np.pi/2)
s_val = s[n_layers-1]
# ray parameter
p = s_val*np.sin(theta)/(1 + s_val*u[n_layers - 1]*np.sin(theta))
X = 0
Y = 0
#Travel time
t_arrival = 0
if trace:
T = []
T.append([S[0], S[1], S[2], t_arrival])
# ignore negative roots
    np.seterr(divide='ignore', invalid='ignore')
import numpy as np
import csv
from scipy.optimize import minimize
from scipy.spatial.transform import Rotation as R
ID = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
def random_unit_vector():
"""Generate a random 3D unit vector
Returns:
np.array: a random 3D unit vector
"""
z = np.random.uniform(-1, 1)
theta = np.random.uniform(0, 2*np.pi)
return(np.array([
np.sqrt(1-z**2)*np.cos(theta),
np.sqrt(1-z**2)*np.sin(theta),
z
]))
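# Quick usage sketch (illustrative only): draw a few random directions and check that
# they are unit length, which is all random_unit_vector guarantees.
def _demo_random_unit_vector(n=5):
    for _ in range(n):
        u = random_unit_vector()
        assert np.isclose(np.linalg.norm(u), 1.0)
    return u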
def gen_observation(p, u, a, d, epsilon=1e-6):
"""Generate an observation from a point looking at a plane.
Generates an observation (distance and observation point) for a sensor at
location p looking in the direction given by the vector u looking at the
plane defined by a[0]x + a[1]y + a[2]z + d = 0.
https://rosettacode.org/wiki/Find_the_intersection_of_a_line_with_a_plane#Python
Args:
p (3-tuple of floats): the position of the sensor (x, y, z).
u (3-tuple of floats): the orientation of the sensor (x, y, z).
Does not have to be a unit vector.
a (3-tuple of floats): the equation for the line where a[0]x + a[1]y + a[2]z + d = 0.
d (float) the c portion of the line equation.
Returns:
The distance and intersection point as a tuple, for example, with distance
5.2 and intersection point (8.1, 0.3, 4):
(5.2, (8.1, 0.3, 4)) or float('inf') if the sensor does not see the plane.
Raises:
ValueError: The line is undefined.
"""
a = np.array(a)
p = np.array(p)
u = np.array(u)
if(a[0] != 0):
plane_point = np.array([-d/a[0], 0, 0])
elif(a[1] != 0):
plane_point = np.array([0, -d/a[1], 0])
elif(a[2] != 0):
plane_point = np.array([0, 0, -d/a[2]])
else:
raise ValueError("The plane with normal a=[0,0,0] is undefined")
ndotu = a.dot(u)
if abs(ndotu) < epsilon:
return float('inf')
w = p - plane_point
si = -a.dot(w) / ndotu
Psi = w + si * u + plane_point
dist = np.linalg.norm(Psi - p)
if(np.allclose((dist * u) + p, Psi)):
return (dist, Psi)
else:
return float('inf')
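# Usage sketch (illustrative, values made up): a sensor at the origin looking along +x
# at the plane x = 10 (a = (1, 0, 0), d = -10) should report a distance of 10 and an
# intersection point of (10, 0, 0).
def _demo_gen_observation():
    dist, point = gen_observation(p=(0, 0, 0), u=(1, 0, 0), a=(1, 0, 0), d=-10)
    assert np.isclose(dist, 10.0)
    assert np.allclose(point, [10, 0, 0])
    return dist, point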
def angle_between(u1, u2):
"""Get the angle between two unit vectors, in radians
Args:
u1: unit vector
u2: unit vector
Returns:
(float): angle between u1 and u2, in radians
"""
u1 = np.array(u1)
u2 = np.array(u2)
    assert(
        np.abs(np.linalg.norm(u1) - 1) < 0.0001
        and np.abs(np.linalg.norm(u2) - 1) < 0.0001
    )
angle = np.arccos(np.dot(u1, u2) / (np.linalg.norm(u1) * np.linalg.norm(u2)))
return angle
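# Usage sketch (illustrative only): the angle between the x and y unit vectors should
# come out as pi/2 radians.
def _demo_angle_between():
    angle = angle_between([1, 0, 0], [0, 1, 0])
    assert np.isclose(angle, np.pi / 2)
    return angle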
def generate_motions(p, u, a, d, plane_center, bbox, radius=500, n=32):
"""Generate random robot motions that point sensor at plane
Generate n motions that keep the sensor at position p and orientation u
pointing at the plane given by a[0] + a[1] + a[2] + d = 0
Args:
p: 3D position of sensor on robot segment
u: heading unit vector for sensor
a: a vector for plane in equation ax+d=0
d: d scalar for plane in equation ax+d=0
plane_center: where on the plane to center the points you're aiming for
around
bbox: bounding box for the sensor, given as a 2D array with like so:
[
[xmin, xmax],
[ymin, ymax],
[zmin, zmax]
]
radius (default 500): how far from the center intersection points on the
plane should be
n (default 32): how many motions to generate
Returns:
(n x 4 x 4 array): robot motions as transforms in homogenous coordinates
"""
p = np.array(p)
u = np.array(u)
a = np.array(a)
# generate points on plane
xs = scattered_on_plane(a, d, plane_center.reshape(3), radius, n)
# generate positions for sensor in space
ps = []
while (len(ps) < len(xs)):
pt = np.array([
np.random.uniform(*bbox[0]),
np.random.uniform(*bbox[1]),
np.random.uniform(*bbox[2])
])
# check that pt is on the same side of the plane as the center of the robot
if (np.sign(np.dot(a,pt)+d) == np.sign(np.dot(a, np.array([0, 0, 0]))+d)):
# check that pt is at least 10cm away from the plane
if (np.abs(np.dot(a,pt)+d) > 100):
ps.append(pt)
# generate unit vectors that point sensor points to plane points
us = [(p - s) / np.linalg.norm(p - s) for p, s in zip(xs, ps)]
# convert list of points and unit vectors to list of transforms
tfs = points_to_transforms([p, *ps], [u, *us])
return tfs
def scattered_on_plane(a, d, center, radius, num_points):
"""Generate points scattered on the plane given by a, d
Args:
a: a parameter for plane (3D vector)
d: d parameter for plane
center: center point from which points are scattered
radius: radius of scattered points
num_points: number of scattered points
Returns:
(num_points x 3 array): coordinates of points on plane
"""
if(np.dot(a, center)+d > 0.00001):
raise ValueError("center is not on plane given by ax+d=0")
# generate a random vector and make it orthogonal to a
# https://stackoverflow.com/questions/33658620/generating-two-orthogonal-vectors-that-are-orthogonal-to-a-particular-direction
xvec = np.random.randn(3)
xvec -= xvec.dot(a) * a / np.linalg.norm(a)**2
xvec /= np.linalg.norm(xvec)
yvec = np.cross(a, xvec)
points = []
for _ in range(num_points):
xcomp = np.random.uniform(-radius, radius)
ycomp = np.random.uniform(-radius, radius)
points.append(center + (xcomp*xvec + ycomp*yvec))
return points
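# Usage sketch (illustrative, values made up): scatter a few points on the plane z = 5
# (a = (0, 0, 1), d = -5) around the center (0, 0, 5) and confirm that every returned
# point still satisfies the plane equation a.x + d = 0.
def _demo_scattered_on_plane():
    a = np.array([0.0, 0.0, 1.0])
    d = -5.0
    pts = scattered_on_plane(a, d, np.array([0.0, 0.0, 5.0]), radius=10.0, num_points=8)
    assert all(abs(np.dot(a, p) + d) < 1e-6 for p in pts)
    return pts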
def points_to_transforms(points, units):
"""Convert a set of points to a set of transforms
Arguments:
points (list of 3-tuples): point positions (first is starting pos)
units (list of 3-tuples): unit vector directions (first is starting)
Returns:
(list of 4x4 np.array): transformations leading from starting point to
each other point (first will be identity)
"""
return([get_transform(points[0], units[0], pt, u) for pt, u in zip(points[1:], units[1:])])
def to_hom(vec):
"""Takes a numpy array and adds a 1 to the end of it
"""
return np.append(vec, [1])
def from_hom(vec):
"""Takes a numpy array and removes a 1 from the end of it
"""
return vec[:-1]
def get_transform(p1, u1, p2, u2):
"""Get the transform from pos. p1, rot. u1 to pos. p2, rot. u2
Arguments:
p1 (3-tuple): x, y, z coordinates of starting position
u1 (3-tuple): x, y, z coordinates of starting unit vector orientation
p2 (3-tuple): x, y, z coordinates of final position
u2 (3-tuple): x, y, z coordinates of final unit vector orientation
Returns:
(4x4 np.array): the transform from p1, u1 to p2, u2 in homogenous coord.
https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
"""
u1 = np.array(u1)
u2 = np.array(u2)
p1 = np.array(p1)
p2 = np.array(p2)
if(np.allclose(u1, u2)):
R = np.identity(3)
else:
v = np.cross(u1, u2)
s = np.linalg.norm(v)
if(s == 0):
if(u1[0] == u2[1] and u1[1] == u2[0]): #BUG there are other cases like this that aren't covered
            R = np.array([
                [0, 1, 0],
                [1, 0, 0],
                [0, 0, 1]
            ])
else:
c = np.dot(u1, u2)
vx = np.array([
[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]
])
R = np.identity(3) + vx + (vx @ vx) * ((1 - c)/(s*s))
new_p = R @ p1
t = p2 - new_p
tf = np.array([
[R[0][0], R[0][1], R[0][2], t[0]],
[R[1][0], R[1][1], R[1][2], t[1]],
[R[2][0], R[2][1], R[2][2], t[2]],
[0, 0, 0, 1]
])
# tf * p1 should = p2
assert(np.allclose(tf @ np.append(p1, 1), np.append(p2, 1)))
# tf * u1 (with 0 for third coordinate - no translation) should = u2
assert(np.allclose(tf @ np.append(u1, 0), np.append(u2, 0)))
return tf
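# Usage sketch (illustrative, values made up): transform a pose at the origin heading
# along +x into a pose at (1, 2, 3) heading along +y; the returned 4x4 homogeneous
# matrix maps the first pose onto the second (get_transform also asserts this itself).
def _demo_get_transform():
    tf = get_transform([0, 0, 0], [1, 0, 0], [1, 2, 3], [0, 1, 0])
    assert np.allclose(tf @ np.array([0, 0, 0, 1]), np.array([1, 2, 3, 1]))
    return tf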
def read_data(data_path):
"""Read real-world trial data from given folder
Arguments:
data_path (string): path to folder containing measurements.csv and
transforms.csv files to be read in
Returns:
(tuple of arrays): measurements array and transforms array populated with
data from data_path/measurements.csv and data_path/transforms.csv
"""
with open(data_path + "/measurements.csv") as f:
csvfile = csv.reader(f)
measurements = []
for line in csvfile:
measurements.append(np.average([float(x) for x in line[1:]]))
measurements = np.array(measurements)
with open(data_path + "/transforms.csv") as f:
csvfile = csv.reader(f)
raw_transforms = []
for line in csvfile:
items = []
for item in line:
if(item != ' '):
items.append(float(item))
            raw_transforms.append(np.reshape(np.array(items)
import sys
import numpy as np
from matplotlib import pyplot as pl
from scipy.spatial.distance import pdist, squareform
from scipy.spatial import cKDTree as kdtree
def FitPlane(pnts):
"""
Given a set of 3D points pnts.shape = (x, 3),
return the normal vector (nx, ny, nz)
"""
c = pnts.mean(axis = 0)
x = pnts - c
M = np.dot(x.T, x)
return np.linalg.svd(M)[0][:,-1]
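# Usage sketch (illustrative only): for points scattered on the plane z = 0 the fitted
# normal should be (0, 0, +/-1) up to numerical noise.
def _demo_fit_plane(n=50):
    pnts = np.zeros((n, 3))
    pnts[:, 0] = np.random.random(n)
    pnts[:, 1] = np.random.random(n)
    normal = FitPlane(pnts)
    assert np.isclose(abs(normal[2]), 1.0)
    return normal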
def main(n, r = 0.1):
x = (np.random.random(n) - 0.5) * 4
y = (np.random.random(n) - 0.5) * 3
z = np.exp(-x*x-y*y)
tree = kdtree(np.transpose((x, y)))
slp = np.zeros(n)
for i in range(n):
nb = tree.query_ball_point((x[i], y[i]), r)
pts = np.transpose((x[nb], y[nb], z[nb]))
nx, ny, nz = FitPlane(pts)
slp[i] = np.sqrt(nx*nx+ny*ny) / nz
slp = np.arctan(slp) * 180 / np.pi
pl.title('Numerical')
pl.scatter(x, y, c = slp)
pl.colorbar()
pl.axes().set_aspect('equal')
pl.show()
# theoretical results
    rp = np.sqrt(x*x+y*y)
import bayesiancoresets as bc
import numpy as np
import warnings
warnings.filterwarnings('ignore', category=UserWarning) #tests will generate warnings (due to pathological data design for testing), just ignore them
np.seterr(all='raise')
np.set_printoptions(linewidth=500)
np.random.seed(100)
tol = 1e-9
def test_empty():
x = np.random.randn(0, 0)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
        assert np.all(fd.weights() == np.ones(x.shape[0]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
from scipy import constants
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def _mass_ratio(specie):
if specie in ["ions", "i"]:
mass_ratio = 1
elif specie in ["electrons", "e"]:
mass_ratio = constants.electron_mass / constants.proton_mass
else:
raise ValueError("Invalid specie")
return mass_ratio
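# Usage sketch (illustrative only): ions map to a mass ratio of 1, electrons to
# m_e / m_p (roughly 1/1836).
def _demo_mass_ratio():
    assert _mass_ratio("i") == 1
    assert np.isclose(_mass_ratio("e"), 1.0 / 1836.15267343, rtol=1e-6)
    return _mass_ratio("e")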
def _convert(vdf, mass_ratio):
if vdf.attrs["UNITS"].lower() == "s^3/cm^6":
out = vdf.data.data * 1e30 / (1e6 * 0.53707 * mass_ratio ** 2)
elif vdf.attrs["UNITS"].lower() == "s^3/m^6":
out = vdf.data.data * 1e18 / (1e6 * 0.53707 * mass_ratio ** 2)
elif vdf.attrs["UNITS"].lower() == "s^3/km^6":
out = vdf.data.data / (1e6 * 0.53707 * mass_ratio ** 2)
else:
raise ValueError("Invalid unit")
return out
def psd2def(vdf):
r"""Changes units to differential energy flux.
Parameters
----------
vdf : xarray.Dataset
Time series of the 3D velocity distribution with :
* time : Time samples.
* data : 3D velocity distribution.
* energy : Energy levels.
* phi : Azimuthal angles.
* theta : Elevation angle.
Returns
-------
out : xarray.Dataset
Time series of the 3D differential energy flux with :
* time : Time samples.
* data : 3D density energy flux.
* energy : Energy levels.
* phi : Azimuthal angles.
* theta : Elevation angle.
"""
mass_ratio = _mass_ratio(vdf.attrs["species"])
tmp_data = _convert(vdf, mass_ratio)
energy = vdf.energy.data
if tmp_data.ndim == 2:
tmp_data = tmp_data[:, :, None, None]
data_r = np.reshape(tmp_data, (tmp_data.shape[0], tmp_data.shape[1],
                                        np.prod(tmp_data.shape[2:])))
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from multiprocessing import Pool
import numpy as np
from cvxpy import norm, Minimize, Problem, square, Variable
def solveX(data):
a = data[0:3]
u = data[3:6]
z = data[6:9]
rho = data[9]
x = Variable(3,1)
g = square(norm(x - a)) + rho/2*square(norm(x - z + u))
objective = Minimize(g)
p = Problem(objective, [])
result = p.solve()
return x.value
def main():
#Solve the following consensus problem using ADMM:
#Minimize sum(f_i(x)), where f_i(x) = square(norm(x - a_i))
#Generate a_i's
np.random.seed(0)
a = np.random.randn(3, 10)
#Initialize variables to zero
x = np.zeros((3,10))
u = np.zeros((3,10))
z = np.zeros((3,1))
rho = 5
#Run 50 steps of ADMM
iters = 0
pool = Pool(processes = 10)
while(iters < 50):
#x-update: update each x_i in parallel
temp = np.concatenate((a,u,np.tile(z, (1,10)),np.tile(rho, (10,1)).transpose()), axis=0)
xnew = pool.map(solveX, temp.transpose())
x = np.array(xnew).transpose()[0]
#z-update
znew = Variable(3,1)
h = 0
for i in range(10):
h = h + rho/2*square(norm(x[:,i] - znew + u[:,i]))
objective = Minimize(h)
p = Problem(objective, [])
result = p.solve()
        z = np.array(znew.value)
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import time
from scipy import stats
from sklearn.metrics import r2_score
import math
# Force using CPU globally by hiding GPU(s)
tf.config.set_visible_devices([], 'GPU')
# import edl
import evidential_deep_learning as edl
import data_loader
import trainers
import models
from models.toy.h_params import h_params
import itertools
tf.config.threading.set_intra_op_parallelism_threads(1)
import random
data_name = 'flight_delay'
original_data_path = '../flight_delay_data/'
results_path = './Results_DER/'+data_name + '_DER_results.txt'
save_loss_history = False
save_loss_history_path = './Results_DER/loss_history/'
plot_loss_history = False
plot_loss_history_path = './Results_DER/loss_curves/'
parser = argparse.ArgumentParser()
parser.add_argument("--num-trials", default=1, type=int,
help="Number of trials to repreat training for \
statistically significant results.")
parser.add_argument("--num-epochs", default=100, type=int)
parser.add_argument('--datasets', nargs='+', default=["flight_delay"],
choices=['flight_delay'])
dataset = data_name
# learning_rate = h_params[dataset]["learning_rate"]
# batch_size = h_params[dataset]["batch_size"]
learning_rate = 1e-4
batch_size = 512
neurons = 100
### New flight delay data loader for customized train/test data same with PI3NN method
xTrain, yTrain, yTrain_scale, test_data_list = data_loader.load_flight_delays('../flight_delay_data/')
# '''choose the train/test dataset '''
x_train = xTrain
y_train = yTrain
y_scale = yTrain_scale
test_idx = 0 # [0, 1, 2, 3] for test 1,2,3,4
x_test = test_data_list[test_idx][0]
y_test = test_data_list[test_idx][1]
seed = 12345
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
args = parser.parse_args()
args.datasets[0] = data_name
training_schemes = [trainers.Evidential]
datasets = args.datasets
print('--- Printing datasets:')
print(datasets)
num_trials = args.num_trials
print('num_trials:{}'.format(num_trials))
# num_trials = 3
num_epochs = args.num_epochs
dev = "/cpu:0" # for small datasets/models cpu is faster than gpu
"""" ================================================"""
RMSE = np.zeros((len(datasets), len(training_schemes), num_trials))
NLL = np.zeros((len(datasets), len(training_schemes), num_trials))
PICP_arr = np.zeros(num_trials)
MPIW_arr = np.zeros(num_trials)
R2_arr = np.zeros(num_trials)
for di, dataset in enumerate(datasets):
# print(di)
# print(dataset)
for ti, trainer_obj in enumerate(training_schemes):
for n in range(num_trials):
print('*********************************************')
print('--- data: {}, trial: {}'.format(data_name, n+1))
print('*********************************************')
# batch_size = h_params[dataset]["batch_size"]
num_iterations = num_epochs * x_train.shape[0]//batch_size
print('num_epochs: {}, num_x_data: {}, batch_size: {}, total iters {} = {} * {} // {}'.format(num_epochs, x_train.shape[0], batch_size, num_iterations, num_epochs, x_train.shape[0], batch_size))
done = False
while not done:
with tf.device(dev):
model_generator = models.get_correct_model(dataset="toy", trainer=trainer_obj)
model, opts = model_generator.create(input_shape=x_train.shape[1:], num_neurons=neurons, tf_seed=seed)
trainer = trainer_obj(model, opts, dataset, learning_rate=learning_rate)
model, rmse, nll, loss = trainer.train(x_train, y_train, x_test, y_test, y_scale, batch_size=batch_size, iters=num_iterations,
verbose=True, data_name=data_name, rnd_seed=seed, trial_num=n,
bool_plot_loss=False, bool_save_loss=True,
save_loss_path=save_loss_history_path,
plot_loss_path=plot_loss_history_path)
''' Evaluate the PICP and MPIW for each trial '''
### taken from the 'plot_ng' function from the original evidential regression code
x_test_input_tf = tf.convert_to_tensor(x_test, tf.float32)
outputs = model(x_test_input_tf)
mu, v, alpha, beta = tf.split(outputs, 4, axis=1)
epistemic_var = np.sqrt(beta / (v * (alpha - 1)))
epistemic_var = np.minimum(epistemic_var, 1e3)
y_pred_U = mu.numpy() + epistemic_var * 1.96
y_pred_L = mu.numpy() - epistemic_var * 1.96
# print('y_pred_U: {}'.format(y_pred_U))
# print('y_pred_L: {}'.format(y_pred_L))
''' Do same thing for training data in order to do OOD analysis '''
x_train_input_tf = tf.convert_to_tensor(x_train, tf.float32)
outputs_train = model(x_train_input_tf)
mu_train, v_train, alpha_train, beta_train = tf.split(outputs_train, 4, axis=1)
epistemic_var_train = np.sqrt(beta_train / (v_train * (alpha_train - 1)))
epistemic_var_train = np.minimum(epistemic_var_train, 1e3)
y_pred_U_train = mu_train.numpy() + epistemic_var_train * 1.96
y_pred_L_train = mu_train.numpy() - epistemic_var_train * 1.96
if np.isnan(y_pred_U).any() or np.isnan(y_pred_L).any():
PICP = math.nan
MPIW = math.nan
R2 = math.nan
rmse = math.nan
nll = math.nan
print('--- the y_pred_U/L contains NaN(s) in current trial')
else:
''' Calculate the confidence scores (y-axis) range from 0-1'''
y_U_cap_train = y_pred_U_train.flatten() > y_train
y_L_cap_train = y_pred_L_train.flatten() < y_train
MPIW_array_train = y_pred_U_train.flatten() - y_pred_L_train.flatten()
MPIW_train = np.mean(MPIW_array_train)
#### for test (evaluate each y_U_cap - y_L_cap in the pre-calculated MPIW_train single value
# for the confidence score)
print(y_pred_U.shape)
print(y_pred_L.shape)
print(y_test.reshape(-1).shape)
y_pred_U = y_pred_U.reshape(-1)
y_pred_L = y_pred_L.reshape(-1)
y_U_cap = y_pred_U > y_test
y_L_cap = y_pred_L < y_test
# print('y_U_cap: {}'.format(y_U_cap))
# print('y_L_cap: {}'.format(y_L_cap))
# print('y_L_cap: {}'.format(y_L_cap))
y_all_cap = y_U_cap * y_L_cap
PICP = np.sum(y_all_cap) / y_L_cap.shape[0]
MPIW_array = y_pred_U - y_pred_L
MPIW = np.mean(MPIW_array)
confidence_arr_test = [min(MPIW_train / test_width, 1.0) for test_width in MPIW_array]
confidence_arr_train = [min(MPIW_train / train_width, 1.0) for train_width in MPIW_array_train]
print('----------- OOD analysis --- confidence scores ----------------')
print('--- Train conf_scores MEAN: {}, STD: {}'.format(np.mean(confidence_arr_train), np.std(confidence_arr_train)))
print('--- Test: {} rank: {} conf_scores MEAN: {}, STD: {}'.format(test_idx+1, test_idx+1, np.mean(confidence_arr_test), np.std(confidence_arr_test)))
''' Calculate the L2 distance to the mean of training data (x-axis), range from 0-30'''
                        dist_arr_train = np.sqrt(np.sum(x_train ** 2.0, axis=1))
import cv2
import numpy as np
import imutils
from collections import defaultdict
# mouse callback function
def define_points(target_img):
corners = []
refPt = []
def draw_circle(event,x,y,flags,param):
global refPt
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(param,(x,y),5,(255,0,0),-1)
refPt = [x,y]
print(type(refPt))
corners.append(refPt)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle, target_img)
while(1):
cv2.imshow('image',target_img)
k = cv2.waitKey(20) & 0xFF
# corners.append(refPt)
if k == 27:
break
cv2.destroyAllWindows()
print (corners)
new_corners = np.array(corners)
return new_corners
def order_points(pts):
# initialzie a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
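# Usage sketch (illustrative, values made up): corners of a 100x100 square given in
# scrambled order come back ordered as top-left, top-right, bottom-right, bottom-left.
def _demo_order_points():
    pts = np.array([[100, 100], [0, 0], [0, 100], [100, 0]], dtype="float32")
    rect = order_points(pts)
    assert np.allclose(rect, [[0, 0], [100, 0], [100, 100], [0, 100]])
    return rect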
def segment_by_angle_kmeans(lines,k=2, **kwargs):
"""Groups lines based on angle with k-means.
Uses k-means on the coordinates of the angle on the unit circle
to segment `k` angles inside `lines`.
"""
# Define criteria = (type, max_iter, epsilon)
default_criteria_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
criteria = kwargs.get('criteria', (default_criteria_type, 10, 1.0))
flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)
attempts = kwargs.get('attempts', 10)
# returns angles in [0, pi] in radians
angles = np.array([line[0][1] for line in lines])
# multiply the angles by two and find coordinates of that angle
pts = np.array([[np.cos(2*angle), np.sin(2*angle)]
for angle in angles], dtype=np.float32)
# run kmeans on the coords
labels, centers = cv2.kmeans(pts, k, None, criteria, attempts, flags)[1:]
labels = labels.reshape(-1) # transpose to row vec
# segment lines based on their kmeans label
segmented = defaultdict(list)
for i, line in zip(range(len(lines)), lines):
segmented[labels[i]].append(line)
segmented = list(segmented.values())
return segmented
def intersection(line1, line2):
"""Finds the intersection of two lines given in Hesse normal form.
Returns closest integer pixel locations.
See https://stackoverflow.com/a/383527/5087436
"""
rho1, theta1 = line1[0]
rho2, theta2 = line2[0]
A = np.array([
[np.cos(theta1), np.sin(theta1)],
[np.cos(theta2), np.sin(theta2)]
])
b = np.array([[rho1], [rho2]])
x0, y0 = np.linalg.solve(A, b)
x0, y0 = int(np.round(x0)), int(np.round(y0))
return [[x0, y0]]
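# Usage sketch (illustrative, values made up): a vertical Hough line (rho=10, theta=0)
# and a horizontal one (rho=20, theta=pi/2) intersect at pixel (10, 20).
def _demo_intersection():
    line1 = [[10.0, 0.0]]
    line2 = [[20.0, np.pi / 2]]
    assert intersection(line1, line2) == [[10, 20]]
    return intersection(line1, line2)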
def segmented_intersections(lines):
"""Finds the intersections between groups of lines."""
intersections = []
for i, group in enumerate(lines[:-1]):
for next_group in lines[i+1:]:
for line1 in group:
for line2 in next_group:
intersections.append(intersection(line1, line2))
return intersections
def isEqual(l1, l2):
    length1 = np.sqrt((l1[2] - l1[0])*(l1[2] - l1[0]) + (l1[3] - l1[1])*(l1[3] - l1[1]))
    length2 = np.sqrt((l2[2] - l2[0])*(l2[2] - l2[0]) + (l2[3] - l2[1])*(l2[3] - l2[1]))
    product = (l1[2] - l1[0])*(l2[2] - l2[0]) + (l1[3] - l1[1])*(l2[3] - l2[1])
    # lines are not considered equal if their directions differ by more than ~6 degrees
    if abs(product / (length1 * length2)) < np.cos(np.pi / 30):
        return False
    mx1 = (l1[0] + l1[2]) * 0.5
    mx2 = (l2[0] + l2[2]) * 0.5
    my1 = (l1[1] + l1[3]) * 0.5
    my2 = (l2[1] + l2[3]) * 0.5
    # the midpoints must also be close relative to the line lengths
    dist = np.sqrt((mx1 - mx2)*(mx1 - mx2) + (my1 - my2)*(my1 - my2))
    if dist > max(length1, length2) * 0.5:
        return False
    return True
def birdseye_correction(img = "angled.jpg"):
img = cv2.imread(img,0)
resized = imutils.resize(img, height = 1000)
copy = resized.copy()
rect = order_points(define_points(copy))
print (rect)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordiates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0]-bl[0])**2)+((br[1]-bl[1])**2))
widthB = np.sqrt(((tr[0]-tl[0])**2)+((tr[1]-tl[1])**2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0]-br[0])**2)+((tr[1]-br[1])**2))
heightB = np.sqrt(((tl[0]-bl[0])**2)+((tl[1]-bl[1])**2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([[0, 0], \
[maxWidth - 1, 0], \
[maxWidth - 1, maxHeight - 1], \
[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(resized, M, (maxWidth, maxHeight))
cv2.imshow("warped", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()
# gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
blurred_img = cv2.GaussianBlur(warped,(3,3),0)
binary = cv2.adaptiveThreshold(blurred_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,31,2)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(binary,cv2.MORPH_OPEN,kernel, iterations = 2)
# Apply edge detection method on the image
edges = cv2.Canny(warped,50,150,apertureSize = 3)
#
cv2.imshow("edges", edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
# This returns an array of r and theta values
lines = cv2.HoughLines(edges,1,np.pi/180, 140)
# The below for loop runs till r and theta values
# are in the range of the 2d array
for line in lines:
for r,theta in line:
# Stores the value of cos(theta) in a
            a = np.cos(theta)
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf8
import argparse
import os
import random
import shutil
import time
import warnings
#from __future__ import division
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from apex import amp
import numpy as np
import onnx
import onnxruntime as ort
from export.onnx_parser import *
from export.cp_parser import *
from onnx import numpy_helper
import urllib.request
import json
import time
# display images in notebook
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
#%matplotlib inline
#G_ONNX_FILEPATH = './densenet121.onnx'
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("[gpu id:",os.environ['KERNEL_NAME_ID'],"]",'\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def load_labels(path):
with open(path) as f:
# content = f.read()
# if content.startswith(u'\ufeff'):
# content = content.encode('utf8')[3:].decode('utf8')
data = json.load(f)
return np.asarray(data)
def preprocess(input_data,dimx=1):
# convert the input data into the float32 input
img_data = input_data.astype('float32')
#normalize
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
norm_img_data = np.zeros(img_data.shape).astype('float32')
for i in range(img_data.shape[0]):
norm_img_data[i,:,:] = (img_data[i,:,:]/255 - mean_vec[i]) / stddev_vec[i]
#add batch channel
print(norm_img_data.shape)
#norm_img_data = norm_img_data.reshape(1, 3, 224, 224).astype('float32')
norm_img_data.resize(dimx, 3, 224, 224)
norm_img_data = norm_img_data.astype('float32')
return norm_img_data
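# Usage sketch (illustrative only): push a random CHW uint8 "image" through preprocess
# and confirm the NCHW float32 output shape expected by the ONNX session below.
def _demo_preprocess():
    fake_image = np.random.randint(0, 255, (3, 224, 224), dtype=np.uint8)
    batch = preprocess(fake_image, dimx=1)
    assert batch.shape == (1, 3, 224, 224)
    assert batch.dtype == np.float32
    return batch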
def softmax(x):
x = x.reshape(-1)
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def postprocess(result):
return softmax(np.array(result)).tolist()
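# Usage sketch (illustrative only): postprocess flattens the raw model output and turns
# it into a probability vector that sums to one.
def _demo_postprocess():
    probs = postprocess([[1.0, 2.0, 3.0]])
    assert np.isclose(sum(probs), 1.0)
    assert len(probs) == 3
    return probs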
test_data_dir = 'resnet50v2/test_data_set'
test_data_num = 3
import glob
import os
def load_IOPuts():
# Load inputs
inputs = []
for i in range(test_data_num):
input_file = os.path.join(test_data_dir + '_{}'.format(i), 'input_0.pb')
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
print('Loaded {} inputs successfully.'.format(test_data_num))
# Load reference outputs
ref_outputs = []
for i in range(test_data_num):
output_file = os.path.join(test_data_dir + '_{}'.format(i), 'output_0.pb')
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
print('Loaded {} reference outputs successfully.'.format(test_data_num))
return inputs,ref_outputs
def onnxrt_run_resnet50(onnx_path,inputs,ref_outputs):
sess = ort.InferenceSession(onnx_path, None)
input_name = sess.get_inputs()[0].name
print('Input Name:', input_name)
# img_height = 224
# img_width = 224
#sess.run(None, {'input_1': np.random.rand(2, 3, img_height, img_width).astype('float32')})
# rtn = sess.run(None, {'actual_input_1': np.random.rand(10, 3, img_height, img_width).astype('float32')})
# return rtn
#%%time
print(onnx_path)
print(type(inputs[0]),np.size(inputs[0]))
outputs = [sess.run([], {input_name: inputs[i]})[0] for i in range(test_data_num)]
print('Predicted {} results.'.format(len(outputs)))
# Compare the results with reference outputs up to 4 decimal places
for ref_o, o in zip(ref_outputs, outputs):
np.testing.assert_almost_equal(ref_o, o, 4)
print('ONNX Runtime outputs are similar to reference outputs!')
return outputs
def onnxrt_runeval_resnet50(cp_path,onnx_path):
# Load the ONNX model
#onnx_model = onnx.load(onnx_path)
#cp_model = cp_load(torchvision.models.resnet50(pretrained=False),cp_path)
print(onnx_path)
inputs,ref_outputs = load_IOPuts()
    onnxrt_result = onnxrt_run_resnet50(onnx_path, inputs, ref_outputs)
# compare ONNX Runtime and PyTorch results
#np.testing.assert_allclose(to_numpy(torch_out), onnxrt_result[0], rtol=1e-03, atol=1e-05)
#print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def onnxrt_runeval_resnet50_pic(cp_path,onnx_path,dimx=1):
session = ort.InferenceSession(onnx_path, None)
labels = load_labels('resnet50v2/imagenet-simple-labels.json')
image = Image.open('resnet50v2/dog224.224.jpg')
#image = Image.open('resnet50v2/dog.jpg')
#image = Image.open('resnet50v2/dog.png')
# image = Image.open('images/plane.jpg')
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name
print("input_name: ", input_name, "output_name: ", output_name)
print("Image size: ", image.size)
plt.axis('off')
display_image = plt.imshow(image)
image_data = np.array(image).transpose(2, 0, 1)
input_data = preprocess(image_data,dimx=dimx)
start = time.time()
raw_result = session.run([output_name], {input_name: input_data})
end = time.time()
res = postprocess(raw_result)
inference_time = np.round((end - start) * 1000, 2)
idx = np.argmax(res)
print('========================================')
print('Final top prediction is: ' + labels[idx])
print('========================================')
print('========================================')
print('Inference time: ' + str(inference_time) + " ms")
print('========================================')
sort_idx = np.flip(np.squeeze(np.argsort(res)))
print('============ Top 5 labels are: ============================')
print(labels[sort_idx[:5]])
print('===========================================================')
plt.axis('off')
display_image = plt.imshow(image)
def resnet50_eval(cp_path,dimx=1):
resnet50 = torchvision.models.resnet50(pretrained=False)
model = cp_load(resnet50,cp_path)
model.eval()
image = Image.open('resnet50v2/dog224.224.jpg')
image_data = np.array(image).transpose(2, 0, 1)
input_data = preprocess(image_data, dimx=dimx)
image_tensor = torch.from_numpy(input_data)
out = model(image_tensor)
_, indices = torch.sort(out, descending=True)
return indices[0]
def onnxrt_resnet50_eval(onnx_path,dimx=1):
#print('-------------cp_path-------------',cp_path)
session = ort.InferenceSession(onnx_path, None)
image = Image.open('resnet50v2/dog224.224.jpg')
#image = Image.open('resnet50v2/dog.jpg')
#image = Image.open('resnet50v2/dog.png')
# image = Image.open('images/plane.jpg')
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name
print("input_name: ", input_name, "output_name: ", output_name)
print("Image size: ", image.size)
plt.axis('off')
    image_data = np.array(image)
import numpy as np
import h5py
import json
import sys
sys.path.append("C:/Users/qq651/OneDrive/Codes/")
sys.path.append("C:/Users/qq651/OneDrive/Codes/A2project")
import illustris_python as il
import matplotlib.pyplot as plt
from plotTools.plot import *
import illustrisAPI as iapi
def logMasSun(data):
if type(data) != type(np.array(0)):
data = np.array(data)
data = np.log10(data * 10 ** 10)
data[np.isinf(data)] = 0
return data
def logmass(data):
if type(data) != type(np.array(0)):
data = np.array(data)
    data = np.log10(data)
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
from future.utils import raise_
import unittest
import copy
import os
import numpy as num
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.geometry.polygon import is_inside_polygon
from anuga.abstract_2d_finite_volumes.util import file_function
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import g
from anuga.shallow_water.boundaries import Reflective_boundary, \
Field_boundary, Transmissive_momentum_set_stage_boundary, \
Transmissive_stage_zero_momentum_boundary
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions\
import Transmissive_boundary, Dirichlet_boundary, \
Time_boundary, File_boundary, AWI_boundary
from anuga.file.sww import get_mesh_and_quantities_from_file
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.mesh_factory \
import rectangular_cross, rectangular
from anuga.shallow_water.sww_interrogate import get_maximum_inundation_elevation, \
get_maximum_inundation_location, get_maximum_inundation_data, \
get_flow_through_cross_section, get_energy_through_cross_section
class Test_sww_Interrogate(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
for file in ['flowtest.sww', 'flowtest_uniquely.sww', 'runup_test_2.sww']:
try:
os.remove(file)
except:
pass
def test_get_maximum_inundation_de0(self):
"""Test that sww information can be converted correctly to maximum
runup elevation and location (without and with georeferencing)
This test creates a slope and a runup which is maximal (~11m) at around 10s
and levels out to the boundary condition (1m) at about 30s.
"""
import time, os
from anuga.file.netcdf import NetCDFFile
verbose = False
#Setup
#from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
# Create basic mesh (100m x 100m)
points, vertices, boundary = rectangular(20, 5, 100, 50)
# Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.set_flow_algorithm('DE0')
domain.set_low_froude(0)
domain.set_minimum_storable_height(0.01)
filename = 'runup_test_3'
domain.set_name(filename)
swwfile = domain.get_name() + '.sww'
domain.set_datadir('.')
domain.format = 'sww'
domain.smooth = True
# FIXME (Ole): Backwards compatibility
# Look at sww file and see what happens when
# domain.tight_slope_limiters = 1
domain.tight_slope_limiters = 0
domain.use_centroid_velocities = 0 # Backwards compatibility (7/5/8)
Br = Reflective_boundary(domain)
Bd = Dirichlet_boundary([1.0,0,0])
#---------- First run without geo referencing
domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
domain.set_quantity('stage', -6)
domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
for t in domain.evolve(yieldstep=1, finaltime = 50):
pass
# Check maximal runup
runup, location, max_time = get_maximum_inundation_data(swwfile, return_time=True)
if verbose:
print('Runup, location', runup, location, max_time)
assert num.allclose(runup, 3.33333325386)
assert num.allclose(location, [53.333332, 43.333332])
assert num.allclose(max_time, 10.0)
# Check runup in restricted time interval
runup, location, max_time = get_maximum_inundation_data(swwfile, time_interval=[0,9], return_time=True)
if verbose:
print('Runup, location:',runup, location, max_time)
assert num.allclose(runup, 2.66666674614)
assert num.allclose(location, [56.666668, 16.666666])
assert num.allclose(max_time, 9.0)
# Check final runup
runup, location = get_maximum_inundation_data(swwfile, time_interval=[45,50])
if verbose:
print('Runup, location:',runup, location, max_time)
assert num.allclose(runup, 3.33333325386)
assert num.allclose(location, [53.333332, 33.333332])
#assert num.allclose(max_time, 45.0)
# Check runup restricted to a polygon
p = [[50,1], [99,1], [99,40], [50,40]]
runup, location = get_maximum_inundation_data(swwfile, polygon=p)
#runup = get_maximum_inundation_elevation(swwfile, polygon=p)
#location = get_maximum_inundation_location(swwfile, polygon=p)
#print runup, location, max_time
assert num.allclose(runup, 3.33333325386)
assert num.allclose(location, [53.333332, 33.333332])
#assert num.allclose(max_time, 11.0)
        # Check that minimum_storable_height works
fid = NetCDFFile(swwfile, netcdf_mode_r) # Open existing file
stage = fid.variables['stage_c'][:]
z = fid.variables['elevation_c'][:]
xmomentum = fid.variables['xmomentum_c'][:]
ymomentum = fid.variables['ymomentum_c'][:]
for i in range(stage.shape[0]):
h = stage[i]-z # depth vector at time step i
# Check every node location
for j in range(stage.shape[1]):
# Depth being either exactly zero implies
# momentum being zero.
# Or else depth must be greater than or equal to
# the minimal storable height
if h[j] == 0.0:
assert xmomentum[i,j] == 0.0
assert ymomentum[i,j] == 0.0
else:
assert h[j] >= 0.0
fid.close()
# Cleanup
os.remove(swwfile)
#------------- Now the same with georeferencing
domain.time=0.0
E = 308500
N = 6189000
#E = N = 0
domain.geo_reference = Geo_reference(56, E, N)
domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
domain.set_quantity('stage', -6)
domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
for t in domain.evolve(yieldstep=1, finaltime = 50):
pass
# Check maximal runup
runup, location = get_maximum_inundation_data(swwfile)
#print 'Runup, location', runup, location, max_time
assert num.allclose(runup, 3.33333325386)
assert num.allclose(location, [53.333332+E, 43.333332+N])
#assert num.allclose(max_time, 10.0)
# Check runup in restricted time interval
runup, location = get_maximum_inundation_data(swwfile, time_interval=[0,9])
#print 'Runup, location:',runup, location, max_time
assert num.allclose(runup, 2.66666674614)
assert num.allclose(location, [56.666668+E, 16.666666+N])
#assert num.allclose(max_time, 9.0)
# Check final runup
runup, location = get_maximum_inundation_data(swwfile, time_interval=[45,50])
#print 'Runup, location:',runup, location, max_time
assert num.allclose(runup, 3.33333325386)
assert num.allclose(location, [53.333332+E, 33.333332+N])
#assert num.allclose(max_time, 45.0)
# Check runup restricted to a polygon
p = num.array([[50,1], [99,1], [99,40], [50,40]], num.int) + num.array([E, N], num.int)
runup, location = get_maximum_inundation_data(swwfile, polygon=p)
#print runup, location, max_time
assert num.allclose(runup, 3.33333325386)
assert num.allclose(location, [53.333332+E, 33.333332+N])
#assert num.allclose(max_time, 11.0)
# Cleanup
os.remove(swwfile)
def test_get_maximum_inundation_1_5(self):
"""Test that sww information can be converted correctly to maximum
runup elevation and location (without and with georeferencing)
This test creates a slope and a runup which is maximal (~11m) at around 10s
and levels out to the boundary condition (1m) at about 30s.
"""
import time, os
from anuga.file.netcdf import NetCDFFile
#Setup
#from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
# Create basic mesh (100m x 100m)
points, vertices, boundary = rectangular(20, 5, 100, 50)
# Create shallow water domain
domain = Domain(points, vertices, boundary)
domain.set_flow_algorithm('1_5')
domain.default_order = 2
domain.set_minimum_storable_height(0.01)
filename = 'runup_test_3'
domain.set_name(filename)
swwfile = domain.get_name() + '.sww'
domain.set_datadir('.')
domain.format = 'sww'
domain.smooth = True
# FIXME (Ole): Backwards compatibility
# Look at sww file and see what happens when
# domain.tight_slope_limiters = 1
domain.tight_slope_limiters = 0
domain.use_centroid_velocities = 0 # Backwards compatibility (7/5/8)
Br = Reflective_boundary(domain)
Bd = Dirichlet_boundary([1.0,0,0])
#---------- First run without geo referencing
domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
domain.set_quantity('stage', -6)
domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
for t in domain.evolve(yieldstep=1, finaltime = 50):
pass
# Check maximal runup
runup = get_maximum_inundation_elevation(swwfile)
location = get_maximum_inundation_location(swwfile)
#print 'Runup, location', runup, location
assert num.allclose(runup, 6.33333333) or \
num.allclose(runup, 6) or \
num.allclose(runup, 12) # old limiters
assert num.allclose(location[0], 38.33333333) or \
num.allclose(location[0], 40.0) or \
num.allclose(location[0], 10)
# Check final runup
runup = get_maximum_inundation_elevation(swwfile, time_interval=[45,50])
location = get_maximum_inundation_location(swwfile, time_interval=[45,50])
#print 'Runup, location:',runup, location
assert num.allclose(runup, 1.666666666)
assert num.allclose(location[0], 61.666666)
# Check runup restricted to a polygon
p = [[50,1], [99,1], [99,49], [50,49]]
runup = get_maximum_inundation_elevation(swwfile, polygon=p)
location = get_maximum_inundation_location(swwfile, polygon=p)
#print runup, location
assert num.allclose(runup, 3.6666666)
assert num.allclose(location[0], 51.6666666)
        # Check that minimum_storable_height works
fid = NetCDFFile(swwfile, netcdf_mode_r) # Open existing file
stage = fid.variables['stage'][:]
z = fid.variables['elevation'][:]
xmomentum = fid.variables['xmomentum'][:]
ymomentum = fid.variables['ymomentum'][:]
for i in range(stage.shape[0]):
h = stage[i]-z # depth vector at time step i
# Check every node location
for j in range(stage.shape[1]):
# Depth being either exactly zero implies
# momentum being zero.
# Or else depth must be greater than or equal to
# the minimal storable height
if h[j] == 0.0:
assert xmomentum[i,j] == 0.0
assert ymomentum[i,j] == 0.0
else:
assert h[j] >= domain.minimum_storable_height
fid.close()
# Cleanup
os.remove(swwfile)
#------------- Now the same with georeferencing
domain.time=0.0
E = 308500
N = 6189000
#E = N = 0
domain.geo_reference = Geo_reference(56, E, N)
domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
domain.set_quantity('stage', -6)
domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
for t in domain.evolve(yieldstep=1, finaltime = 50):
pass
# Check maximal runup
runup = get_maximum_inundation_elevation(swwfile)
location = get_maximum_inundation_location(swwfile)
#print runup, location
assert num.allclose(runup,6.33333333) or \
num.allclose(runup, 6) or \
num.allclose(runup, 12) # old limiters
assert num.allclose(location[0], 38.34+E) or \
num.allclose(location[0], 40+E) or \
num.allclose(location[0], 10+E)
# Check final runup
runup = get_maximum_inundation_elevation(swwfile, time_interval=[45,50])
location = get_maximum_inundation_location(swwfile, time_interval=[45,50])
#print runup, location
#1.66666666667 [308561.66, 6189006.5]
assert num.allclose(runup, 1.666666666)
assert num.allclose(location[0], 61.66+E)
# Check runup restricted to a polygon
p = num.array([[50,1], [99,1], [99,49], [50,49]], num.int) + num.array([E, N], num.int) #array default#
runup = get_maximum_inundation_elevation(swwfile, polygon=p)
location = get_maximum_inundation_location(swwfile, polygon=p)
#print runup, location
assert num.allclose(runup, 3.66666666)
        assert num.allclose(location[0], 51.66+E)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-07-19 17:45:06
# @Author : helingjie-maskmind (<EMAIL>)
# @Link : ${link}
# @Version : $Id$
import os
import numpy as np
import sys
from matplotlib.pyplot import plot
from matplotlib.pyplot import show
N = int(input("please input N"))
weights = np.ones(N) / N
print("Weights", weights)
c = np.loadtxt('data.csv', delimiter=',', usecols=(6,), unpack=True)
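# The N-day simple moving average is a convolution with uniform weights; the slice
# [N-1:-N+1] keeps only the outputs where the window fully overlaps the price series.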
sma = np.convolve(weights, c)[N-1:-N+1]
deviation=[]
c_length=len(c)
for i in range(N-1,c_length):
if i+N<c_length:
dev=c[i:i+N]
else:
dev=c[-N:]
averages=np.zeros(N)
# fill() builds an array whose elements all share the same value
averages.fill(sma[i-N-1])
dev = dev - averages
dev = dev ** 2
dev = np.sqrt(np.mean(dev))
deviation.append(dev)
deviation = 2 * np.array(deviation)
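# Hedged sketch of the usual continuation (not part of the original snippet): the upper
# and lower Bollinger bands are the moving average plus/minus the 2-sigma deviation, e.g.
# upper_band = sma + deviation
# lower_band = sma - deviation
# plot(sma); plot(upper_band); plot(lower_band); show()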
import os
import yaml
import random
import argparse
import math
import numpy as np
import torch
import tqdm
from os import path as osp
from dataloader.video_loader import DecordVideoReader
from utils.data_processing import *
def evaluate_video(models, mids, dataloader):
for model in models:
model.eval()
mdn_loss_list = [0] * len(models)
num_loss_list = [0] * len(models)
mean_dict_list = [{} for m in models]
var_dict_list = [{} for m in models]
for imgs, scores in tqdm.tqdm(dataloader, desc="Detecting objects"):
scores_cpu = scores.cpu()
with torch.no_grad():
for i, (mean_dict, var_dict, model) in enumerate(zip(mean_dict_list, var_dict_list, models)):
_, mdn_output = model(imgs, scores=scores)
pi, sigma, mu = mdn_output[2], mdn_output[3], mdn_output[4]
mdn_loss_list[i] += mdn_output[1].item()
num_loss_list[i] += 1
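# Mixture moments of the MDN head: the mean is the pi-weighted sum of the component
# means, and the variance is sum_k pi_k * (sigma_k^2 + mu_k^2 - mean^2), i.e. the law
# of total variance for the mixture.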
mean = (mu * pi).sum(-1)
var = ((sigma**2 + mu**2 - mean.unsqueeze(-1)**2) * pi).sum(-1)
for i in range(len(mean)):
lab = scores_cpu[i].item()
if lab not in mean_dict:
mean_dict[lab] = [mean[i].item()]
var_dict[lab] = [var[i].item()]
else:
mean_dict[lab] += [mean[i].item()]
var_dict[lab] += [var[i].item()]
for i, (mean_dict, var_dict) in enumerate(zip(mean_dict_list, var_dict_list)):
print("profile of Model %d" % mids[i])
print('K N Mean Var MSE')
se_dict = dict()
count_dict = dict()
keys = [int(k) for k in mean_dict.keys()]
keys.sort()
for k in keys:
samples = len(mean_dict[k])
se = ((np.array(mean_dict[k]) - k) ** 2).sum()
mean = np.mean(mean_dict[k])
var = np.mean(var_dict[k])
se_dict[k] = se
count_dict[k] = samples
print('{}: {} {:.2f} {:.2F} {:.2f}'.format(k, samples, mean, var, se / samples))
se = np.array(list(se_dict.values()))
count = np.array(list(count_dict.values()))
print('MSE: {:.2f}'.format(se.sum() / count.sum()))
print('NLL: {:.2f}'.format(mdn_loss_list[i] / num_loss_list[i]))
mdn_loss_list[i] /= num_loss_list[i]
return mdn_loss_list
def train_models(epochs, model_configs, mids, train_dataloader, valid_dataloader, weight, checkpoint_dir):
for i, model_config in enumerate(model_configs):
for module_def in model_config:
if module_def["type"] == "hmdn":
print("Model_%d: M: %s, H: %s, eps: %s" % (mids[i], module_def["M"], module_def["num_h"], module_def["eps"]))
break
models = [Darknet(model_config, weight).to(config.device) for model_config in model_configs]
parameters = []
for model in models:
model.apply(weights_init_normal)
model.load_darknet_weights("weights/yolov3-tiny.weights")
model.train()
parameters += model.parameters()
optimizer = torch.optim.Adam(parameters, lr=0.001)
for epoch in range(epochs):
total_loss = 0
num_batchs = 0
for imgs, scores in tqdm.tqdm(train_dataloader, desc="epoch %d/%d" % (epoch+1, epochs)):
loss = 0.0
for model in models:
_, mdn_output = model(imgs, scores=scores)
wta_loss = mdn_output[0]
mdn_loss = mdn_output[1]
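# Warm-up schedule: optimize the winner-take-all loss alone for the first 5 epochs,
# then blend it (down-weighted) with the full mixture-density NLL.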
if epoch < 5:
loss += wta_loss + 0.0 * mdn_loss
else:
loss += 0.5 * wta_loss + mdn_loss
# mdn metric
total_loss += mdn_loss.item()
num_batchs += 1
model.seen += imgs.size(0)
loss.backward()
optimizer.step()
optimizer.zero_grad()
print("mdn_loss: %.3f" % (total_loss / num_batchs))
nlls = evaluate_video(models, mids, valid_dataloader)
for i, model in enumerate(models):
torch.save(model.state_dict(), os.path.join(checkpoint_dir, f"cmdn_{mids[i]}.pth"))
return np.array(nlls)
def train_cmdn(opt, vr, lr, train_idxs, valid_idxs, score_weight):
torch.manual_seed(opt.random_seed)
np.random.seed(opt.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(opt.random_seed)
# Get data configuration
model_group_config = parse_model_config(config.cmdn_config)
model_configs = parse_model_group(model_group_config)
checkpoint_dir = get_checkpoint_dir(opt)
os.makedirs(checkpoint_dir, exist_ok=True)
train_video_loader = VideoLoader(vr, train_idxs, lr, batch_size=opt.cmdn_train_batch)
valid_video_loader = VideoLoader(vr, valid_idxs, lr, batch_size=opt.cmdn_train_batch)
nlls = np.zeros([len(model_configs)])
model_batch = len(model_configs)
for i in range(int(math.ceil(len(model_configs) / model_batch))):
model_config_batch = model_configs[i*model_batch: (i+1) * model_batch]
nlls[i*model_batch: (i+1)*model_batch] = train_models(opt.cmdn_train_epochs, model_config_batch, range(i*model_batch, (i+1)*model_batch), train_video_loader, valid_video_loader, score_weight, checkpoint_dir)
best_model = np.argmin(nlls)
from __future__ import print_function
import inspect
from math import pi, sqrt, factorial
import qutip
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches as mpatches
from matplotlib.gridspec import GridSpec
from os import path
from scipy.special import genlaguerre
from scipy.linalg import svd, expm
from scipy.optimize import brute
from scipy import signal
from mpl_toolkits.axes_grid1 import ImageGrid
import shutil
__all__ = [
'Reporter', 'print_costs', 'print_grads', 'save_waves', 'plot_waves',
'save_script', 'liveplot_waves', 'liveplot_prop', 'plot_fidelity', 'plot_unitary', 'plot_cwigs',
'verify_from_setup', 'verify_master_equation', 'plot_matrix', 'plot_states',
'verify_sensitivity', 'verify_dispersion_sensitivity', 'verify_with_response',
'set_output_fmt', 'plot_penalties', 'plot_trajectories', 'cutoff'
]
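# Dispatch helper: look up fn's argument names and call it with the matching entries
# from the shared data dict, so each reporter only declares the quantities it needs.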
def run_reporter(fn, data):
args = [data[k] for k in inspect.getargspec(fn).args if k != 'self']
fn(*args)
OUTPUT_FMT = 'pdf'
def set_output_fmt(fmt):
"""
Set the file suffix used for matplotlib.savefig. By default this is pdf
"""
global OUTPUT_FMT
OUTPUT_FMT = fmt
class Reporter(object):
"""
Base reporter class. Subclass and implement run method to use
Parameters
----------
spacing : int
Number of iterations to perform between evaluations of this reporter
"""
def __init__(self, spacing=1):
self.spacing = spacing
self.n_call = 0
def __call__(self, force=False, **kwargs):
if force or self.n_call % self.spacing == 0:
args = [kwargs[k] for k in inspect.getargspec(self.run).args[1:]]
self.run(*args)
self.n_call += 1
def run(self, *args):
raise NotImplementedError
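# Minimal usage sketch (a hypothetical reporter, not part of the original module):
# implement run() with argument names that match keys of the data dict passed to
# __call__ (e.g. 'fids', 'n_iter') and they are filled in automatically.
class print_mean_fid(Reporter):
    def run(self, fids, n_iter):
        print(n_iter, 'mean fid: %.6f' % np.mean(fids))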
class print_costs(Reporter):
"""
Prints the current fidelity from each setup, and the cost from each penalty
"""
# TODO: Replace this with a Logging solution for better support of
# multiprocessing in Spyder (which doesn't let children print
# to STDOUT)
def run(self, fids, pen_costs, n_iter):
print(n_iter, '- Fids:', end=' ')
print(' '.join(['%.7g' % c for c in fids]), end=' ')
if len(pen_costs):
print('Penalties:', end=' ')
print(' '.join(['%.7g' % c for c in pen_costs]))
class cutoff(Reporter):
"""
Raise an exception if we go too many rounds without going over the fidelity threshold
"""
def __init__(self, cut_rounds=10, cut_fid=.715):
super(cutoff, self).__init__()
self.cut_rounds = cut_rounds
self.cut_fid = cut_fid
def run(self, fids, pen_costs, n_iter):
if np.mean(fids) < self.cut_fid and n_iter>self.cut_rounds:
txt = 'Failed to get fid > %.3f in %d rounds' % (self.cut_fid, self.cut_rounds)
raise Exception(txt)
class print_grads(Reporter):
"""
Prints the maximum gradient value for both the control and auxiliary parameters
"""
def run(self, fid_grads, aux_fid_grads):
print('Max Fid Grad:', abs(fid_grads).max(), end=' ')
if aux_fid_grads.size:
print('Max Aux Grad:', abs(aux_fid_grads).max())
else:
print('')
class save_waves(Reporter):
"""
Saves the controls in a .npz file. To retrieve the data, use
``np.load('waves.npz')``, which returns a dictionary-like object.
Parameters
----------
wave_names : List of str
Names of the controls when saved in dictionary. There should be
N_CTRLS entries in this list.
"""
def __init__(self, wave_names, spacing):
super(save_waves, self).__init__(spacing)
self.wave_names = wave_names
def run(self, outdir, sim_controls, dt, n_ss, raw_controls, shape_func, response, tot_cost):
print('saving...')
wave_dict = {'sim_'+k:w for k, w in zip(self.wave_names, sim_controls)}
wave_dict.update({'raw_'+k:w for k, w in zip(self.wave_names, raw_controls)})
if response is not None:
pad = np.zeros((len(raw_controls), len(response)))
awg_controls = np.hstack([raw_controls * shape_func, pad])
else:
awg_controls = raw_controls * shape_func
wave_dict.update({k:w for k, w in zip(self.wave_names, awg_controls)})
wave_dict['sim_dt'] = dt / float(n_ss)
wave_dict['dt'] = dt
wave_dict['n_ss'] = n_ss
wave_dict['response'] = response
np.savez(path.join(outdir, 'waves.npz'), **wave_dict)
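# Retrieval sketch for the archive written above (key names depend on the wave_names
# given at construction; the bare filename assumes the current directory is outdir):
# data = np.load('waves.npz')
# raw0, sim0 = data['raw_' + wave_names[0]], data['sim_' + wave_names[0]]
# sim_dt = float(data['sim_dt'])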
class plot_waves(Reporter):
"""
Uses matplotlib to plot the current waves, and saves them under
waves.pdf in the output directory. Since plotting with matplotlib
can be slow, make sure the spacing is set reasonably so plotting
does not dominate the execution time.
"""
def __init__(self, wave_names, spacing=5, iq_pairs=False, last_only=False):
super(plot_waves, self).__init__(spacing)
self.wave_names = wave_names
self.iq_pairs = iq_pairs
self.last_only = last_only
n_ax = len(wave_names)
if iq_pairs:
n_ax //= 2
self.fft_fig, self.fft_axes = plt.subplots(n_ax, 1)
else:
self.fig = plt.figure()
gs1 = GridSpec(n_ax, 2)
for i in range(n_ax // 2):
self.fig.add_subplot(gs1[i*2, 0])
self.fig.add_subplot(gs1[i*2+1, 0])
self.fig.add_subplot(gs1[i*2:i*2+2, 1])
self.axes = self.fig.axes
def run(self, outdir, full_controls, dt, n_ss):
print('Plotting...')
sim_dt = dt / n_ss
wave_axes = [ax for idx,ax in enumerate(self.axes) if idx%3 in [0,1]]
fft_axes = [ax for idx,ax in enumerate(self.axes) if idx%3 in [2,]]
if 1:
#for ax_row in self.axes:
# for ax in ax_row:
for ax in self.axes:
lines = ax.get_lines()
ax.clear()
nlines = len(lines)
for idx, line in enumerate(lines):
xs = line.get_xdata()
ys = line.get_ydata()
alpha = (0.5*idx)/nlines + 0.2
ax.plot(xs, ys, 'k-', alpha=alpha)
# if self.last_only:
# for ax in self.axes:
# ax.clear()
if self.iq_pairs:
for ax, wave in zip(self.axes, full_controls[::2]):
ax.clear()
ax.plot(wave, label='I')
for ax, wave, name in zip(self.axes, full_controls[1::2], self.wave_names):
ax.plot(wave, label='Q')
ax.set_ylabel(name)
c_waves = full_controls[::2] + 1j*full_controls[1::2]
fft_waves = np.fft.fftshift(abs(np.fft.fft(c_waves, axis=1))**2)
fft_freqs = 1e3 * np.fft.fftshift(np.fft.fftfreq(c_waves.shape[1], sim_dt))
for ax, fft in zip(self.fft_axes, fft_waves):
ax.clear()
ax.plot(fft_freqs, fft)
ax.set_xlim(-80, 80)
self.fft_fig.savefig(path.join(outdir, 'waves_fft.%s' % OUTPUT_FMT))
else:
for idx, (ax, wave) in enumerate(zip(wave_axes, full_controls)):
ax.set_yticks(np.linspace(min(int(np.floor(min(wave))), -1),
max(int( np.ceil(max(wave))), 1),
5))
if idx != len(self.axes)-1:
ax.set_xticks([])
else:
ax.set_xticks(range(0, len(wave)+1, 100))
ax.plot([0, len(wave)], [0,0], 'k--', lw=0.5)
ax.set_xlim(0, len(wave))
ax.plot(wave, 'r-')
for idx, ax in enumerate(fft_axes):
c_waves = full_controls[2*idx] + 1j*full_controls[2*idx+1]
fft_wave = np.fft.fftshift(abs(np.fft.fft(c_waves))**2)
fft_freqs = 1e3 * np.fft.fftshift(np.fft.fftfreq(len(c_waves), sim_dt))
start = int(len(fft_wave) * (0.5 - .1)) #p/m 50 MHz
stop = int(len(fft_wave) * (0.5 + .1))
ax.plot(fft_freqs[start:stop], fft_wave[start:stop], 'r-')
ax.set_yticklabels([])
if idx == 0:
ax.set_xticklabels([])
for ax, wave_name in zip(wave_axes, self.wave_names):
ax.set_title(wave_name, x=-0.075, y=0.25)
try:
self.fig.savefig(path.join(outdir, 'waves.%s' % OUTPUT_FMT))
except IOError:
print('*** Unable to save waves fig. Is it open?')
class save_script(Reporter):
"""
Saves the script calling this function in the output
directory. The script is only ever copied once.
"""
def __init__(self, script_name):
super(save_script, self).__init__()
self.script_name = script_name
self.copied = False
def run(self, outdir):
if not self.copied:
shutil.copy(self.script_name, outdir + '/script.py')
self.copied = True
class liveplot_waves(Reporter):
"""
Use the liveplot module to plot waves. Requires liveplot to be
installed and active::
pip install liveplot
python -m liveplot
"""
def __init__(self, wave_names, spacing=1):
super(liveplot_waves, self).__init__(spacing)
from liveplot import LivePlotClient
self.client = LivePlotClient()
self.client.clear()
self.wave_names = wave_names
def run(self, sim_controls, fids):
for wave, name in zip(sim_controls, self.wave_names):
self.client.plot_y(name, wave)
for i, fid in enumerate(fids):
self.client.append_y('fid%d' % i, fid)
self.client.append_y('log_infid%d' % i, np.log(1 - fid))
class liveplot_prop(Reporter):
"""
Use the liveplot module to plot propagators. Requires liveplot to be
installed and active::
pip install liveplot
python -m liveplot
"""
def __init__(self, spacing=1):
super(liveplot_prop, self).__init__(spacing)
from liveplot import LivePlotClient
self.client = LivePlotClient()
self.client.clear()
def run(self, props):
for i, prop in enumerate(props):
self.client.plot_z('prop%d' % i, abs(prop))
class plot_fidelity(Reporter):
"""
Plots the progress of the fidelity as a function of iteration
"""
def __init__(self, spacing=1):
super(plot_fidelity, self).__init__(spacing)
self.all_fids = None
def run(self, outdir, fids):
n_fids = len(fids)
if self.all_fids is None:
self.all_fids = [[] for _ in range(n_fids)]
f1, ax1 = plt.subplots(1, 1)
f2, ax2 = plt.subplots(1, 1)
for fid_list, fid in zip(self.all_fids, fids):
fid_list.append(fid)
ax1.plot(range(len(fid_list)), fid_list, 's-')
ax2.plot(range(len(fid_list)),1 - np.array(fid_list), 's-')
ax2.set_yscale('log')
try:
f1.savefig(path.join(outdir, 'fidelity.%s' % OUTPUT_FMT))
f2.savefig(path.join(outdir, 'infidelity.%s' % OUTPUT_FMT))
except IOError:
print('*** Figure saving failed, is the pdf open elsewhere?')
plt.close(f1)
plt.close(f2)
class plot_penalties(Reporter):
"""
Plots the progress of each penalty cost as a function of iteration
"""
def __init__(self, spacing=1):
super(plot_penalties, self).__init__(spacing)
def run(self, outdir, pen_hist):
if len(pen_hist) == 0:
return
pen_hist = np.array(pen_hist)
f, axes = plt.subplots(pen_hist.shape[1], 1)
for ax, pens in zip(axes, pen_hist.T):
ax.plot(pens)
f.savefig(path.join(outdir, 'penalties.%s' % OUTPUT_FMT))
plt.close(f)
class plot_unitary(Reporter):
def run(self, outdir, setups, props, fids, **kwargs):
U_target = setups[0].U_target
U_total = props[0]
fid = fids[0]
if U_target.shape[0] != U_target.shape[1]:
U_target = U_target.T
f, (ax1, ax2) = plt.subplots(1, 2)
plot_matrix(U_target, ax=ax1)
ax1.set_title('Target')
plot_matrix(U_total, ax=ax2)
ax2.set_title('Actual (fid = %.04f)' % fids[0])
f.savefig(path.join(outdir, 'unitary.%s' % OUTPUT_FMT))
plt.close(f)
class plot_states(Reporter):
def run(self, outdir, setups, props, fids, **kwargs):
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
plot_matrix(setups[0].inits.T, ax=ax1)
ax1.set_title('Initial')
plot_matrix(setups[0].finals.T, ax=ax2)
ax2.set_title('Final')
plot_matrix(props[0], ax=ax3)
ax3.set_title('Actual (fid = %.04f)' % fids[0])
f.savefig(path.join(outdir, 'states.%s' % OUTPUT_FMT))
plt.close(f)
class plot_trajectories(Reporter):
"""
Plot probability trajectories for a given setup.
"""
def __init__(self, setup, spacing, taylor_order=20):
super(plot_trajectories, self).__init__(spacing)
self.setup = setup
self.taylor_order = taylor_order
def run(self, outdir, sim_controls, aux_params, dt, n_ss):
print('Plotting trajectories...')
dt = dt / float(n_ss)
setup = self.setup
t_order = self.taylor_order
f, axes = plt.subplots(len(self.setup.inits), 1)
for i_state, (init, final, ax) in enumerate(zip(self.setup.inits, self.setup.finals, axes)):
probs = []
psi = init.copy()
for i, time_slice in enumerate(sim_controls.T):
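# One-step propagation: apply exp(L) with L = -i*dt*H(t) to psi via a truncated
# Taylor series of order t_order; psi_k holds the k-th term L^k psi / k!.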
L = -1j * dt * (setup.H0 + sum(c*Hc for c,Hc in zip(time_slice, setup.Hcs)))
psi_k = psi
for k in range(1, t_order+1):
psi_k = L.dot(psi_k) / k
psi += psi_k
probs.append(np.abs(psi)**2)
ovlp = np.abs(np.sum(final.conj() * psi))**2
ax.imshow(np.array(probs).T, interpolation='nearest', aspect='auto', origin='lower')
ax.set_xlim(-0.5, len(probs))
ax.set_ylim(-0.5, len(psi))
ax.set_title('State %d, ovlp: %.04f' % (i_state, ovlp))
f.tight_layout()
f.savefig(path.join(outdir, 'trajectories.%s' % OUTPUT_FMT))
plt.close(f)
class plot_cwigs(Reporter):
def __init__(self, dim, spacing=5, indices=None, max_alpha=3.5, n_pts=100):
super(plot_cwigs, self).__init__(spacing)
xs = np.linspace(-3.5, 3.5, 100)
X, Y = np.meshgrid(xs, xs)
disps = (X + 1j*Y).flatten()
self.dim = dim
self.n_pts = n_pts
self.M = wigner_mat(disps, dim)
self.paulis = [
qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()
]
self.paulis = [qutip.tensor(p, qutip.qeye(dim)).full() for p in self.paulis]
self.indices = indices
if indices is None:
self.indices = slice(None, None)
self.fig = None
self.grid = None
def run(self, setups, props, aux_params, outdir):
print('plotting wigners...')
finals = setups[0].finals[self.indices]
prop_inits = props[0].T[self.indices]
if setups[0].gauge_ops is not None:
gauge_prop = expm(-1j*sum(gv*gop for gv, gop in zip(aux_params, setups[0].gauge_ops)))
finals = finals.dot(gauge_prop.conj())
# finals = gauge_prop.dot(finals.T).T
prop_inits = gauge_prop.conj().T.dot(prop_inits.T).T
if self.fig is None:
self.fig = plt.figure()
self.grid = ImageGrid(self.fig, 111, nrows_ncols=(2*len(finals), 4), axes_pad=0)
for ax in self.grid:
ax.set_xticks([])
ax.set_yticks([])
for ax, name in zip(self.grid, 'I,X,Y,Z'.split(',')):
ax.set_title(name)
fig, grid = self.fig, self.grid
i = 0
for k, (prop_init, final) in enumerate(zip(prop_inits, finals)):
grid[i].set_ylabel('Prop*Init[%d]' % k, rotation='horizontal', ha='right')
for wig in self.cond_wigs(prop_init):
grid[i].imshow(wig, vmin=-1, vmax=1)
i += 1
grid[i].set_ylabel('Final[%d]' % k, rotation='horizontal', ha='right')
for wig in self.cond_wigs(final):
grid[i].imshow(wig, vmin=-1, vmax=1)
i += 1
fig.savefig(path.join(outdir, 'cwigs.%s' % OUTPUT_FMT))
def cond_wigs(self, psi):
d = psi.shape[0] // 2
rho = np.outer(psi, psi.conj())
for op in self.paulis:
op_rho = op.dot(rho)
ptrace_op_rho = op_rho[:d,:d] + op_rho[d:,d:]
yield self.M.dot(vectorize(ptrace_op_rho)).reshape((self.n_pts, self.n_pts))
class verify_from_setup(Reporter):
"""
Evaluate the fidelity from the given setup. This can serve as a consistency
check, for instance to ensure the fidelity is unchanged by introducing the
F state
"""
def __init__(self, setup, spacing):
super(verify_from_setup, self).__init__(spacing)
self.setup = setup
def run(self, sim_controls, aux_params, dt, n_ss):
dt = dt / float(n_ss)
print('*' * 80)
print('verifying...', end=' ')
_, fid, _, _ = self.setup.get_fids(sim_controls, aux_params, dt)
print('fid = %.7g' % fid)
print('*' * 80)
class verify_with_response(Reporter):
def __init__(self, setup, spacing, response):
super(verify_with_response, self).__init__(spacing)
self.setup = setup
self.response = response
def run(self, awg_controls, aux_params, dt, n_ss):
dt = dt / float(n_ss)
f = plt.figure()
controls = np.kron(awg_controls, np.identity(n_ss)[:,0])
plt.plot(controls[0,:], 'ks')
controls = np.array([
np.convolve(controls[i,:], self.response, mode='full')
for i in range(controls.shape[0])])
plt.plot(controls[0,:])
plt.plot(controls[1,:])
plt.savefig('new_waves.%s' % OUTPUT_FMT)
print('Verifying with alternate response function...', end=' ')
_, fid, _, _ = self.setup.get_fids(controls, aux_params, dt)
print('fid = %.7g' % fid)
plt.close(f)
class verify_master_equation(Reporter):
def __init__(self, setup, c_ops, spacing):
super(verify_master_equation, self).__init__(spacing)
self.setup = setup
self.c_ops = c_ops
def run(self, sim_controls, dt, n_ss):
print('='*80)
print('verifying with qutip...')
dt = dt / float(n_ss)
H0_arr, Hcs_arr = self.setup.H0, self.setup.Hcs
inits, finals = self.setup.inits, self.setup.finals
dims = [H0_arr.shape[0]]
H0 = qutip.Qobj(H0_arr, dims=[dims, dims])
Hcs = [qutip.Qobj(Hc, dims=[dims, dims]) for Hc in Hcs_arr]
c_ops = [qutip.Qobj(op, dims=[dims, dims]) for op in self.c_ops]
inits = [qutip.Qobj(s, dims=[dims, [1]]) for s in inits]
finals = [qutip.Qobj(s, dims=[dims, [1]]) for s in finals]
n_states = len(inits)
H = [H0] + [[Hc, w] for Hc, w in zip(Hcs, sim_controls)]
tlist = dt * np.arange(sim_controls.shape[1])
fid = 0
for i in range(n_states):
for j in range(i, n_states):
init = inits[i] * inits[j].dag()
final = finals[i] * finals[j].dag()
prop_init = qutip.mesolve(H, init, tlist, c_ops, {}).states[-1]
sub_fid = (final * prop_init.dag()).tr()
print('sub_fid', i, j, sub_fid)
if i != j:
sub_fid *= 2
fid += sub_fid.real
fid = np.sqrt(fid) / n_states
print('tot fid', fid)
print('='*80)
class verify_sensitivity(Reporter):
"""
Evaluate the fidelity from the given setup varying some parameters.
delta_list is a tuple/list containing a tuple of (name, H, amps), e.g.
[('sz', Hsigmaz, np.linspace(-1e-4, 1e-4, 5))]
"""
def __init__(self, setup, spacing, delta_list):
super(verify_sensitivity, self).__init__(spacing)
self.setup = setup
self.delta_list = delta_list
def run(self, sim_controls, aux_params, dt, n_ss, outdir):
dt = dt / float(n_ss)
_, fid0, _, _ = self.setup.get_fids(sim_controls, aux_params, dt)
for name, dH, amps in self.delta_list:
if isinstance(dH, qutip.Qobj):
dH = dH.full()
print('Varying', name)
fids = []
orig_H0 = self.setup.H0.copy()
for amp in amps:
if amp == 0:
fid = fid0
else:
self.setup.H0 = orig_H0 + amp * dH
_, fid, _, _ = self.setup.get_fids(sim_controls, aux_params, dt)
print('\t%.4g: %.4g' % (amp, fid))
fids.append(fid)
self.setup.H0 = orig_H0
f, ax = plt.subplots(1, 1)
ax.plot(np.array(amps) / 2 / np.pi * 1e6, 1 - np.array(fids), 'ks')
ax.set_xlabel('Amplitude / 2pi [kHz]')
ax.set_ylabel('Infidelity')
ax.set_title('Sensitivity to ' + name)
f.savefig(path.join(outdir, 'sens_%s.%s' % (name, OUTPUT_FMT)))
plt.close(f)
class verify_dispersion_sensitivity(Reporter):
"""
Evaluate the fidelity from the given setup varying dispersion.
disp_list specifies the dispersions to use, in fractional change / GHz.
"""
def __init__(self, setup, spacing, disp_list):
super(verify_dispersion_sensitivity, self).__init__(spacing)
self.setup = setup
self.disp_list = disp_list
def run(self, sim_controls, aux_params, dt, n_ss, outdir):
dt = dt / float(n_ss)
n_ctrls = sim_controls.shape[0]
controlsF = [np.fft.rfft(sim_controls[i,:]) for i in range(n_ctrls)]
freqs = np.fft.rfftfreq(sim_controls.shape[1], dt)
print('Varying dispersion')
fids = []
for amp in self.disp_list:
filt = (1 + freqs * amp)
filt[filt<0] = 0
filt[filt>2] = 2
controls = np.array([
np.fft.irfft(controlsF[i] * filt)
for i in range(n_ctrls)])
_, fid, _, _ = self.setup.get_fids(controls, aux_params, dt)
print('\t%.4g: %.4g' % (amp, fid))
fids.append(fid)
f, ax = plt.subplots(1, 1)
ax.plot(np.array(self.disp_list) * 10, 1 - np.array(fids), 'ks')
ax.set_xlabel('pct change @ 100 MHz')
ax.set_ylabel('Infidelity')
ax.set_title('Dispersion sensitivity')
f.savefig(path.join(outdir, 'sens_dispersion.%s' % OUTPUT_FMT))
plt.close(f)
def plot_matrix(M, ax=None, smap=None, labels=None):
yshape, xshape = M.shape
if max(xshape, yshape) < 8:
lw = 1
else:
lw = 0.5
R = 0.4
if ax is None:
f = plt.figure()
ax = f.add_subplot(111)
ax.set_aspect('equal')
ax.set_xlim(0, xshape)
ax.set_ylim(0, yshape)
for x in range(xshape):
ax.plot([x, x], [0, yshape], 'k', lw=lw)
for y in range(yshape):
ax.plot([0, xshape], [y, y], 'k', lw=lw)
for i in range(yshape):
for j in range(xshape):
vec = M[i,j]
if np.abs(vec)**2 < 1e-3:
continue
if smap:
fc = smap.to_rgba(np.abs(vec)**2)
else:
fc = 'None'
x = j
y = yshape - i - 1
ax.add_patch(mpatches.Circle([x+0.5,y+0.5], R*np.abs(vec), fc=fc, lw=lw))
ax.plot([x+0.5, x+0.5+R*vec.real], [y+0.5,y+0.5+R*vec.imag], 'k', lw=lw)
if labels is None:
ax.set_xticks([])
ax.set_yticks([])
else:
ax.set_xticks(np.arange(xshape)+0.5)
ax.set_yticks(np.arange(yshape)+0.5)
labels = ['$|%s\\rangle$' % s for s in labels]
ax.set_xticklabels(labels)
ax.set_yticklabels(list(reversed(labels)))
return ax
def cond_wigs(state, xs):
mat = state.data.todense().reshape(state.dims[0])
mat = mat[:2, :]
q_vecs_t, coefs, c_vecs = svd(mat, full_matrices=False)
q_vecs = q_vecs_t.T
assert len(q_vecs) == len(c_vecs) == 2, (c_vecs.shape, q_vecs.shape)
c_vecs = np.diag(coefs).dot(c_vecs)
q_vecs = list(map(qutip.Qobj, q_vecs))
c_vecs = list(map(qutip.Qobj, c_vecs))
# assert q.tensor(q_vecs[0], c_vecs[0]) + q.tensor(q_vecs[1], c_vecs[1]) == state
paulis = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]
wigs = []
for q_op in paulis:
wig = 0
for j in range(2):
wig += (q_vecs[j].dag() * q_op * q_vecs[j])[0,0] * qutip.wigner(c_vecs[j] * c_vecs[j].dag(), xs, xs, g=2)
od_coef = (q_vecs[0].dag() * q_op * q_vecs[1])[0,0]
od_wig = wig_imag(c_vecs[0] * c_vecs[1].dag(), xs, xs, g=2)
wig += od_coef * od_wig
wig += od_coef.conj() * od_wig.conj()
wigs.append(wig.real)
return wigs
def wig_imag(rho, xvec, yvec, g=2):
"""
Using Laguerre polynomials from scipy to evaluate the Wigner function for
the density matrices :math:`|m><n|`, :math:`W_{mn}`. The total Wigner
function is calculated as :math:`W = \sum_{mn} \\rho_{mn} W_{mn}`.
"""
M = np.prod(rho.shape[0])
X, Y = np.meshgrid(xvec, yvec)
A = 0.5 * g * (X + 1.0j * Y)
W = np.zeros(np.shape(A), dtype=np.complex)
B = 4 * abs(A) ** 2
for m in range(M):
if abs(rho[m, m]) > 0.0:
W += np.real(rho[m, m] * (-1) ** m * genlaguerre(m, 0)(B))
for n in range(m + 1, M):
if abs(rho[m, n]) > 0.0:
W += 2.0 * rho[m, n] * (-1)**m * (2*A)**(n-m) * sqrt(factorial(m)/factorial(n)) * genlaguerre(m, n-m)(B)
return 0.5 * W * g ** 2 * np.exp(-B / 2) / pi
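# Usage sketch for the module-level cond_wigs/wig_imag pair (hypothetical state; assumes
# a qubit-first tensor ordering so that state.dims[0] == [2, cavity_dim]):
# xs = np.linspace(-3, 3, 61)
# psi = qutip.tensor(qutip.basis(2, 0), qutip.coherent(10, 1.0))
# wig_I, wig_X, wig_Y, wig_Z = cond_wigs(psi, xs)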
def optimize_gauge(props, targets, gauge_ops):
'''
Optimize a set of gauge transformation given by gauge_ops.
The parameters are lists with one element for each setup.
'''
n_gauge = len(gauge_ops[0])
def gauge_transform(g_vals, g_ops):
total = None
if n_gauge == 1:
g_vals = [g_vals]
for g_val, g_op in zip(g_vals, g_ops):
g_prop = expm(-1j * g_val * g_op)
if total is None:
total = g_prop
else:
total = total.dot(g_prop)
return total
def apply_gauges(gauge_vals, g_ops_row, targ):
t = gauge_transform(gauge_vals, g_ops_row)
return np.dot(targ, t)
def gauge_cost(gauge_vals):
cost = 0
for prop, targ, g_ops_row in zip(props, targets, gauge_ops):
norm = np.sum(abs(targ)**2)
targ_after = apply_gauges(gauge_vals, g_ops_row, targ)
if targ_after.shape == prop.shape:
overlap = np.sum(targ_after.conj() * prop) / norm
else:
overlap = np.sum(targ_after.T.conj() * prop) / norm
fid = abs(overlap)
cost += 1 - fid
cost = cost / len(props)
return cost
ranges = [slice(0, 2*np.pi, .2)] * n_gauge
g_vals = brute(gauge_cost, ranges)
return g_vals, [apply_gauges(g_vals, g_ops, targ) for g_ops, targ in zip(gauge_ops, targets)]
def wigner_mat(disps, d):
"""
Construct the matrix M such that M(alpha)*vec(rho) = Wigner(alpha)
The matrix M will be of dimension (N, d^2) where N is the number of
displacements and d is the maximum photon number.
Here vec(rho) deconstructs rho into a basis of d^2 hermitian operators.
The first d elements of vec(rho) are the diagonal elements of rho, the
next d*(d-1)/2 elements are the real parts of the upper triangle,
and the last d*(d-1)/2 elements are the imaginary parts of the upper triangle.
Elements of M are then M(a, (i, j)) = <j|D(-a) P D(a) |i> with displacement
operator D and parity operator P.
See http://dx.doi.org/10.1103/PhysRev.177.1882, esp. eq. 7.15
"""
n_disp = len(disps)
n_offd = (d * (d - 1)) // 2
dm = np.zeros((n_disp, d * d))
# coding: utf-8
import warnings
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import FLOAT_DTYPES
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
class CutOff(BaseEstimator, TransformerMixin):
"""Transform features cutting values out of established range
Args:
feature_range: Range of allowed values, default=`(0,1)`
Usage:
The recommended way of using this is::
from sklearn.pipeline import Pipeline
minmax_scaler = preprocessing.MinMaxScaler()
dsapp_cutoff = CutOff()
lr = linear_model.LogisticRegression()
pipeline =Pipeline([
('minmax_scaler',minmax_scaler),
('dsapp_cutoff', dsapp_cutoff),
('lr', lr)
])
pipeline.fit(X_train, y_train)
pipeline.predict(X_test)
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
return self
def transform(self, X):
feature_range = self.feature_range
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if np.any(X > feature_range[1]):
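# Hedged sketch of the likely remainder of transform() (an assumption, not the original
# source): values outside the configured range are clipped back to the nearest bound.
# X[X > feature_range[1]] = feature_range[1]
# X[X < feature_range[0]] = feature_range[0]
# return X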
import numpy as np
from scipy.stats.mstats import pearsonr
from sciunit import scores
from sciunit import errors
from cognibench.capabilities import PredictsLogpdf, ReturnsNumParams
from overrides import overrides
from cognibench.utils import negloglike
class BoundedScore(scores.FloatScore):
@overrides
def __init__(self, *args, min_score, max_score, **kwargs):
"""
Initialize the score. This class requires two mandatory keyword-only arguments.
Parameters
----------
score : float
Score value.
min_score : float
This value is used to clip the score value when coloring the scores in a notebook environment. This is necessary to avoid using very small/large values during coloring which crashes sciunit. However, this value does not affect the original score value or their ordering in any way.
max_score : float
This value is used to clip the score value when coloring the scores in a notebook environment. This is necessary to avoid using very small/large values during coloring which crashes sciunit. However, this value does not affect the original score value or their ordering in any way.
"""
super().__init__(*args, **kwargs)
self.min_score = min_score
self.max_score = max_score
class HigherBetterScore(BoundedScore):
_description = "Score values where higher is better"
@overrides
def color(self, value=None):
"""
Ensure that a normalized value is passed to parent class' color method which does the real work.
Parameters
----------
value : float
Score value to color. If None, function uses `self.score`
See Also
--------
:py:mod:`sciunit.scores`
"""
return super().color(self.norm_score)
@property
@overrides
def norm_score(self):
"""
Used for sorting. Higher is better.
Returns
-------
float
Score value normalized to 0-1 range computed by clipping self.score to the min/max range and then transforming to a value in [0, 1].
"""
clipped = min(self.max_score, max(self.min_score, self.score))
normalized = (clipped - self.min_score) / (self.max_score - self.min_score)
return normalized
class LowerBetterScore(BoundedScore):
"""
LowerBetterScore is a score type where lower values are better than larger values (e.g. mean squared error). This property is used by sciunit library when sorting or color coding the scores.
"""
_description = "Score values where lower is better"
@overrides
def color(self, value=None):
"""
Ensure that a normalized value is passed to parent class' color method which does the real work.
Parameters
----------
value : float
Score value to color. If None, function uses `self.score`
See Also
--------
:py:mod:`sciunit.scores`
"""
return super().color(self.norm_score)
@property
@overrides
def norm_score(self):
"""
Used for sorting. Lower is better.
Returns
-------
float
Score value normalized to 0-1 range computed by clipping self.score to the min/max range and then transforming to a value in [0, 1].
"""
neg_min = -self.min_score
neg_max = -self.max_score
clipped = max(neg_max, min(neg_min, -self.score))
normalized = 1 - (clipped - neg_min) / (neg_max - neg_min)
return normalized
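# Worked example of the normalization above: with min_score=0 and max_score=10, a raw
# score of 2.5 gives clipped = max(-10, min(0, -2.5)) = -2.5 and
# norm_score = 1 - (-2.5 - 0) / (-10 - 0) = 0.75, so lower raw scores map closer to 1.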
class NLLScore(LowerBetterScore):
"""
Negative log-likelihood score object.
This score object requires a corresponding test model to predict logpdf (or logpmf).
"""
required_capabilities = (PredictsLogpdf,)
@classmethod
def compute(cls, actions, predictions):
"""
Return NLL score as a Score object from a sequence of actions
and logpdf/logpmf predictions.
"""
nll = negloglike(actions, predictions)
return cls(nll)
class AICScore(LowerBetterScore):
"""
Akaike Information Criterion score object.
This score object requires a corresponding test model
- to predict logpdf (or logpmf),
- to be able to return its number of parameters.
"""
required_capabilities = (PredictsLogpdf, ReturnsNumParams)
@classmethod
def compute(cls, actions, predictions, *args, n_model_params):
"""
Return AIC score as a Score object from a sequence of actions
and logpdf/logpmf predictions.
"""
nll = negloglike(actions, predictions)
return cls(2 * nll + 2 * n_model_params)
class BICScore(LowerBetterScore):
"""
Bayesian Information Criterion score object.
This score object requires a corresponding test model
- to predict logpdf (or logpmf),
- to be able to return its number of parameters.
"""
required_capabilities = (PredictsLogpdf, ReturnsNumParams)
@classmethod
def compute(cls, actions, predictions, *args, n_model_params, n_samples):
"""
Return BIC score as a Score object from a sequence of actions
and logpdf/logpmf predictions.
"""
nll = negloglike(actions, predictions)
regularizer = np.dot(n_model_params, n_samples)
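# Note on the truncated remainder (an assumption, not the original source): the textbook
# BIC penalty is k * ln(n), i.e. np.dot(n_model_params, np.log(n_samples)), and the method
# would presumably finish with something like: return cls(2 * nll + regularizer)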
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 25 12:18:37 2016
@author: virati
This script will try to import raw hdEEG data in a continuous way, preprocess it, and generate the figures needed for "Aim 2" - Mapping Cortical Responses/Signatures to Stimulation Parameters
THIS IS AN UPDATED FILE NOW SPECIFIC TO 906 until I fix the code to be modular/OOP
"""
import scipy
import scipy.io as sio
import scipy.signal as sig
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats as stats
import mne
import pdb
import h5py
from collections import defaultdict
from DBSpace.visualizations import EEG_Viz as EEG_Viz
plt.rcParams['image.cmap'] = 'jet'
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 30}
matplotlib.rc('font', **font)
plt.close('all')
#%%
#data_dir = '/run/media/virati/Stokes/MDD_Data/hdEEG/Continuous/CHIRPS/'
data_dir = '/home/virati/MDD_Data/hdEEG/Continuous/CHIRPS/'
def extract_raw_mat(fname=[]):
if fname == []:
pt_dir = 'DBS906/'
file = 'DBS906_TurnOn_Day1_Sess1_20150827_024013.mat'
data_dir = '/home/virati/B04/'
Inp = sio.loadmat(data_dir + pt_dir + file)
else:
Inp = sio.loadmat(fname)
#Find the key corresponding to the data
data_key = [key for key in Inp.keys() if key[0:3] == 'DBS']
#Spectrogram of the first channel to see
chann = 32
#sg_sig = sig.decimate(Inp[data_key[0]][chann,:],q=10)
sg_sig = Inp[data_key[0]][chann,:]
#do filtering here
sos_lpf = sig.butter(10,20,fs=1000,output='sos')
fsg_sig = sig.sosfilt(sos_lpf,sg_sig)
T,F,SG = sig.spectrogram(sg_sig,nfft=2**10,window='blackmanharris',nperseg=1024,noverlap=500,fs=1000)
fig,ax1 = plt.subplots()
ax1.pcolormesh(F,T,10*np.log10(SG))
ax2 = ax1.twinx()
ax2.plot(np.linspace(0,fsg_sig.shape[0]/1000,fsg_sig.shape[0]),fsg_sig)
#Data matrix generation
Data_matr = Inp[data_key[0]]
#Spectrogram of the first channel to see
t_bounds = {'Pre_STIM':(760,780), 'BL_STIM':(790,810)}
t_vect = np.linspace(0,Data_matr.shape[1]/1000,Data_matr.shape[1])
signal = defaultdict(dict)
for ts, tt in t_bounds.items():
t_loc = np.where(np.logical_and(t_vect > tt[0],t_vect < tt[1]))[0]
signal[ts] = Inp[data_key[0]][:,t_loc] - np.mean(Inp[data_key[0]][:,t_loc],0)
#Save DataStructure
sio.savemat('/tmp/test',signal)
#%%
def load_raw_mat(fname):
signal = sio.loadmat(fname)
return signal['EXPORT']['chann'][0][0]
#for condit in ['OnTarget','OffTarget']:
for condit in ['OnTarget']:
pt = 'DBS906'
#condit = 'OffTarget'
file = data_dir + pt + '_Sample_Chirp_template/' + pt + '_' + condit + '_all.mat'
signal = load_raw_mat(fname=file)
def EEG_to_Matr(signal):
data = []
for ch in range(257):
data.append(signal[:,ch][0][0][0][0][0])
data = np.array(data)
return data
#%%
data = EEG_to_Matr(signal)
mean_sig = np.mean(data,0)
#Re-reference to mean
for ch in range(257):
data[ch] = data[ch] - mean_sig
#Decimate down all the data
test_dec = sig.decimate(data,10,zero_phase=True)
plt.plot(test_dec.T)
plt.title('Plotting the decimated Data')
#%%
ds_fact = 1
fs = 500
epoch = defaultdict(dict)
alpha_t = defaultdict(dict)
nfft=512
#calculate PSD of each channel
snippets = {'Baseline':(0,21000),'EarlyStim':(27362,27362+21000)}
for elabel,ebos in snippets.items():
#channel x NFFT below
P = np.zeros((257,257))
alpha_pow = np.zeros((257,85))
for ch in range(257):
sig_filt = sig.decimate(data[ch][ebos[0]:ebos[1]],ds_fact,zero_phase=True)
#just do a welch estimate
f,Pxx = sig.welch(sig_filt,fs=fs/ds_fact,window='blackmanharris',nperseg=512,noverlap=128,nfft=2**10)
#First, we're going to go through the timseries, segment it out, and classify each segment in a partial-responsive GMM model
#do a spectrogram and then find median
F,T,SG = sig.spectrogram(sig_filt,nperseg=256,noverlap=10,window=sig.get_window('blackmanharris',256),fs=fs/ds_fact,nfft=512)
#Take the median along the time axis of the SG to find the median PSD for the epoch label
Pxx = np.median(SG,axis=1)
#find timeseries of alpha oscillatory power
falpha = np.where(np.logical_and(F > 8,F < 14))
#Frequency is what dimension?? probably 1
alpha_tcourse = np.median(SG[falpha,:],1)
P[ch,:] = Pxx
alpha_pow[ch,:] = alpha_tcourse
epoch[elabel] = P
alpha_t[elabel] = alpha_pow
#Compute diff PSD
diff_PSD = 10*np.log10(epoch['EarlyStim']) - 10*np.log10(epoch['Baseline'])
#%%
plt.figure()
_ = plt.plot(F,diff_PSD.T,alpha=0.2)
plt.axhline(y=0,linewidth=5)
plt.title('Plotting the change in PSD from Baseline to Early Stim')
#%%
def plot_ts_chann(data,ch,ds_fact=1):
plt.figure()
sel_sig = sig.decimate(data[ch][:],ds_fact,zero_phase=True)
plt.plot(sel_sig)
#%%
fig,ax = plt.subplots()
P = epoch['EarlyStim']
ax.axhline(y=0)
ax.plot(F,10*np.log10(P.T))
# Only draw spine between the y-ticks
ax.spines['left'].set_bounds(-1, 1)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.title('Plotting the Early Stim Epoch Alone')
#%%
# Here we'll plot the decimates ts
# choose a random subsample of channels
from numpy.random import default_rng
#rand_channs = default_rng().choice(257,size=5,replace=False)
rand_channs = [32]
ds_fact=5
decimated_sigs = sig.decimate(data[rand_channs][:],ds_fact,zero_phase=True)
plt.figure()
plt.plot(decimated_sigs.T)
#%%
#%%
#Do a spectrogram of one of the channels
ds_fact = 1
ch = [225]
if len(ch) == 1:
sel_sig = sig.decimate(data[ch[0]][:],ds_fact,zero_phase=True)
else:
sel_sig = sig.decimate(data[ch[0]][:] - data[ch[1]][:],ds_fact,zero_phase=True)
plt.figure()
F,T,SG = sig.spectrogram(sel_sig,nperseg=512,noverlap=500,window=sig.get_window('blackmanharris',512),fs=fs/ds_fact)
def poly_sub(fVect,psd,order=1):
polyCoeff = np.polyfit(fVect,10*np.log10(psd),order)
polyfunc = np.poly1d(polyCoeff)
polyitself = polyfunc(fVect)
postpsd = 10**(10*np.log10(psd) - polyitself)
if (postpsd == 0).any(): raise Exception;
#plt.figure()
#plt.plot(10*np.log10(psd))
#plt.plot(polyitself);pdb.set_trace()
return postpsd
def poly_sub_SG(f,SG):
post_SG = np.zeros_like(SG)
for ii in range(SG.shape[1]):
post_SG[:,ii] = poly_sub(f,SG[:,ii])
return post_SG
#pSG = poly_sub_SG(F,SG)
def norm_SG(f,SG):
baseline = np.mean(SG[:,0:1000],axis=1)
plt.plot(baseline)
post_SG = np.zeros_like(SG)
for ii in range(SG.shape[1]):
post_SG[:,ii] = SG[:,ii]/baseline
return post_SG
nSG = norm_SG(F,SG)
plt.figure()
plt.pcolormesh(T,F,10*np.log10(SG),rasterized=True)
alpha_idxs = np.where(np.logical_and(F < 7,F>2))
plt.plot(T,10*np.log10(np.mean(nSG[alpha_idxs,:].squeeze(),axis=0)))
plt.title('TimeFrequency Signal of Channel ' + str(ch))
#%%
plt.figure()
plt.plot(sel_sig)
#take out sel_sig and sweep the chirp through it
#%%
#Do Chirplet Search here
tvect = np.linspace(0,5,5*1000)
simil = np.zeros((20,20))
index = np.zeros((20,20))
from typing import Optional, List, Callable, Dict
import abc
import numpy as np
from acme import types, specs
from mtsgi.utils.graph_utils import SubtaskGraph
from mtsgi.envs.base_config import WobConfig # only for typing
class BaseWoBEnv(abc.ABC):
def __init__(
self,
rank: int,
config_factory: List[Callable[[], WobConfig]],
keep_pristine: bool,
verbose_level: Optional[int] = 0,
):
# Set environment configurations
self.rank = rank
# Set task-info. (For multi-task env, set this in reset_task())
self._configs = config_factory
self._keep_pristine = keep_pristine
assert all([callable(config) for config in self._configs])
# Sample at least one valid task config (default seed = 0).
self.reset_task(task_index=0)
# params
self.env_done = False
self.step_count = 0
# TODO: Do we need action mask?
self.action_mask = False
self._verbose_level = verbose_level
# Set observation specs.
observation = self.reset()
self._observation_specs = {
k: specs.Array(shape=v.shape, dtype=v.dtype, name=k) \
for k, v in observation.items()
}
def reset_task(self, task_index: Optional[int] = None):
# TODO: Remove the assertion and support random task sampling when different
# graph across the batch is supported. (See environment_loop.py L225)
assert task_index is not None
config_factory = self._configs[task_index % len(self._configs)]
self.config: WobConfig = config_factory(seed=task_index,
keep_pristine=self._keep_pristine)
from mtsgi.envs import base_config as bc
self._subtask_pool_name_to_id = bc.SUBTASK_POOL_NAME_TO_ID
self._option_id_to_name = bc.OPTION_ID_TO_NAME
self._option_name_to_id = bc.OPTION_NAME_TO_ID
return self.config
@property
def num_total_subtasks(self):
return len(self._subtask_pool_name_to_id)
@property
def action_dim(self):
# TODO: This might differ from num_total_subtasks.
return len(self._option_id_to_name)
@property
def num_graphs(self):
return self.config.num_graphs
@property
def graph(self):
return self.config.graph
@property
def subtasks(self):
return self.config.subtasks
@property
def subtask_reward(self):
return self.config.subtask_reward
@property
def num_subtasks(self):
return len(self.subtasks)
@property
def max_step(self):
return self.config.max_step
@property
def task(self):
return self.config
@property
def pool_to_index(self):
return self.config._pool_to_index
@property
def index_to_pool(self):
return self.config._index_to_pool
### Communicating with the agent (actor) --> should be id-based
@property
def task_embedding(self) -> SubtaskGraph:
reward_vec = np.zeros((self.num_subtasks,), dtype=np.float)
for i, subtask in enumerate(self.subtasks):
reward_vec[i] = self.subtask_reward[subtask]
# TODO: generate ground truth graph information that GRProp can interpret.
return SubtaskGraph(
numP=None,
numA=None,
index_to_pool=self.index_to_pool,
pool_to_index=self.pool_to_index,
subtask_reward=reward_vec, # index-based
W_a=None,
W_o=None,
ORmat=None,
ANDmat=None,
tind_by_layer=None
)
### For connecting adaptation-test envs ~~ ###
def set_task(self, task):
"""fixed the current task to the given task. used for aligning the task
between the adaptation & test environments.
"""
self.config = task
def reset(self):
# Reset episode-based internal states
self.step_count = 0
# TODO: Do we need action mask?
self.action_mask = False # XXX (prev) action is not valid when env resets
self.done = False
self.step_done = False
self.mask = None
self.completion = None
self.eligibility = None
# Reset & update batch_params
self._update_batch_params(reset=True)
assert not self.done, "done cannot be True here"
observation = self._get_observation()
return observation
def _update_batch_params(self, action=None, step=None, reset=False):
'''Reset & Update mask, completion, and eligibility.
'''
if reset:
completion = {name: False for name in self.subtasks}
else:
completion = self._update_completion(action)
self.eligibility = self._update_eligibility(completion)
self.mask = self._update_mask(completion)
self.completion = completion
# 2. time_over
if step is not None:
self.step_count += step
time_over = self.step_count >= self.max_step # Time exceeds episode limit
agent_over = not any(self.mask[name] and self.eligibility[name] for name in self.eligibility) # No more action (option) left to execute
action_over = self._check_environment_done(action) # Current action terminates environment
self.done = time_over or agent_over or action_over
self.step_done = time_over
if self._verbose_level > 1 and self.rank == 0:
print('='*40)
print('[%d] Available options:'%(self.step_count))
for subtask_name in self.eligibility:
if self.eligibility[subtask_name] and self.mask[subtask_name]:
print(subtask_name)
def _get_observation(self):
# Comprehensive version. slower.
completion = self._dict_to_array(
input=self.completion,
dim=self.num_total_subtasks,
mapping=self._subtask_pool_name_to_id
)
# Note: the dimension (action_dim) should be consistent with
# WobConfig's pool_to_index (_construct_mappings).
mask = self._dict_to_array(
input=self.mask,
dim=self.action_dim,
# TODO: This had different number of subtasks from SUBTASK_POOL_NAME_TO_ID,
# which results in different dimensions (completion.shape != mask)
mapping=self._option_name_to_id
)
eligibility = self._dict_to_array(
input=self.eligibility,
dim=self.action_dim,
# TODO: This had different number of subtasks from SUBTASK_POOL_NAME_TO_ID,
# which results in different dimensions (completion.shape != eligibility)
mapping=self._option_name_to_id
)
# Faster version. Use it if this is a bottleneck
#completion = np.array([self.completion[name] if name in self.completion else 0 for name in SUBTASK_POOL_NAME_TO_ID], dtype=np.float32)
#mask = np.array([self.mask[name] if name in self.mask else 0 for name in OPTION_NAME_TO_ID], dtype=np.float32)
#eligibility = np.array([self.eligibility[name] if name in self.eligibility else 0 for name in OPTION_NAME_TO_ID], dtype=np.float32)
observation = {
'mask': mask,
'completion': completion,
'eligibility': eligibility,
'option_success': np.asarray(True),
}
# Remaining episode step.
remaining_steps = (self.max_step + 1 - self.step_count)
observation.update(
# TODO: Do we need action mask?
action_mask=np.array(self.action_mask, dtype=np.float32),
remaining_steps=np.array(remaining_steps, dtype=np.int32),
termination=np.array(self.done, dtype=np.bool)
"""
This is the main script of main GUI of the OXCART Atom Probe.
@author: <NAME> <<EMAIL>>
"""
import sys
import numpy as np
import nidaqmx
import time
import threading
import datetime
import os
# PyQt and PyQtgraph libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QScreen, QPixmap, QImage
import pyqtgraph as pg
import pyqtgraph.exporters
# Serial ports and Camera libraries
import serial.tools.list_ports
from pypylon import pylon
# Local project scripts
import oxcart
import variables
from devices.camera import Camera
from devices import initialize_devices
class Ui_OXCART(Camera, object):
"""
The GUI class of the Oxcart
"""
def __init__(self, devices, tlFactory, cameras, converter, lock):
super().__init__(devices, tlFactory, cameras, converter) # Cameras variables and converter
self.lock = lock # Lock for thread ...
def setupUi(self, OXCART):
OXCART.setObjectName("OXCART")
OXCART.resize(3400, 1800)
self.centralwidget = QtWidgets.QWidget(OXCART)
self.centralwidget.setObjectName("centralwidget")
# self.vdc_time = QtWidgets.QWidget(self.centralwidget)
self.vdc_time = pg.PlotWidget(self.centralwidget)
self.vdc_time.setGeometry(QtCore.QRect(530, 260, 500, 500))
self.vdc_time.setObjectName("vdc_time")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(730, 210, 80, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(3030, 1520, 314, 106))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.start_button = QtWidgets.QPushButton(self.layoutWidget)
self.start_button.setObjectName("start_button")
self.gridLayout_2.addWidget(self.start_button, 1, 0, 1, 1)
self.stop_button = QtWidgets.QPushButton(self.layoutWidget)
self.stop_button.setObjectName("stop_button")
self.gridLayout_2.addWidget(self.stop_button, 2, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(1230, 210, 156, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
# self.detection_rate_viz = QtWidgets.QWidget(self.centralwidget)
self.detection_rate_viz = pg.PlotWidget(self.centralwidget)
self.detection_rate_viz.setGeometry(QtCore.QRect(1080, 260, 500, 500))
self.detection_rate_viz.setObjectName("detection_rate_viz")
self.label_19 = QtWidgets.QLabel(self.centralwidget)
self.label_19.setGeometry(QtCore.QRect(710, 830, 134, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
###
# self.visualization = QtWidgets.QWidget(self.centralwidget)
self.visualization = pg.PlotWidget(self.centralwidget)
self.visualization.setGeometry(QtCore.QRect(530, 870, 500, 500))
self.visualization.setObjectName("visualization")
self.detector_circle = pg.QtGui.QGraphicsEllipseItem(0, 0, 2400, 2400) # x, y, width, height
self.detector_circle.setPen(pg.mkPen(color=(255, 0, 0), width=1))
self.visualization.addItem(self.detector_circle)
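# Circular outline overlaid on the detection visualization, presumably marking the
# active area of the position-sensitive detector in plot coordinates.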
###
self.label_24 = QtWidgets.QLabel(self.centralwidget)
self.label_24.setGeometry(QtCore.QRect(1280, 820, 51, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
# self.temperature = QtWidgets.QWidget(self.centralwidget)
self.temperature = pg.PlotWidget(self.centralwidget)
self.temperature.setGeometry(QtCore.QRect(2530, 1400, 411, 311))
self.temperature.setObjectName("temperature")
self.label_18 = QtWidgets.QLabel(self.centralwidget)
self.label_18.setGeometry(QtCore.QRect(10, 1150, 101, 41))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.Error = QtWidgets.QLabel(self.centralwidget)
self.Error.setGeometry(QtCore.QRect(530, 1400, 1241, 51))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
font.setStrikeOut(False)
self.Error.setFont(font)
self.Error.setAlignment(QtCore.Qt.AlignCenter)
self.Error.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.Error.setObjectName("Error")
self.diagram = QtWidgets.QLabel(self.centralwidget)
self.diagram.setGeometry(QtCore.QRect(10, 1190, 481, 371))
self.diagram.setText("")
self.diagram.setObjectName("diagram")
self.label_29 = QtWidgets.QLabel(self.centralwidget)
self.label_29.setGeometry(QtCore.QRect(1810, 830, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_29.setFont(font)
self.label_29.setObjectName("label_29")
self.label_30 = QtWidgets.QLabel(self.centralwidget)
self.label_30.setGeometry(QtCore.QRect(1810, 230, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_30.setFont(font)
self.label_30.setObjectName("label_30")
self.label_31 = QtWidgets.QLabel(self.centralwidget)
self.label_31.setGeometry(QtCore.QRect(2700, 840, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_31.setFont(font)
self.label_31.setObjectName("label_31")
self.label_32 = QtWidgets.QLabel(self.centralwidget)
self.label_32.setGeometry(QtCore.QRect(2700, 220, 110, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_32.setFont(font)
self.label_32.setObjectName("label_32")
self.label_33 = QtWidgets.QLabel(self.centralwidget)
self.label_33.setGeometry(QtCore.QRect(2220, 800, 171, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_33.setFont(font)
self.label_33.setObjectName("label_33")
self.label_34 = QtWidgets.QLabel(self.centralwidget)
self.label_34.setGeometry(QtCore.QRect(2200, 190, 171, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_34.setFont(font)
self.label_34.setObjectName("label_34")
self.light = QtWidgets.QPushButton(self.centralwidget)
self.light.setGeometry(QtCore.QRect(3120, 50, 101, 46))
self.light.setObjectName("light")
self.led_light = QtWidgets.QLabel(self.centralwidget)
self.led_light.setGeometry(QtCore.QRect(3240, 40, 111, 61))
self.led_light.setAlignment(QtCore.Qt.AlignCenter)
self.led_light.setObjectName("led_light")
self.vacuum_main = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_main.setGeometry(QtCore.QRect(2270, 1510, 231, 91))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.vacuum_main.setFont(font)
self.vacuum_main.setObjectName("vacuum_main")
self.vacuum_buffer = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_buffer.setGeometry(QtCore.QRect(1780, 1500, 231, 91))
font = QtGui.QFont()
font.setPointSize(8)
self.vacuum_buffer.setFont(font)
self.vacuum_buffer.setObjectName("vacuum_buffer")
self.vacuum_load_lock = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_load_lock.setGeometry(QtCore.QRect(1190, 1500, 231, 91))
self.vacuum_load_lock.setObjectName("vacuum_load_lock")
self.label_35 = QtWidgets.QLabel(self.centralwidget)
self.label_35.setGeometry(QtCore.QRect(2020, 1540, 241, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_35.setFont(font)
self.label_35.setObjectName("label_35")
self.label_36 = QtWidgets.QLabel(self.centralwidget)
self.label_36.setGeometry(QtCore.QRect(1490, 1540, 251, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_36.setFont(font)
self.label_36.setObjectName("label_36")
self.label_37 = QtWidgets.QLabel(self.centralwidget)
self.label_37.setGeometry(QtCore.QRect(980, 1540, 181, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_37.setFont(font)
self.label_37.setObjectName("label_37")
self.label_38 = QtWidgets.QLabel(self.centralwidget)
self.label_38.setGeometry(QtCore.QRect(2050, 1650, 191, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_38.setFont(font)
self.label_38.setObjectName("label_38")
self.temp = QtWidgets.QLCDNumber(self.centralwidget)
self.temp.setGeometry(QtCore.QRect(2270, 1620, 231, 91))
self.temp.setObjectName("temp")
####
# self.cam_s_o = QtWidgets.QLabel(self.centralwidget)
self.cam_s_o = pg.ImageView(self.centralwidget)
self.cam_s_o.adjustSize()
self.cam_s_o.ui.histogram.hide()
self.cam_s_o.ui.roiBtn.hide()
self.cam_s_o.ui.menuBtn.hide()
self.cam_s_o.setGeometry(QtCore.QRect(1630, 260, 500, 500))
# self.cam_s_o.setText("")
self.cam_s_o.setObjectName("cam_s_o")
# self.cam_b_o = QtWidgets.QLabel(self.centralwidget)
self.cam_b_o = pg.ImageView(self.centralwidget)
self.cam_b_o.adjustSize()
self.cam_b_o.ui.histogram.hide()
self.cam_b_o.ui.roiBtn.hide()
self.cam_b_o.ui.menuBtn.hide()
self.cam_b_o.setGeometry(QtCore.QRect(1630, 870, 500, 500))
# self.cam_b_o.setText("")
####
self.cam_b_o.setObjectName("cam_b_o")
self.cam_s_d = QtWidgets.QLabel(self.centralwidget)
self.cam_s_d.setGeometry(QtCore.QRect(2150, 260, 1200, 500))
self.cam_s_d.setText("")
self.cam_s_d.setObjectName("cam_s_d")
self.cam_b_d = QtWidgets.QLabel(self.centralwidget)
self.cam_b_d.setGeometry(QtCore.QRect(2150, 870, 1200, 500))
self.cam_b_d.setText("")
self.cam_b_d.setObjectName("cam_b_d")
self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget1.setGeometry(QtCore.QRect(650, 1580, 235, 131))
self.layoutWidget1.setObjectName("layoutWidget1")
self.gridLayout_6 = QtWidgets.QGridLayout(self.layoutWidget1)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.led_pump_load_lock = QtWidgets.QLabel(self.layoutWidget1)
self.led_pump_load_lock.setAlignment(QtCore.Qt.AlignCenter)
self.led_pump_load_lock.setObjectName("led_pump_load_lock")
self.gridLayout_6.addWidget(self.led_pump_load_lock, 0, 0, 2, 1)
self.pump_load_lock_switch = QtWidgets.QPushButton(self.layoutWidget1)
self.pump_load_lock_switch.setObjectName("pump_load_lock_switch")
self.gridLayout_6.addWidget(self.pump_load_lock_switch, 2, 0, 1, 1)
# self.histogram = QtWidgets.QWidget(self.centralwidget)
self.histogram = pg.PlotWidget(self.centralwidget)
self.histogram.setGeometry(QtCore.QRect(1080, 870, 500, 500))
self.histogram.setObjectName("histogram")
self.label_40 = QtWidgets.QLabel(self.centralwidget)
self.label_40.setGeometry(QtCore.QRect(1480, 1640, 291, 31))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_40.setFont(font)
self.label_40.setObjectName("label_40")
self.vacuum_buffer_back = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_buffer_back.setGeometry(QtCore.QRect(1780, 1610, 231, 91))
font = QtGui.QFont()
font.setPointSize(8)
self.vacuum_buffer_back.setFont(font)
self.vacuum_buffer_back.setObjectName("vacuum_buffer_back")
self.vacuum_load_lock_back = QtWidgets.QLCDNumber(self.centralwidget)
self.vacuum_load_lock_back.setGeometry(QtCore.QRect(1190, 1610, 231, 91))
self.vacuum_load_lock_back.setObjectName("vacuum_load_lock_back")
self.label_39 = QtWidgets.QLabel(self.centralwidget)
self.label_39.setGeometry(QtCore.QRect(950, 1640, 231, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_39.setFont(font)
self.label_39.setObjectName("label_39")
self.layoutWidget2 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget2.setGeometry(QtCore.QRect(20, 1580, 476, 131))
self.layoutWidget2.setObjectName("layoutWidget2")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.led_main_chamber = QtWidgets.QLabel(self.layoutWidget2)
self.led_main_chamber.setAlignment(QtCore.Qt.AlignCenter)
self.led_main_chamber.setObjectName("led_main_chamber")
self.gridLayout.addWidget(self.led_main_chamber, 0, 0, 1, 1)
self.led_load_lock = QtWidgets.QLabel(self.layoutWidget2)
self.led_load_lock.setAlignment(QtCore.Qt.AlignCenter)
self.led_load_lock.setObjectName("led_load_lock")
self.gridLayout.addWidget(self.led_load_lock, 0, 1, 1, 1)
self.led_cryo = QtWidgets.QLabel(self.layoutWidget2)
self.led_cryo.setAlignment(QtCore.Qt.AlignCenter)
self.led_cryo.setObjectName("led_cryo")
self.gridLayout.addWidget(self.led_cryo, 0, 2, 1, 1)
self.main_chamber_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.main_chamber_switch.setObjectName("main_chamber_switch")
self.gridLayout.addWidget(self.main_chamber_switch, 1, 0, 1, 1)
self.load_lock_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.load_lock_switch.setObjectName("load_lock_switch")
self.gridLayout.addWidget(self.load_lock_switch, 1, 1, 1, 1)
self.cryo_switch = QtWidgets.QPushButton(self.layoutWidget2)
self.cryo_switch.setObjectName("cryo_switch")
self.gridLayout.addWidget(self.cryo_switch, 1, 2, 1, 1)
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(530, 30, 2581, 140))
self.textEdit.setObjectName("textEdit")
self.layoutWidget3 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget3.setGeometry(QtCore.QRect(10, 890, 436, 242))
self.layoutWidget3.setObjectName("layoutWidget3")
self.gridLayout_4 = QtWidgets.QGridLayout(self.layoutWidget3)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_11 = QtWidgets.QLabel(self.layoutWidget3)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.gridLayout_4.addWidget(self.label_11, 0, 0, 1, 1)
self.label_12 = QtWidgets.QLabel(self.layoutWidget3)
self.label_12.setObjectName("label_12")
self.gridLayout_4.addWidget(self.label_12, 1, 0, 1, 1)
self.elapsed_time = QtWidgets.QLineEdit(self.layoutWidget3)
self.elapsed_time.setText("")
self.elapsed_time.setObjectName("elapsed_time")
self.gridLayout_4.addWidget(self.elapsed_time, 1, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(self.layoutWidget3)
self.label_13.setObjectName("label_13")
self.gridLayout_4.addWidget(self.label_13, 2, 0, 1, 1)
self.total_ions = QtWidgets.QLineEdit(self.layoutWidget3)
self.total_ions.setText("")
self.total_ions.setObjectName("total_ions")
self.gridLayout_4.addWidget(self.total_ions, 2, 1, 1, 1)
self.label_14 = QtWidgets.QLabel(self.layoutWidget3)
self.label_14.setObjectName("label_14")
self.gridLayout_4.addWidget(self.label_14, 3, 0, 1, 1)
self.speciemen_voltage = QtWidgets.QLineEdit(self.layoutWidget3)
self.speciemen_voltage.setText("")
self.speciemen_voltage.setObjectName("speciemen_voltage")
self.gridLayout_4.addWidget(self.speciemen_voltage, 3, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.layoutWidget3)
self.label_16.setObjectName("label_16")
self.gridLayout_4.addWidget(self.label_16, 4, 0, 1, 1)
self.pulse_voltage = QtWidgets.QLineEdit(self.layoutWidget3)
self.pulse_voltage.setText("")
self.pulse_voltage.setObjectName("pulse_voltage")
self.gridLayout_4.addWidget(self.pulse_voltage, 4, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.layoutWidget3)
self.label_15.setObjectName("label_15")
self.gridLayout_4.addWidget(self.label_15, 5, 0, 1, 1)
self.detection_rate = QtWidgets.QLineEdit(self.layoutWidget3)
self.detection_rate.setText("")
self.detection_rate.setObjectName("detection_rate")
self.gridLayout_4.addWidget(self.detection_rate, 5, 1, 1, 1)
self.criteria_ions = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_ions.setGeometry(QtCore.QRect(500, 190, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_ions.setFont(font)
self.criteria_ions.setMouseTracking(True)
self.criteria_ions.setText("")
self.criteria_ions.setChecked(True)
self.criteria_ions.setObjectName("criteria_ions")
self.criteria_vdc = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_vdc.setGeometry(QtCore.QRect(500, 320, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_vdc.setFont(font)
self.criteria_vdc.setMouseTracking(True)
self.criteria_vdc.setText("")
self.criteria_vdc.setChecked(True)
self.criteria_vdc.setObjectName("criteria_vdc")
self.criteria_time = QtWidgets.QCheckBox(self.centralwidget)
self.criteria_time.setGeometry(QtCore.QRect(500, 150, 31, 29))
font = QtGui.QFont()
font.setItalic(False)
self.criteria_time.setFont(font)
self.criteria_time.setMouseTracking(True)
self.criteria_time.setText("")
self.criteria_time.setChecked(True)
self.criteria_time.setObjectName("criteria_time")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(11, 16, 490, 850))
self.widget.setObjectName("widget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout_3.addWidget(self.label, 0, 0, 1, 2)
self.parameters_source = QtWidgets.QComboBox(self.widget)
self.parameters_source.setObjectName("parameters_source")
self.parameters_source.addItem("")
self.parameters_source.addItem("")
self.gridLayout_3.addWidget(self.parameters_source, 0, 2, 1, 1)
self.label_43 = QtWidgets.QLabel(self.widget)
self.label_43.setObjectName("label_43")
self.gridLayout_3.addWidget(self.label_43, 1, 0, 1, 1)
self.ex_user = QtWidgets.QLineEdit(self.widget)
self.ex_user.setObjectName("ex_user")
self.gridLayout_3.addWidget(self.ex_user, 1, 2, 1, 1)
self.label_21 = QtWidgets.QLabel(self.widget)
self.label_21.setObjectName("label_21")
self.gridLayout_3.addWidget(self.label_21, 2, 0, 1, 1)
self.ex_name = QtWidgets.QLineEdit(self.widget)
self.ex_name.setObjectName("ex_name")
self.gridLayout_3.addWidget(self.ex_name, 2, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setObjectName("label_2")
self.gridLayout_3.addWidget(self.label_2, 3, 0, 1, 2)
self.ex_time = QtWidgets.QLineEdit(self.widget)
self.ex_time.setObjectName("ex_time")
self.gridLayout_3.addWidget(self.ex_time, 3, 2, 1, 1)
self.label_41 = QtWidgets.QLabel(self.widget)
self.label_41.setObjectName("label_41")
self.gridLayout_3.addWidget(self.label_41, 4, 0, 1, 2)
self.max_ions = QtWidgets.QLineEdit(self.widget)
self.max_ions.setObjectName("max_ions")
self.gridLayout_3.addWidget(self.max_ions, 4, 2, 1, 1)
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setObjectName("label_3")
self.gridLayout_3.addWidget(self.label_3, 5, 0, 1, 2)
self.ex_freq = QtWidgets.QLineEdit(self.widget)
self.ex_freq.setObjectName("ex_freq")
self.gridLayout_3.addWidget(self.ex_freq, 5, 2, 1, 1)
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setObjectName("label_4")
self.gridLayout_3.addWidget(self.label_4, 6, 0, 1, 2)
self.vdc_min = QtWidgets.QLineEdit(self.widget)
self.vdc_min.setObjectName("vdc_min")
self.gridLayout_3.addWidget(self.vdc_min, 6, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setObjectName("label_5")
self.gridLayout_3.addWidget(self.label_5, 7, 0, 1, 2)
self.vdc_max = QtWidgets.QLineEdit(self.widget)
self.vdc_max.setObjectName("vdc_max")
self.gridLayout_3.addWidget(self.vdc_max, 7, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.gridLayout_3.addWidget(self.label_6, 8, 0, 1, 1)
self.vdc_steps_up = QtWidgets.QLineEdit(self.widget)
self.vdc_steps_up.setObjectName("vdc_steps_up")
self.gridLayout_3.addWidget(self.vdc_steps_up, 8, 2, 1, 1)
self.label_28 = QtWidgets.QLabel(self.widget)
self.label_28.setObjectName("label_28")
self.gridLayout_3.addWidget(self.label_28, 9, 0, 1, 1)
self.vdc_steps_down = QtWidgets.QLineEdit(self.widget)
self.vdc_steps_down.setObjectName("vdc_steps_down")
self.gridLayout_3.addWidget(self.vdc_steps_down, 9, 2, 1, 1)
self.label_20 = QtWidgets.QLabel(self.widget)
self.label_20.setObjectName("label_20")
self.gridLayout_3.addWidget(self.label_20, 10, 0, 1, 2)
self.cycle_avg = QtWidgets.QLineEdit(self.widget)
self.cycle_avg.setObjectName("cycle_avg")
self.gridLayout_3.addWidget(self.cycle_avg, 10, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.widget)
self.label_8.setObjectName("label_8")
self.gridLayout_3.addWidget(self.label_8, 11, 0, 1, 2)
self.vp_min = QtWidgets.QLineEdit(self.widget)
self.vp_min.setObjectName("vp_min")
self.gridLayout_3.addWidget(self.vp_min, 11, 2, 1, 1)
self.label_9 = QtWidgets.QLabel(self.widget)
self.label_9.setObjectName("label_9")
self.gridLayout_3.addWidget(self.label_9, 12, 0, 1, 2)
self.vp_max = QtWidgets.QLineEdit(self.widget)
self.vp_max.setObjectName("vp_max")
self.gridLayout_3.addWidget(self.vp_max, 12, 2, 1, 1)
self.label_25 = QtWidgets.QLabel(self.widget)
self.label_25.setObjectName("label_25")
self.gridLayout_3.addWidget(self.label_25, 13, 0, 1, 2)
self.pulse_fraction = QtWidgets.QLineEdit(self.widget)
self.pulse_fraction.setObjectName("pulse_fraction")
self.gridLayout_3.addWidget(self.pulse_fraction, 13, 2, 1, 1)
self.label_23 = QtWidgets.QLabel(self.widget)
self.label_23.setObjectName("label_23")
self.gridLayout_3.addWidget(self.label_23, 14, 0, 1, 2)
self.pulse_frequency = QtWidgets.QLineEdit(self.widget)
self.pulse_frequency.setObjectName("pulse_frequency")
self.gridLayout_3.addWidget(self.pulse_frequency, 14, 2, 1, 1)
self.label_17 = QtWidgets.QLabel(self.widget)
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 15, 0, 1, 2)
self.detection_rate_init = QtWidgets.QLineEdit(self.widget)
self.detection_rate_init.setObjectName("detection_rate_init")
self.gridLayout_3.addWidget(self.detection_rate_init, 15, 2, 1, 1)
self.label_22 = QtWidgets.QLabel(self.widget)
self.label_22.setObjectName("label_22")
self.gridLayout_3.addWidget(self.label_22, 16, 0, 1, 1)
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.widget)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.doubleSpinBox.setMinimum(1.)
self.doubleSpinBox.setMaximum(3.)
self.doubleSpinBox.setSingleStep(0.1)
self.doubleSpinBox.setValue(1)
self.gridLayout_3.addWidget(self.doubleSpinBox, 16, 1, 1, 1)
self.hit_displayed = QtWidgets.QLineEdit(self.widget)
self.hit_displayed.setObjectName("hit_displayed")
self.gridLayout_3.addWidget(self.hit_displayed, 16, 2, 1, 1)
self.label_26 = QtWidgets.QLabel(self.widget)
self.label_26.setObjectName("label_26")
self.gridLayout_3.addWidget(self.label_26, 17, 0, 1, 1)
self.email = QtWidgets.QLineEdit(self.widget)
self.email.setText("")
self.email.setObjectName("email")
self.gridLayout_3.addWidget(self.email, 17, 2, 1, 1)
self.label_27 = QtWidgets.QLabel(self.widget)
self.label_27.setObjectName("label_27")
self.gridLayout_3.addWidget(self.label_27, 18, 0, 1, 1)
self.tweet = QtWidgets.QComboBox(self.widget)
self.tweet.setObjectName("tweet")
self.tweet.addItem("")
self.tweet.addItem("")
self.gridLayout_3.addWidget(self.tweet, 18, 2, 1, 1)
self.label_42 = QtWidgets.QLabel(self.widget)
self.label_42.setObjectName("label_42")
self.gridLayout_3.addWidget(self.label_42, 19, 0, 1, 1)
self.counter_source = QtWidgets.QComboBox(self.widget)
self.counter_source.setObjectName("counter_source")
self.counter_source.addItem("")
self.counter_source.addItem("")
self.counter_source.addItem("")
self.counter_source.addItem("")
self.gridLayout_3.addWidget(self.counter_source, 19, 2, 1, 1)
OXCART.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(OXCART)
self.menubar.setGeometry(QtCore.QRect(0, 0, 3400, 38))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
OXCART.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(OXCART)
self.statusbar.setObjectName("statusbar")
OXCART.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(OXCART)
self.actionExit.setObjectName("actionExit")
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(OXCART)
QtCore.QMetaObject.connectSlotsByName(OXCART)
#### Set 8 digits for each LCD to show
self.vacuum_main.setDigitCount(8)
self.vacuum_buffer.setDigitCount(8)
self.vacuum_buffer_back.setDigitCount(8)
self.vacuum_load_lock.setDigitCount(8)
self.vacuum_load_lock_back.setDigitCount(8)
self.temp.setDigitCount(8)
arrow1 = pg.ArrowItem(pos=(100, 1700), angle=-90)
# arrow2 = pg.ArrowItem(pos=(100, 2100), angle=90)
arrow3 = pg.ArrowItem(pos=(130, 1800), angle=0)
self.cam_b_o.addItem(arrow1)
# self.cam_b_o.addItem(arrow2)
self.cam_b_o.addItem(arrow3)
arrow1 = pg.ArrowItem(pos=(590, 620), angle=-90)
arrow2 = pg.ArrowItem(pos=(570, 1120), angle=90)
# arrow3 = pg.ArrowItem(pos=(890, 1100), angle=0)
self.cam_s_o.addItem(arrow1)
self.cam_s_o.addItem(arrow2)
# self.cam_s_o.addItem(arrow3)
####
def retranslateUi(self, OXCART):
_translate = QtCore.QCoreApplication.translate
OXCART.setWindowTitle(_translate("OXCART", "PyOXCART"))
###
OXCART.setWindowIcon(QtGui.QIcon('./png/logo3.png'))
###
self.label_7.setText(_translate("OXCART", "Voltage"))
self.start_button.setText(_translate("OXCART", "Start"))
###
self._translate = QtCore.QCoreApplication.translate
self.start_button.clicked.connect(self.thread_main)
self.thread = MainThread()
self.thread.signal.connect(self.finished_thread_main)
self.stop_button.setText(_translate("OXCART", "Stop"))
self.stop_button.clicked.connect(self.stop_ex)
###
self.label_10.setText(_translate("OXCART", "Detection Rate"))
self.label_19.setText(_translate("OXCART", "Visualization"))
self.label_24.setText(_translate("OXCART", "TOF"))
self.label_18.setText(_translate("OXCART", "Diagram"))
self.Error.setText(_translate("OXCART", "<html><head/><body><p><br/></p></body></html>"))
self.label_29.setText(_translate("OXCART", "Overview"))
self.label_30.setText(_translate("OXCART", "Overview"))
self.label_31.setText(_translate("OXCART", "Detail"))
self.label_32.setText(_translate("OXCART", "Detail"))
self.label_33.setText(_translate("OXCART", "Camera Bottom"))
self.label_34.setText(_translate("OXCART", "Camera Side"))
self.light.setText(_translate("OXCART", "Light"))
self.led_light.setText(_translate("OXCART", "light"))
self.label_35.setText(_translate("OXCART", "Main Chamber (mBar)"))
self.label_36.setText(_translate("OXCART", "Buffer Chamber (mBar)"))
self.label_37.setText(_translate("OXCART", "Load lock (mBar)"))
###
self.main_chamber_switch.clicked.connect(lambda: self.gates(1))
self.load_lock_switch.clicked.connect(lambda: self.gates(2))
self.cryo_switch.clicked.connect(lambda: self.gates(3))
self.light.clicked.connect(lambda: self.light_switch())
self.pump_load_lock_switch.clicked.connect(lambda: self.pump_switch())
###
self.label_38.setText(_translate("OXCART", "Temperature (K)"))
self.led_pump_load_lock.setText(_translate("OXCART", "pump"))
self.pump_load_lock_switch.setText(_translate("OXCART", "Load Lock Pump"))
self.label_40.setText(_translate("OXCART", "Buffer Chamber Pre (mBar)"))
self.label_39.setText(_translate("OXCART", "Load Lock Pre(mBar)"))
self.led_main_chamber.setText(_translate("OXCART", "Main"))
self.led_load_lock.setText(_translate("OXCART", "Load"))
self.led_cryo.setText(_translate("OXCART", "Cryo"))
self.main_chamber_switch.setText(_translate("OXCART", "Main Chamber"))
self.load_lock_switch.setText(_translate("OXCART", "Load Lock"))
self.cryo_switch.setText(_translate("OXCART", "Cryo"))
self.textEdit.setHtml(_translate("OXCART", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.875pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">ex_user=user1;</span>ex_name=test1;ex_time=90;max_ions=2000;ex_freq=10;vdc_min=500;vdc_max=4000;vdc_steps_up=100;vdc_steps_down=100;vp_min=328;vp_max=3281;pulse_fraction=20;pulse_frequency=200;detection_rate_init=1;hit_displayed=20000;email=;tweet=No;counter_source=TDC<span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">;criteria_time=True;criteria_ions=False;criteria_vdc=False</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'JetBrains Mono,monospace\'; font-size:8pt; color:#000000;\">ex_user=user2;ex_name=test2;ex_time=100;max_ions=3000;ex_freq=5;vdc_min=1000;vdc_max=3000;vdc_steps_up=50;vdc_steps_down=50;vp_min=400;vp_max=2000;pulse_fraction=15;pulse_frequency=200;detection_rate_init=2;hit_displayed=40000;email=;tweet=No;counter_source=Pulse Counter;criteria_time=False;criteria_ions=False;criteria_vdc=True</span></p></body></html>"))
self.label_11.setText(_translate("OXCART", "Run Statistics"))
self.label_12.setText(_translate("OXCART", "Elapsed Time (S):"))
self.label_13.setText(_translate("OXCART", "Total Ions"))
self.label_14.setText(_translate("OXCART", "Specimen Voltage (V)"))
self.label_16.setText(_translate("OXCART", "Pulse Voltage (V)"))
self.label_15.setText(_translate("OXCART", "Detection Rate (%)"))
self.label.setText(_translate("OXCART", "Setup Parameters"))
self.parameters_source.setItemText(0, _translate("OXCART", "TextBox"))
self.parameters_source.setItemText(1, _translate("OXCART", "TextLine"))
self.label_43.setText(_translate("OXCART", "Experiment User"))
self.ex_user.setText(_translate("OXCART", "user"))
self.label_21.setText(_translate("OXCART", "Experiment Name"))
self.ex_name.setText(_translate("OXCART", "test"))
self.label_2.setText(_translate("OXCART", "Max. Experiment Time (S)"))
self.ex_time.setText(_translate("OXCART", "90"))
self.label_41.setText(_translate("OXCART", "Max. Number of Ions"))
self.max_ions.setText(_translate("OXCART", "2000"))
self.label_3.setText(_translate("OXCART", "Control refresh Freq.(Hz)"))
self.ex_freq.setText(_translate("OXCART", "10"))
self.label_4.setText(_translate("OXCART", "Specimen Start Voltage (V)"))
self.vdc_min.setText(_translate("OXCART", "500"))
self.label_5.setText(_translate("OXCART", "Specimen Stop Voltage (V)"))
self.vdc_max.setText(_translate("OXCART", "4000"))
self.label_6.setText(_translate("OXCART", "K_p Upwards"))
self.vdc_steps_up.setText(_translate("OXCART", "100"))
self.label_28.setText(_translate("OXCART", "K_p Downwards"))
self.vdc_steps_down.setText(_translate("OXCART", "100"))
self.label_20.setText(_translate("OXCART", "Cycle for Avg. (Hz)"))
self.cycle_avg.setText(_translate("OXCART", "10"))
self.label_8.setText(_translate("OXCART", "Pulse Min. Voltage (V)"))
self.vp_min.setText(_translate("OXCART", "328"))
self.label_9.setText(_translate("OXCART", "Pulse Max. Voltage (V)"))
self.vp_max.setText(_translate("OXCART", "3281"))
self.label_25.setText(_translate("OXCART", "Pulse Fraction (%)"))
self.pulse_fraction.setText(_translate("OXCART", "20"))
self.label_23.setText(_translate("OXCART", "Pulse Frequency (KHz)"))
self.pulse_frequency.setText(_translate("OXCART", "200"))
self.label_17.setText(_translate("OXCART", "Detection Rate (%)"))
self.detection_rate_init.setText(_translate("OXCART", "1"))
self.label_22.setText(_translate("OXCART", "# Hits Displayed"))
self.hit_displayed.setText(_translate("OXCART", "20000"))
self.label_26.setText(_translate("OXCART", "Email"))
self.label_27.setText(_translate("OXCART", "Twitter"))
self.tweet.setItemText(0, _translate("OXCART", "No"))
self.tweet.setItemText(1, _translate("OXCART", "Yes"))
self.label_42.setText(_translate("OXCART", "Counter Source"))
self.counter_source.setItemText(0, _translate("OXCART", "TDC"))
self.counter_source.setItemText(1, _translate("OXCART", "TDC_Raw"))
self.counter_source.setItemText(2, _translate("OXCART", "Pulse Counter"))
self.counter_source.setItemText(3, _translate("OXCART", "DRS"))
self.menuFile.setTitle(_translate("OXCART", "File"))
self.actionExit.setText(_translate("OXCART", "Exit"))
# High Voltage visualization ################
self.x_vdc = np.arange(1000) # 1000 time points
self.y_vdc = np.zeros(1000) # 1000 data points
self.y_vdc[:] = np.nan
self.y_vps = np.zeros(1000) # 1000 data points
self.y_vps[:] = np.nan
# Add legend
self.vdc_time.addLegend()
pen_vdc = pg.mkPen(color=(255, 0, 0), width=6)
pen_vps = pg.mkPen(color=(0, 0, 255), width=3)
self.data_line_vdc = self.vdc_time.plot(self.x_vdc, self.y_vdc, name="High Vol.", pen=pen_vdc)
self.data_line_vps = self.vdc_time.plot(self.x_vdc, self.y_vps, name="Pulse Vol.", pen=pen_vps)
self.vdc_time.setBackground('w')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.vdc_time.setLabel("left", "High Voltage (v)", **styles)
self.vdc_time.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.vdc_time.showGrid(x=True, y=True)
# Add Range
self.vdc_time.setXRange(0, 1000, padding=0.05)
self.vdc_time.setYRange(0, 15000, padding=0.05)
# Detection Visualization #########################
self.x_dtec = np.arange(1000) # 1000 time points
self.y_dtec = np.zeros(1000) # 1000 data points
self.y_dtec[:] = np.nan
pen_dtec = pg.mkPen(color=(255, 0, 0), width=6)
self.data_line_dtec = self.detection_rate_viz.plot(self.x_dtec, self.y_dtec, pen=pen_dtec)
self.detection_rate_viz.setBackground('w')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.detection_rate_viz.setLabel("left", "Counts", **styles)
self.detection_rate_viz.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.detection_rate_viz.showGrid(x=True, y=True)
# Add Range
self.detection_rate_viz.setXRange(0, 1000, padding=0.05)
self.detection_rate_viz.setYRange(0, 4000, padding=0.05)
# Temperature #########################
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.histogram.setLabel("left", "Frequency (counts)", **styles)
self.histogram.setLabel("bottom", "Time (ns)", **styles)
# Temperature #########################
self.x_tem = np.arange(100) # 1000 time points
self.y_tem = np.zeros(100) # 1000 data points
self.y_tem[:] = np.nan
pen_dtec = pg.mkPen(color=(255, 0, 0), width=6)
self.data_line_tem = self.temperature.plot(self.x_tem, self.y_tem, pen=pen_dtec)
self.temperature.setBackground('b')
# Add Axis Labels
styles = {"color": "#f00", "font-size": "20px"}
self.temperature.setLabel("left", "Temperature (K)", **styles)
self.temperature.setLabel("bottom", "Time (s)", **styles)
# Add grid
self.temperature.showGrid(x=True, y=True)
# Add Range
self.temperature.setYRange(0, 100, padding=0.1)
# Visualization #####################
self.scatter = pg.ScatterPlotItem(
size=self.doubleSpinBox.value(), brush=pg.mkBrush(255, 255, 255, 120))
self.visualization.getPlotItem().hideAxis('bottom')
self.visualization.getPlotItem().hideAxis('left')
# timer plot, variables, and cameras
self.timer1 = QtCore.QTimer()
self.timer1.setInterval(1000)
self.timer1.timeout.connect(self.update_cameras)
self.timer1.start()
self.timer2 = QtCore.QTimer()
self.timer2.setInterval(1000)
self.timer2.timeout.connect(self.update_plot_data)
self.timer2.start()
self.timer3 = QtCore.QTimer()
self.timer3.setInterval(2000)
self.timer3.timeout.connect(self.statistics)
self.timer3.start()
# Diagram and LEDs ##############
self.diagram_close_all = QPixmap('./png/close_all.png')
self.diagram_main_open = QPixmap('./png/main_open.png')
self.diagram_load_open = QPixmap('./png/load_open.png')
self.diagram_cryo_open = QPixmap('./png/cryo_open.png')
self.led_red = QPixmap('./png/led-red-on.png')
self.led_green = QPixmap('./png/green-led-on.png')
self.diagram.setPixmap(self.diagram_close_all)
self.led_main_chamber.setPixmap(self.led_red)
self.led_load_lock.setPixmap(self.led_red)
self.led_cryo.setPixmap(self.led_red)
self.led_light.setPixmap(self.led_red)
self.led_pump_load_lock.setPixmap(self.led_green)
def thread_main(self):
"""
Main thread for running the experiment
"""
def read_update(text_line, index_line):
"""
Read one entry of the TextLine box.
This function is only run if TextLine is selected in the GUI.
It parses the selected line and writes the values into the corresponding widgets.
"""
_translate = QtCore.QCoreApplication.translate
text_line = text_line[index_line].split(';')
text_line_b = []
for i in range(len(text_line)):
text_line_b.append(text_line[i].split('='))
for i in range(len(text_line_b)):
if text_line_b[i][0] == 'ex_user':
self.ex_user.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_name':
self.ex_name.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_time':
self.ex_time.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'ex_freq':
self.ex_freq.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'max_ions':
self.max_ions.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_min':
self.vdc_min.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_max':
self.vdc_max.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'detection_rate_init':
self.detection_rate_init.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'pulse_fraction':
self.pulse_fraction.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'pulse_frequency':
self.pulse_frequency.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'hit_displayed':
self.hit_displayed.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'hdf5_path':
self.ex_name.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'email':
self.email.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'cycle_avg':
self.cycle_avg.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_steps_up':
self.vdc_steps_up.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vdc_steps_down':
self.vdc_steps_down.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vp_min':
self.vp_min.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'vp_max':
self.vp_max.setText(_translate("OXCART", text_line_b[i][1]))
if text_line_b[i][0] == 'counter_source':
if text_line_b[i][1] == 'TDC':
self.counter_source.setCurrentIndex(0)
if text_line_b[i][1] == 'TDC_Raw':
self.counter_source.setCurrentIndex(1)
if text_line_b[i][1] == 'Pulse Counter':
self.counter_source.setCurrentIndex(2)
if text_line_b[i][1] == 'DRS':
self.counter_source.setCurrentIndex(3)
if text_line_b[i][0] == 'tweet':
if text_line_b[i][1] == 'No':
self.tweet.setCurrentIndex(0)
if text_line_b[i][1] == 'Yes':
self.tweet.setCurrentIndex(1)
if text_line_b[i][0] == 'criteria_time':
if text_line_b[i][1] == 'True':
self.criteria_time.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_time.setChecked(False)
if text_line_b[i][0] == 'criteria_ions':
if text_line_b[i][1] == 'True':
self.criteria_ions.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_ions.setChecked(False)
if text_line_b[i][0] == 'criteria_vdc':
if text_line_b[i][1] == 'True':
self.criteria_vdc.setChecked(True)
elif text_line_b[i][1] == 'False':
self.criteria_vdc.setChecked(False)
# check if the gates are closed
if not variables.flag_main_gate and not variables.flag_load_gate and not variables.flag_cryo_gate:
if self.parameters_source.currentText() == 'TextLine' and variables.index_line == 0:
lines = self.textEdit.toPlainText() # Copy all the lines in TextLine
self.text_line = lines.splitlines() # Separate the lines of the TextLine box
self.num_line = len(self.text_line) # Count the number of lines (number of experiments to run)
elif self.parameters_source.currentText() != 'TextLine' and variables.index_line == 0:
self.num_line = 0
self.start_button.setEnabled(False) # Disable the start button in the GUI
variables.plot_clear_flag = True # Change the flag to clear the plots in GUI
# If TextLine is selected, the read_update function is run
if self.parameters_source.currentText() == 'TextLine':
read_update(self.text_line, variables.index_line)
# Update global variables to do the experiments
variables.user_name = self.ex_user.text()
variables.ex_time = int(float(self.ex_time.text()))
variables.ex_freq = int(float(self.ex_freq.text()))
variables.max_ions = int(float(self.max_ions.text()))
variables.vdc_min = int(float(self.vdc_min.text()))
variables.detection_rate = float(self.detection_rate_init.text())
variables.hit_display = int(float(self.hit_displayed.text()))
variables.pulse_fraction = int(float(self.pulse_fraction.text())) / 100
variables.pulse_frequency = float(self.pulse_frequency.text())
variables.hdf5_path = self.ex_name.text()
variables.email = self.email.text()
variables.cycle_avg = int(float(self.cycle_avg.text()))
variables.vdc_step_up = int(float(self.vdc_steps_up.text()))
variables.vdc_step_down = int(float(self.vdc_steps_down.text()))
variables.v_p_min = int(float(self.vp_min.text()))
variables.v_p_max = int(float(self.vp_max.text()))
variables.counter_source = str(self.counter_source.currentText())
variables.criteria_time = self.criteria_time.isChecked()
variables.criteria_ions = self.criteria_ions.isChecked()
variables.criteria_vdc = self.criteria_vdc.isChecked()
if variables.counter_source == 'TDC_Raw':
variables.raw_mode = True
if self.tweet.currentText() == 'Yes':
variables.tweet = True
# Read the experiment counter
with open('./png/counter.txt') as f:
variables.counter = int(f.readlines()[0])
# Current time and date
now = datetime.datetime.now()
exp_name = "%s_" % variables.counter + \
now.strftime("%b-%d-%Y_%H-%M") + "_%s" % variables.hdf5_path
variables.path = 'D:\\pyoxcart\\data\\%s' % exp_name
# Create folder to save the data
if not os.path.isdir(variables.path):
os.makedirs(variables.path, mode=0o777, exist_ok=True)
# Start the run method of the MainThread class, which is the main function of oxcart.py
self.thread.start()
if self.parameters_source.currentText() == 'TextLine':
variables.index_line += 1 # Increase the TextLine index so the next run reads the next line
else:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close all "
"Gates !!!</span></p></body></html>"))
def finished_thread_main(self):
"""
The function that is run after the experiment (MainThread) ends
"""
self.start_button.setEnabled(True)
self.stop_button.setEnabled(True)
QScreen.grabWindow(app.primaryScreen(),
QApplication.desktop().winId()).save(variables.path + '\\screenshot.png')
if variables.index_line < self.num_line: # Do next experiment in case of TextLine
self.thread_main()
else:
variables.index_line = 0
def stop_ex(self):
"""
The function that is run if STOP button is pressed
"""
if variables.start_flag:
variables.stop_flag = True # Set the STOP flag
self.stop_button.setEnabled(False) # Disable the stop button
print('STOP Flag is set:', variables.stop_flag)
def gates(self, gate_num):
"""
The function for closing or opening gates
"""
def switch_gate(num):
"""
The function that applies the command to close or open a gate
"""
with nidaqmx.Task() as task:
task.do_channels.add_do_chan('Dev2/port0/line%s' % num)
task.start()
task.write([True])
time.sleep(.5)
task.write([False])
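# The helper above pulses one digital-output line (0.5 s high, then low).
# In the branches below the even line numbers (0, 2, 4) are used to open and
# the odd ones (1, 3, 5) to close the main, load lock and cryo gates.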
# Main gate
if not variables.start_flag and gate_num == 1 and not variables.flag_load_gate and not variables.flag_cryo_gate and variables.flag_pump_load_lock:
if not variables.flag_main_gate: # Open the main gate
switch_gate(0)
self.led_main_chamber.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_main_open)
variables.flag_main_gate = True
elif variables.flag_main_gate: # Close the main gate
switch_gate(1)
self.led_main_chamber.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_all)
variables.flag_main_gate = False
# Buffer gate
elif not variables.start_flag and gate_num == 2 and not variables.flag_main_gate and not variables.flag_cryo_gate and variables.flag_pump_load_lock:
if not variables.flag_load_gate: # Open the load lock gate
switch_gate(2)
self.led_load_lock.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_load_open)
variables.flag_load_gate = True
elif variables.flag_load_gate: # Close the load lock gate
switch_gate(3)
self.led_load_lock.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_all)
variables.flag_load_gate = False
# Cryo gate
elif not variables.start_flag and gate_num == 3 and not variables.flag_main_gate and not variables.flag_load_gate and variables.flag_pump_load_lock:
if not variables.flag_cryo_gate: # Open the cryo gate
switch_gate(4)
self.led_cryo.setPixmap(self.led_green)
self.diagram.setPixmap(self.diagram_cryo_open)
variables.flag_cryo_gate = True
elif variables.flag_cryo_gate: # Close the cryo gate
switch_gate(5)
self.led_cryo.setPixmap(self.led_red)
self.diagram.setPixmap(self.diagram_close_all)
variables.flag_cryo_gate = False
# Show the error message in the GUI
else:
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close all "
"the Gates and switch on the pump !!!</span></p></body></html>"))
def pump_switch(self):
"""
The function for Switching the Load Lock pump
"""
if not variables.start_flag and not variables.flag_main_gate and not variables.flag_cryo_gate \
and not variables.flag_load_gate:
if variables.flag_pump_load_lock:
variables.flag_pump_load_lock_click = True
self.pump_load_lock_switch.setEnabled(False)
elif not variables.flag_pump_load_lock:
variables.flag_pump_load_lock_click = True
self.pump_load_lock_switch.setEnabled(False)
else: # Show the error message in the GUI
_translate = QtCore.QCoreApplication.translate
self.Error.setText(_translate("OXCART",
"<html><head/><body><p><span style=\" color:#ff0000;\">!!! First Close all "
"the Gates !!!</span></p></body></html>"))
def light_switch(self):
"""
The function for switching the light on/off and adjusting the camera exposure time accordingly
"""
if not variables.light:
self.led_light.setPixmap(self.led_green)
Camera.light_switch(self)
self.timer1.setInterval(500)
variables.light = True
variables.sample_adjust = True
variables.light_swich = True
elif variables.light:
self.led_light.setPixmap(self.led_red)
Camera.light_switch(self)
self.timer1.setInterval(500)
variables.light = False
variables.sample_adjust = False
variables.light_swich = False
def thread_worker(self, target):
"""
The function for creating workers
"""
return threading.Thread(target=target)
def update_plot_data(self):
"""
The function for updating plots
"""
# Temperature
self.x_tem = self.x_tem[1:] # Remove the first element.
self.x_tem = np.append(self.x_tem, self.x_tem[-1] + 1) # Add a new value 1 higher than the last.
self.y_tem = self.y_tem[1:] # Remove the first element.
try:
self.y_tem = np.append(self.y_tem, int(variables.temperature))
self.data_line_tem.setData(self.x_tem, self.y_tem)
except Exception:
print(
f"{initialize_devices.bcolors.FAIL}Error: Cannot read the temperature{initialize_devices.bcolors.ENDC}")
if variables.index_auto_scale_graph == 30:
self.temperature.enableAutoRange(axis='x')
self.vdc_time.enableAutoRange(axis='x')
self.detection_rate_viz.enableAutoRange(axis='x')
variables.index_auto_scale_graph = 0
self.temperature.disableAutoRange()
self.vdc_time.disableAutoRange()
self.detection_rate_viz.disableAutoRange()
variables.index_auto_scale_graph += 1
if variables.plot_clear_flag:
self.x_vdc = np.arange(1000) # 1000 time points
self.y_vdc = np.zeros(1000) # 1000 data points
self.y_vdc[:] = np.nan
self.y_vps = np.zeros(1000) # 1000 data points
self.y_vps[:] = np.nan
self.data_line_vdc.setData(self.x_vdc, self.y_vdc)
self.data_line_vps.setData(self.x_vdc, self.y_vps)
self.x_dtec = np.arange(1000)
self.y_dtec = np.zeros(1000)
self.y_dtec[:] = np.nan
self.data_line_dtec.setData(self.x_dtec, self.y_dtec)
self.histogram.clear()
self.scatter.clear()
self.visualization.clear()
self.visualization.addItem(self.detector_circle)
variables.plot_clear_flag = False
variables.specimen_voltage = 0
variables.pulse_voltage = 0
variables.elapsed_time = 0
variables.total_ions = 0
variables.avg_n_count = 0
if variables.start_flag:
if variables.index_wait_on_plot_start <= 16:
variables.index_wait_on_plot_start += 1
if variables.index_wait_on_plot_start >= 8:
# V_dc and V_p
if variables.index_plot <= 999:
self.y_vdc[variables.index_plot] = int(variables.specimen_voltage) # Add a new value.
self.y_vps[variables.index_plot] = int(variables.pulse_voltage) # Add a new value.
else:
self.x_vdc = np.append(self.x_vdc,
self.x_vdc[-1] + 1) # Add a new value 1 higher than the last.
self.y_vdc = np.append(self.y_vdc, int(variables.specimen_voltage)) # Add a new value.
self.y_vps = np.append(self.y_vps, int(variables.pulse_voltage)) # Add a new value.
self.data_line_vdc.setData(self.x_vdc, self.y_vdc)
self.data_line_vps.setData(self.x_vdc, self.y_vps)
# Detection Rate Visualization
if variables.index_plot <= 999:
self.y_dtec[variables.index_plot] = int(variables.avg_n_count) # Add a new value.
else:
self.x_dtec = self.x_dtec[1:] # Remove the first element.
self.x_dtec = np.append(self.x_dtec,
self.x_dtec[-1] + 1) # Add a new value 1 higher than the last.
self.y_dtec = self.y_dtec[1:]
self.y_dtec = np.append(self.y_dtec, int(variables.avg_n_count))
self.data_line_dtec.setData(self.x_dtec, self.y_dtec)
# Increase the index
variables.index_plot += 1
# Time of Flight
if variables.counter_source == 'TDC' and variables.total_ions > 0 and variables.index_wait_on_plot_start > 16 and not variables.raw_mode:
if variables.index_wait_on_plot_start > 16:
try:
def replaceZeroes(data):
min_nonzero = np.min(data[np.nonzero(data)])
data[data == 0] = min_nonzero
return data
math_to_charge = variables.t * 27.432/(1000 * 4) # Time in ns
math_to_charge = math_to_charge[math_to_charge < 5000]
# max_lenght = max(len(variables.x), len(variables.y),
# len(variables.t), len(variables.main_v_dc_dld))
# d_0 = 110 * 0.001
# e = 1.602 * 10 ** (-19)
# x_n = (((variables.x[:max_lenght]) - 1225) * (78/2450))
# y_n = (((variables.y[:max_lenght]) - 1225) * (78/2450))
# t_n = variables.t[:max_lenght] * 27.432 * 10**(-12) / 4
#
# l = np.sqrt(d_0 ** 2 + x_n ** 2 + y_n ** 2)
#
# math_to_charge = (2 * variables.main_v_dc_dld[:max_lenght] * e * t_n**2) / (l**2)
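# Note on the commented-out derivation above (a hedged reminder, not used by
# the active code): for a straight flight path, energy conservation
# n*e*V = 0.5*m*(l/t)**2 gives the mass-to-charge ratio m/n = 2*e*V*t**2/l**2,
# with V the accelerating voltage, t the time of flight and l the flight
# length. The simplified branch below only histograms the scaled raw time of
# flight (in ns) instead.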
self.y_tof, self.x_tof = np.histogram(math_to_charge, bins=512)
import numpy as np
from scipy.linalg import block_diag
from numpy.linalg import matrix_power
from scipy.optimize import minimize
import picos as pic
def state_covariance(A, B, K, V):
A_cl = A + B @ K
d = A.shape
X_V = pic.SymmetricVariable('X_V', shape=d)
F = pic.Problem()
F.set_objective('min', pic.trace(X_V))
F.add_constraint(A_cl.T * X_V * A_cl - X_V + V == 0)
F.add_constraint(X_V >> 0)
F.solve(verbosity=0, primals=None)
# Unstable, so expected variance is infinite
if F.status != 'optimal':
return np.Inf
X_V = np.atleast_2d(X_V.value)
return X_V
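# Cross-check sketch (not part of the original workflow, helper name is
# hypothetical): when A + B @ K is stable, the same steady-state covariance
# solves the discrete Lyapunov equation A_cl.T @ X @ A_cl - X + V = 0, which
# scipy can solve directly without an SDP solver.
def _state_covariance_lyapunov(A, B, K, V):
    """Closed-form steady-state covariance, for comparison with state_covariance."""
    from scipy.linalg import solve_discrete_lyapunov
    A_cl = A + B @ K
    # Unstable closed loop: the covariance diverges, mirroring the infinite return above.
    if np.max(np.abs(np.linalg.eigvals(A_cl))) >= 1:
        return np.inf
    # solve_discrete_lyapunov(a, q) solves a @ X @ a.conj().T - X + q = 0,
    # so passing a = A_cl.T yields A_cl.T @ X @ A_cl - X + V = 0.
    return solve_discrete_lyapunov(A_cl.T, V)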
# E is the steady-state state covariance
def bounds(E, Q, R, A, B, K, n, p):
Q_ = Q + K.T @ R @ K
A_ = A + B @ K
omega = block_diag(*list(Q_ for i in range(n)))
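# A hedged reading of the construction: 'omega' is block-diagonal with n copies
# of the per-step cost Q + K.T @ R @ K, while 'cov' assembled below is the
# covariance of the stacked steady-state trajectory (x_1, ..., x_n), with
# blocks Cov(x_j, x_i) = A_cl^(j-i) @ E for j >= i and the transpose otherwise.
# The eigenvalues of omega @ cov then characterise the Gaussian quadratic form
# sum_i x_i.T @ (Q + K.T @ R @ K) @ x_i whose tail bounds are computed below.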
column_list = list()
for i in range(n):
entry_list = list()
for j in range(n):
exp = np.abs(j-i)
if j >= i:
entry = matrix_power(A_, exp) @ E
else:
entry = E @ matrix_power(A_, exp).T
entry_list.append(entry)
column = np.vstack(entry_list)
column_list.append(column)
cov = np.hstack(column_list)
assert np.allclose(cov, cov.T)
M = omega @ cov
eig = np.linalg.eigvals(M + np.eye(M.shape[0]) * 1e-9)
assert 0 < p < 1
beta = p
# assert np.alltrue(0 < eig < 1)
def x(eta):
return - 1/eta * np.log(beta/2) - 1/(2*eta) * np.sum(np.log(1 - 2 * eta * eig))
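# Hedged note on x(eta): for a Gaussian quadratic form z = sum_i lam_i * chi2_1
# (lam_i the eigenvalues above), the MGF is E[exp(eta*z)] = prod_i
# (1 - 2*eta*lam_i)**(-1/2). Chernoff's bound P(z >= a) <= exp(-eta*a) *
# E[exp(eta*z)] for eta > 0, set equal to beta/2 and solved for a, gives
# a = -(1/eta)*log(beta/2) - 1/(2*eta)*sum_i log(1 - 2*eta*lam_i), i.e. x(eta);
# negative eta gives the lower-tail counterpart optimised just below.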
test = np.linspace(-1000., 0., 10000, endpoint=False)
f = lambda eta: -x(eta)
xs = list(f(eta) for eta in test)
# import matplotlib.pyplot as plt
# plt.plot(test, xs)
# plt.show()
# Lower bound
bnds = ((None, -1e-6),)
res = minimize(fun=lambda eta: -x(eta), x0=test[np.argmin(xs)], bounds=bnds)
k_m = x(res.x)
max = 1/(2 * np.max(eig))
test = np.linspace(0.0001, max, 1000, endpoint=False)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import inspect
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS,
FUNCTION_HELPERS, DISPATCHED_FUNCTIONS, IGNORED_FUNCTIONS)
from astropy.utils.compat import (
NUMPY_LT_1_14, NUMPY_LT_1_15, NUMPY_LT_1_16, NUMPY_LT_1_18)
NO_ARRAY_FUNCTION = not ARRAY_FUNCTION_ENABLED
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
all_wrapped_functions = {name: f for name, f in np.__dict__.items()
if callable(f) and hasattr(f, '__wrapped__') and
(NUMPY_LT_1_15 or f is not np.printoptions)}
all_wrapped = set(all_wrapped_functions.values())
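# On numpy >= 1.17 this picks up, e.g., np.reshape and np.concatenate, whose
# public entry points are thin wrappers around the __array_function__ dispatch
# machinery and therefore expose a __wrapped__ attribute.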
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
# alen is deprecated in Numpy 1.18
if NUMPY_LT_1_18:
def test_alen(self):
assert np.alen(self.q) == 3
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1. * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="expand_dims used asarray in numpy <1.16")
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
@pytest.mark.xfail(NUMPY_LT_1_15,
reason="flip needs axis argument in numpy <1.15")
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# TODO: should we change the default for subok?
self.check(np.broadcast_to, (3, 3, 3), subok=True)
def test_broadcast_arrays(self):
# TODO: should we change the default for subok?
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150., 350.]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
@pytest.mark.skipif(NUMPY_LT_1_15,
reason="take_along_axis added in numpy 1.15")
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices,
axis=0) * self.q.unit
assert np.all(out == expected)
@pytest.mark.skipif(NUMPY_LT_1_15,
reason="put_along_axis added in numpy 1.15")
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis,
self.q.value) * self.q.unit ** 2
assert_array_equal(out, expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
@pytest.mark.parametrize('axes', ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup(self):
self.q = (np.arange(9.).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.)
def test_ones_like(self):
self.check(np.ones_like)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value,
axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True],
self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_putmask(self):
q = np.arange(3.) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_place(self):
q = np.arange(3.) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copyto(self):
q = np.arange(3.) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25. * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup(self):
self.q1 = np.arange(6.).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop('q_list', [self.q1, self.q2])
o = func(q_list, *args, **kwargs)
unit = q_list[0].unit
v_list = [q.to_value(unit) for q in q_list]
expected = func(v_list, *args, **kwargs) * unit
assert o.shape == expected.shape
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = np.concatenate(
[self.q1.value, self.q2.to_value(self.q1.unit)]) * self.q1.unit
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_stack(self):
self.check(np.stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_column_stack(self):
self.check(np.column_stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_hstack(self):
self.check(np.hstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_vstack(self):
self.check(np.vstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_dstack(self):
self.check(np.dstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_block(self):
self.check(np.block)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = np.append(self.q1.value, self.q2.to_value(self.q1.unit),
axis=0) * self.q1.unit
assert np.all(out == expected)
a = np.arange(3.)
result = np.append(a, 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_insert(self):
# Unit of inserted values is ignored.
q = np.arange(12.).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50., 25.] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) * u.m
assert np.all(out == expected)
a = np.arange(3.)
result = np.insert(a, (2,), 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50. * u.cm)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_pad(self):
q = np.arange(1., 6.) * u.m
out = np.pad(q, (2, 3), 'constant', constant_values=(0., 150.*u.cm))
assert out.unit == q.unit
expected = np.pad(q.value, (2, 3), 'constant',
constant_values=(0., 1.5)) * q.unit
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), 'constant', constant_values=150.*u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), 'constant',
constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), 'linear_ramp', end_values=(25.*u.cm, 0.))
assert out3.unit == q.unit
expected3 = np.pad(q.value, (2, 3), 'linear_ramp',
end_values=(0.25, 0.)) * q.unit
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup(self):
self.q = np.arange(54.).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(NotImplementedError):
np.any(self.q)
def test_all(self):
with pytest.raises(NotImplementedError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(NotImplementedError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(NotImplementedError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="angle used asarray in numpy <1.16")
def test_angle(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0., 10., 20.]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
expected = np.clip(self.q.value, qmin.to_value(self.q.unit),
qmax.to_value(self.q.unit)) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_sinc(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.*u.one)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_where(self):
out = np.where([True, False, True], self.q, 1. * u.km)
expected = np.where([True, False, True], self.q.value,
1000.) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_select(self):
q = self.q
out = np.select([q < 0.55 * u.m, q > 1. * u.m],
[q, q.to(u.cm)], default=-1. * u.km)
expected = np.select([q.value < 0.55, q.value > 1],
[q.value, q.value], default=-1000) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_real_if_close(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_tril(self):
self.check(np.tril)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_triu(self):
self.check(np.triu)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_unwrap(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1*u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1., 2.]*u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.*u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q, nan=1.*u.km, posinf=2.*u.km, neginf=-2*u.km)
expected = [-2000., 2000., 1000., 3., 4.] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1. + 1j]*u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1. + 1j]*u.m)
def test_isclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 102., 199.]) * u.cm
atol = 1.5 * u.cm
rtol = 1. * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=atol.to_value(q1.unit))
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
@pytest.mark.xfail
def test_isclose_failure(self):
q_cm = self.q.to(u.cm)
# atol does not have units; TODO: should this work by default?
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit ** 2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@pytest.mark.xfail(NO_ARRAY_FUNCTION and not NUMPY_LT_1_14,
reason=("Needs __array_function__ support "
"(or numpy < 1.14)"))
def test_count_nonzero(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
        # Returns a plain integer ndarray, not a Quantity.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
atol = 2 * u.cm
rtol = 1. * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0., rtol=rtol)
def test_allclose_failures(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
with pytest.raises(u.UnitsError):
# Default atol breaks code; TODO: should this work?
assert np.allclose(q1, q2)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1. * u.s)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_array_equal(self):
q1 = np.arange(3.) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_array_equiv(self):
q1 = np.array([[0., 1., 2.]]*3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
class TestNanFunctions(InvariantUnitTestSetup):
def setup(self):
super().setup()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit ** 2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_cross(self):
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_dot(self):
q1 = np.array([1., 2., 3.]) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.dot(q1, q2)
assert o == 32. * u.m / u.s
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32. + 0j) * u.m / u.s
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_tensordot(self):
# From the docstring example
a = np.arange(60.).reshape(3, 4, 5) * u.m
b = np.arange(24.).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value,
axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_einsum(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum('...i', q1)
assert np.all(o == q1)
o = np.einsum('ii', q1)
expected = np.einsum('ii', q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum('ij,jk', q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum('ij,jk', q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum_path('...i', q1)
assert o[0] == ['einsum_path', (0,)]
o = np.einsum_path('ii', q1)
assert o[0] == ['einsum_path', (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path('ij,jk', q1, q2)
assert o[0] == ['einsum_path', (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10. * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
        assert np.all(out == expected)
"""
A collection of some useful ocean functions. These are taken from a range of
MATLAB toolboxes as well as from ocean_funcs.ncl, which in turn has taken them
from the CSIRO SEAWATER (now GSW) MATLAB toolbox.
The NCL code can be found at:
http://www.ncl.ucar.edu/Support/talk_archives/2013/att-1501/ocean_funcs.ncl__size_15540__creation-date_
The MATLAB toolboxes used includes:
http://www.cmar.csiro.au/datacentre/ext_docs/seawater.htm
http://mooring.ucsd.edu/software/matlab/doc/toolbox/ocean/
http://www.mbari.org/staff/etp3/ocean1.htm
See also:
<NAME>., A new extended Gibbs thermodynamic potential of seawater,
Prog. Oceanogr., 58, 43-115,
http://authors.elsevier.com/sd/article/S0079661103000880 corrigendum 61
(2004) 99, 2003.
<NAME>. & Millard, R.C. Unesco 1983. Algorithms for computation of
fundamental properties of seawater, 1983. Unesco Tech. Pap. in Mar. Sci.,
No. 44.
<NAME>., <NAME>, <NAME>, <NAME>, and <NAME>, Updated algorithms for density, potential temperature,
conservative temperature and freezing temperature of seawater, Journal of
Atmospheric and Oceanic Technology, submitted, 2005.
The Simpson-Hunter parameter is described in:
Simpson, JH, and JR Hunter. "Fronts in the Irish Sea." Nature 250 (1974):
404-6.
The relative humidity from dew point temperature and ambient temperature is
taken from:
http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
Provides functions:
- pressure2depth : convert pressure (decibars) to depth in metres
- depth2pressure : convert depth in metres to pressure in decibars
- dT_adiab_sw : calculate adiabatic temperature gradient
- theta_sw : calculate potential temperature for sea water
- cp_sw : calculate constant pressure specific heat for seawater
- sw_smow : calculate density of Standard Mean Ocean Water
- sw_dens0 : calculate seawater density at atmospheric surface pressure
- sw_seck : calculate Secant Bulk Modulus (K) of seawater
- sw_dens : calculate density from temperature, salinity and pressure
- sw_svan : calculate specific volume anomaly (only use if you don't
already have density)
- sw_sal78 : calculate salinity from conductivity, temperature and pressure
based on the Fofonoff and Millard (1983) SAL78 FORTRAN function
- sw_sal80 : calculate salinity from conductivity, temperature and pressure
based on the UCSD sal80.m function (identical approach in sw_salinity)
- sw_salinity : calculate salinity from conductivity, temperature and
pressure (identical approach in sw_sal80)
- dens_jackett : alternative formulation for calculating density from
temperature and salinity (after Jackett et al. (2005)
- pea: calculate the potential energy anomaly (stratification index).
- simpsonhunter : calculate the Simpson-Hunter parameter to predict frontal
locations.
- mixedlayerdepth : calculate the mixed layer depth using the ERSEM
definition.
- stokes : calculate the Stokes parameter.
- dissipation : calculate the tidal dissipation from a current speed.
- calculate_rhum : calculate relative humidity from dew point temperature
and ambient temperature.
<NAME> (Plymouth Marine Laboratory)
"""
import numpy as np
import matplotlib.pyplot as plt
# Define some commonly used constants.
c68 = 1.00024 # conversion constant to T68 temperature scale.
c90 = 0.99976 # conversion constant to T90 temperature scale.
def _tests():
"""
Put some (sort of) unit tests in here to make sure the functions work as
expected.
"""
test_lat = 30
test_z = np.logspace(0.1, 4, 50) # log depth distribution
test_p = np.logspace(0.1, 4, 50) # log pressure distribution
res_p1 = depth2pressure(test_z, test_lat)
res_z1 = pressure2depth(res_p1, test_lat)
res_z2 = pressure2depth(test_p, test_lat)
res_p2 = depth2pressure(res_z2, test_lat)
# Graph the differences
if False:
fig0 = plt.figure(figsize=(12, 10))
ax0 = fig0.add_subplot(1, 2, 1)
ax0.loglog(test_z, res_z1 - test_z, '.')
ax0.set_xlabel('Depth (m)')
ax0.set_ylabel('Difference (m)')
ax0.set_title('depth2pressure <-> pressure2depth')
ax1 = fig0.add_subplot(1, 2, 2)
ax1.loglog(test_p, res_p2 - test_p, '.')
ax1.set_xlabel('Pressure (dbar)')
ax1.set_ylabel('Difference (dbar)')
ax1.set_title('pressure2depth <-> depth2pressure ')
fig0.show()
# Input parameters
test_t = np.array(40)
test_s = np.array(40)
test_p = np.array(10000)
test_pr = np.array(0)
test_c = np.array(1.888091)
test_td = np.array(20) # for dens_jackett
test_sd = np.array(20) # for dens_jackett
test_pd = np.array(1000) # for dens_jackett
test_cond = np.array([100, 65000]) # for cond2salt
test_h = np.array((10, 20, 30, 100)) # depths for stokes
test_U = 0.25 # U for stokes and dissipation
test_omega = 1 / 44714.1647021416 # omega for stokes
test_z0 = np.array((0.0025)) # z0 for stokes
test_rho = 1025
test_temp = np.arange(-20, 50, 10)
test_dew = np.linspace(0, 20, len(test_temp))
# Use some of the Fofonoff and Millard (1983) checks.
res_svan = sw_svan(test_t, test_s, test_p)
print('Steric anomaly\nFofonoff and Millard (1983):\t9.8130210e-6\nsw_svan:\t\t\t{}\n'.format(res_svan))
res_z = pressure2depth(test_p, test_lat)
print('Pressure to depth\nFofonoff and Millard (1983):\t9712.653\npressure2depth:\t\t\t{}\n'.format(res_z))
# The return to depth is a bit inaccurate, not sure why.
res_pres = depth2pressure(res_z, test_lat)
    print('Depth to pressure\nFofonoff and Millard (1983):\t9712.653\ndepth2pressure:\t\t\t{}\n'.format(res_pres))
res_cp = cp_sw(test_t, test_s, test_p)
print('Specific heat of seawater\nFofonoff and Millard (1983):\t3849.500\ncp_sw:\t\t\t\t{}\n'.format(res_cp))
res_atg = dT_adiab_sw(test_t, test_s, test_p)
print('Adiabatic temperature gradient\nFofonoff and Millard (1983):\t0.0003255976\ndT_adiab_sw:\t\t\t{}\n'.format(res_atg))
res_theta = theta_sw(test_t, test_s, test_p, test_pr)
print('Potential temperature\nFofonoff and Millard (1983):\t36.89073\ntheta_sw:\t\t\t{}\n'.format(res_theta))
# Haven't got the right input values for sal78 and sw_salinity, but the
# outputs match the MATLAB functions, so I'm assuming they're OK...
# res_salinity = sw_salinity(test_c, test_t, test_p)
# print('Salinity\nFofonoff and Millard (1983):\t40\nsw_salinity:\t\t\t{}\n'.format(res_salinity))
res_sal78 = sw_sal78(test_c, test_t, test_p)
print('Salinity\nFofonoff and Millard (1983):\t40\nsw_sal78:\t\t\t{}\n'.format(res_sal78))
# Haven't got the right input values for sal78 and sw_salinity, but the
# outputs match the MATLAB functions, so I'm assuming they're OK...
# test_c, test_t, test_p = np.array(1.888091), np.array(40), np.array(10000)
# res_sal80 = sw_sal80(test_c, test_t, test_p)
# print('Salinity\nFofonoff and Millard (1983):\t40\nsw_sal80:\t\t\t{}\n'.format(res_sal80))
res_dens = dens_jackett(test_td, test_sd, test_pd)
print('Jackett density\nJackett et al. (2005):\t1017.728868019642\ndens_jackett:\t\t{}\n'.format(res_dens))
res_salt = cond2salt(test_cond)
print('Conductivity to salinity\nUSGS:\t\t0.046,\t\t\t44.016\ncond2salt:\t{},\t{}'.format(res_salt[0], res_salt[1]))
res_stokes, res_u_star, res_delta = stokes(test_h, test_U, test_omega, test_z0, U_star=True, delta=True)
print('Stokes number\nSouza (2013):\tS:\tTEST\tstokes:\tS:{}\n\t\t\tSouza (2013):\tU*:\tTEST\t{}\n\t\t\tSouza (2013):\tdelta:\tTEST\t{}\n'.format(res_stokes, res_u_star, res_delta))
res_dissipation = dissipation(test_rho, test_U)
print('Tidal dissipation\nKnown good:\t0.0400390625\ndissipation():\t{}'.format(res_dissipation))
valid_rhum = np.array((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971, 31.67003471))
rhum = calculate_rhum(test_dew, test_temp)
for hum in zip(rhum, valid_rhum):
print('Relative humidity:\tvalid: {:.3f}\tcalculate_rhum: {:.3f}\t(difference = {:.3f})'.format(hum[1], hum[0], np.diff(hum)[0]))
def pressure2depth(p, lat):
"""
Convert from pressure in decibars to depth in metres.
Parameters
----------
p : ndarray
Pressure (1D array) in decibars.
lat : ndarray
Latitudes for samples in p.
Returns
-------
z : ndarray
Water depth in metres.
"""
c1 = 9.72659
c2 = -2.1512e-5
c3 = 2.279e-10
c4 = -1.82e-15
gam = 2.184e-6
y = np.abs(lat)
rad = np.sin(np.deg2rad(y))**2
gy = 9.780318 * (1.0 + (rad * 5.2788e-3) + (rad**2 * 2.36e-5))
bline = gy + (gam * 0.5 * p)
tline = (c1 * p) + (c2 * p**2.0) + (c3 * p**3.0) + (c4 * p**4.0)
z = tline / bline
return z
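# Reference check (see _tests above, after Fofonoff and Millard, 1983):
# pressure2depth(np.array(10000), 30) should return a depth of ~9712.653 m.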
def depth2pressure(z, lat):
"""
Convert from depth in metres to pressure in decibars.
Parameters
----------
z : ndarray
Depth (1D array) in metres. Must be positive down (negative values are
set to zero before conversion to pressure).
lat : ndarray
Latitudes for samples in z.
Returns
-------
p : ndarray
Pressure in decibars.
"""
# Set negative depths to 0. We assume positive depth values (as in the
# docstring).
pz = z.copy()
if isinstance(pz, np.ndarray):
pz[z < 0] = 0
c2 = 2.21e-6
Y = np.sin(np.deg2rad(np.abs(lat)))
c1 = (5.92 + (5.25 * Y**2.0)) * 1.e-3
p = ((1.0 - c1) - np.sqrt((1.0 - c1)**2.0 - (4.0 * c2 * pz))) / (2.0 * c2)
return p
def dT_adiab_sw(t, s, p):
"""
Calculate adiabatic temperature gradient (degrees Celsius dbar^{-1})
Parameters
----------
t : ndarray
Temperature (Celsius)
s : ndarray
Salinity (PSU)
p : ndarray
Pressure (decibars)
All three arrays must have the same shape.
Returns
-------
atg : ndarray
Adiabatic temperature gradient
"""
# Constants
a0 = 3.5803e-5
a1 = 8.5258e-6
a2 = -6.836e-8
a3 = 6.6228e-10
b0 = 1.8932e-6
b1 = -4.2393e-8
c0 = 1.8741e-8
c1 = -6.7795e-10
c2 = 8.733e-12
c3 = -5.4481e-14
d0 = -1.1351e-10
d1 = 2.7759e-12
e0 = -4.6206e-13
e1 = 1.8676e-14
e2 = -2.1687e-16
T68 = t * c68 # convert to 1968 temperature scale
atg = a0 + (a1 + (a2 + a3 * T68) * T68) * T68 + (b0 + b1 * T68) * (s - 35) + \
((c0 + (c1 + (c2 + c3 * T68) * T68) * T68) + (d0 + d1 * T68) *
(s - 35)) * p + (e0 + (e1 + e2 * T68) * T68) * p * p
return atg
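# Reference check (see _tests above, after Fofonoff and Millard, 1983):
# dT_adiab_sw(40, 40, 10000) should return ~3.255976e-4 degrees Celsius dbar^-1.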
def theta_sw(t, s, p, pr):
"""
Calculate potential temperature for seawater from temperature, salinity and
pressure.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
pr : ndarray
Reference pressure (decibars) either a scalar or the same shape as t.
Returns
-------
th : ndarray
Potential temperature (Celsius)
"""
dP = pr - p # pressure difference.
# 1st iteration
dth = dP * dT_adiab_sw(t, s, p)
th = (t * c68) + (0.5 * dth)
q = dth
    # 2nd iteration
dth = dP * dT_adiab_sw(th / c68, s, (p + (0.5 * dP)))
th = th + ((1 - (1 / np.sqrt(2))) * (dth - q))
q = ((2 - np.sqrt(2)) * dth) + (((3 / np.sqrt(2)) - 2) * q)
# 3rd iteration
dth = dP * dT_adiab_sw(th / c68, s, (p + (0.5 * dP)))
th = th + ((1 + (1 / np.sqrt(2))) * (dth - q))
q = ((2 + np.sqrt(2)) * dth) + (((-3 / np.sqrt(2)) - 2) * q)
    # 4th iteration
dth = dP * dT_adiab_sw(th / c68, s, (p + dP))
th = (th + (dth - (2 * q)) / 6) / c68
return th
def cp_sw(t, s, p):
"""
Calculate constant pressure specific heat (cp) for seawater, from
temperature, salinity and pressure.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
cp : ndarray
        Constant pressure specific heat (J kg^{-1} C^{-1}).
Notes
-----
Valid temperature range is -2 to 40C and salinity is 0-42 PSU. Warnings are
issued if the data fall outside these ranges.
"""
# Check for values outside the valid ranges.
if t.min() < -2:
n = np.sum(t < -2)
        print('WARNING: {} values below minimum temperature value (-2C)'.format(n))
    if t.max() > 40:
        n = np.sum(t > 40)
        print('WARNING: {} values above maximum temperature value (40C)'.format(n))
    if s.min() < 0:
        n = np.sum(s < 0)
        print('WARNING: {} values below minimum salinity value (0 PSU)'.format(n))
    if s.max() > 42:
        n = np.sum(s > 42)
        print('WARNING: {} values above maximum salinity value (42 PSU)'.format(n))
# Convert from decibar to bar and temperature to the 1968 temperature scale.
pbar = p / 10.0
T1 = t * c68
# Specific heat at p = 0
# Temperature powers
T2 = T1**2
T3 = T1**3
T4 = T1**4
# Empirical constants
c0 = 4217.4
c1 = -3.720283
c2 = 0.1412855
c3 = -2.654387e-3
c4 = 2.093236e-5
a0 = -7.643575
a1 = 0.1072763
a2 = -1.38385e-3
b0 = 0.1770383
b1 = -4.07718e-3
b2 = 5.148e-5
cp_0t0 = c0 + (c1 * T1) + (c2 * T2) + (c3 * T3) + (c4 * T4)
A = a0 + (a1 * T1) + (a2 * T2)
B = b0 + (b1 * T1) + (b2 * T2)
cp_st0 = cp_0t0 + (A * s) + (B * s**1.5)
    # Pressure dependence
a0 = -4.9592e-1
a1 = 1.45747e-2
a2 = -3.13885e-4
a3 = 2.0357e-6
a4 = 1.7168e-8
b0 = 2.4931e-4
b1 = -1.08645e-5
b2 = 2.87533e-7
b3 = -4.0027e-9
b4 = 2.2956e-11
c0 = -5.422e-8
c1 = 2.6380e-9
c2 = -6.5637e-11
c3 = 6.136e-13
d1_cp = (pbar * (a0 + (a1 * T1) + (a2 * T2) + (a3 * T3) + (a4 * T4))) + \
(pbar**2 * (b0 + (b1 * T1) + (b2 * T2) + (b3 * T3) + (b4 * T4))) + \
(pbar**3 * (c0 + (c1 * T1) + (c2 * T2) + (c3 * T3)))
d0 = 4.9247e-3
d1 = -1.28315e-4
d2 = 9.802e-7
d3 = 2.5941e-8
d4 = -2.9179e-10
e0 = -1.2331e-4
e1 = -1.517e-6
e2 = 3.122e-8
f0 = -2.9558e-6
f1 = 1.17054e-7
f2 = -2.3905e-9
f3 = 1.8448e-11
g0 = 9.971e-8
h0 = 5.540e-10
h1 = -1.7682e-11
h2 = 3.513e-13
j1 = -1.4300e-12
d2_cp = pbar * \
((s * (d0 + (d1 * T1) + (d2 * T2) + (d3 * T3) + (d4 * T4))) +
(s**1.5 * (e0 + (e1 * T1) + (e2 * T2)))) + \
(pbar**2 * ((s * (f0 + (f1 * T1) + (f2 * T2) + (f3 * T3))) +
(g0 * s**1.5))) + (pbar**3 * ((s * (h0 + (h1 * T1) + (h2 * T2))) +
(j1 * T1 * s**1.5)))
cp = cp_st0 + d1_cp + d2_cp
    return cp
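# Reference check (see _tests above, after Fofonoff and Millard, 1983):
# with t = s = np.array(40) and p = np.array(10000), cp_sw should return ~3849.500.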
def sw_smow(t):
"""
Calculate the density of Standard Mean Ocean Water (pure water).
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
Returns
-------
rho : ndarray
Density in kg m^{-3}.
"""
# Coefficients
a0 = 999.842594
a1 = 6.793952e-2
a2 = -9.095290e-3
a3 = 1.001685e-4
a4 = -1.120083e-6
a5 = 6.536332e-9
T68 = t * c68
dens = a0 + (a1 * T68) + (a2 * T68**2) + (a3 * T68**3) \
+ (a4 * T68**4) + (a5 * T68**5)
return dens
def sw_dens0(t, s):
"""
Calculate sea water density at atmospheric surface pressure.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s: ndarray
Salinity (PSU). Must be the same size as t.
Returns
-------
dens : ndarray
        Seawater density at atmospheric surface pressure (kg m^{-3}).
"""
b0 = 8.24493e-1
b1 = -4.0899e-3
b2 = 7.6438e-5
b3 = -8.2467e-7
b4 = 5.3875e-9
c0 = -5.72466e-3
c1 = 1.0227e-4
c2 = -1.6546e-6
d0 = 4.8314e-4
t68 = t * c68
dens = s * (b0 + (b1 * t68) + (b2 * t68**2) + (b3 * t68**3) + (b4 * t68**4)) + \
s**1.5 * (c0 + (c1 * t68) + (c2 * t68**2)) + (d0 * s**2)
dens = dens + sw_smow(t68)
return dens
def sw_seck(t, s, p):
"""
Calculate Secant Bulk Modulus (K) of seawater.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
k : ndarray
Secant Bulk Modulus of seawater.
"""
# Compression terms
T68 = t * c68
Patm = p / 10.0 # convert to bar
h3 = -5.77905e-7
h2 = 1.16092e-4
h1 = 1.43713e-3
h0 = 3.239908
AW = h0 + (h1 * T68) + (h2 * T68**2) + (h3 * T68**3)
k2 = 5.2787e-8
k1 = -6.12293e-6
k0 = 8.50935e-5
BW = k0 + (k1 + k2 * T68) * T68
e4 = -5.155288e-5
e3 = 1.360477e-2
e2 = -2.327105
e1 = 148.4206
e0 = 19652.21
KW = e0 + (e1 + (e2 + (e3 + e4 * T68) * T68) * T68) * T68
# K at atmospheric pressure
j0 = 1.91075e-4
i2 = -1.6078e-6
i1 = -1.0981e-5
i0 = 2.2838e-3
A = AW + s * (i0 + (i1 * T68) + (i2 * T68**2)) + (j0 * s**1.5)
m2 = 9.1697e-10
m1 = 2.0816e-8
m0 = -9.9348e-7
# Equation 18
B = BW + (m0 + (m1 * T68) + (m2 * T68**2)) * s
f3 = -6.1670e-5
f2 = 1.09987e-2
f1 = -0.603459
f0 = 54.6746
g2 = -5.3009e-4
g1 = 1.6483e-2
g0 = 7.944e-2
# Equation 16
K0 = KW + s * (f0 + (f1 * T68) + (f2 * T68**2) + (f3 * T68**3)) + \
s**1.5 * (g0 + (g1 * T68) + (g2 * T68**2))
# K at t, s, p
K = K0 + (A * Patm) + (B * Patm**2) # Equation 15
return K
def sw_dens(t, s, p):
"""
Convert temperature, salinity and pressure to density.
Parameters
----------
t : ndarray
Temperature (1D array) in degrees Celsius.
s : ndarray
Salinity (1D array) in practical salinity units (unitless). Must be the
same shape as t.
p : ndarray
Pressure (1D array) in decibars. Must be the same shape as t.
Returns
-------
rho : ndarray
Density in kg m^{-3}.
Notes
-----
Valid temperature range is -2 to 40C, salinity is 0-42 and pressure is
0-10000 decibars. Warnings are issued if the data fall outside these
ranges.
"""
# Check for values outside the valid ranges.
    if t.min() < -2:
        n = np.sum(t < -2)
        print('WARNING: {} values below minimum temperature value (-2C)'.format(n))
    if t.max() > 40:
        n = np.sum(t > 40)
        print('WARNING: {} values above maximum temperature value (40C)'.format(n))
    if s.min() < 0:
        n = np.sum(s < 0)
        print('WARNING: {} values below minimum salinity value (0 PSU)'.format(n))
    if s.max() > 42:
        n = np.sum(s > 42)
        print('WARNING: {} values above maximum salinity value (42 PSU)'.format(n))
    if p.min() < 0:
        n = np.sum(p < 0)
        print('WARNING: {} values below minimum pressure value (0 decibar)'.format(n))
    if p.max() > 10000:
        n = np.sum(p > 10000)
        print('WARNING: {} values above maximum pressure value (10000 decibar)'.format(n))
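    # The remainder of this function is reconstructed (the source is truncated
    # here), assuming the standard EOS-80 high-pressure form built from the
    # helpers above: rho(t, s, p) = rho(t, s, 0) / (1 - p_bar / K(t, s, p)).
    pbar = p / 10.0  # convert pressure from decibars to bars
    dens = sw_dens0(t, s) / (1.0 - (pbar / sw_seck(t, s, p)))
    return dens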
import numpy as np
import pandas as pd
from scipy.spatial.transform import Rotation as R
from scipy import interpolate
def get_matrix_from_ext(ext):
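    """
    Build 4x4 homogeneous transformation matrices from extrinsic parameters.
    `ext` is either a single vector [x, y, z, angle_z, angle_y, angle_x]
    (angles in degrees, 'ZYX' Euler convention, typically yaw/pitch/roll) or
    an (N, 6) array of such vectors. The rotation fills the top-left 3x3
    block and the translation the last column of each returned matrix.
    """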
N = np.size(ext,0)
if ext.ndim==2:
rot = R.from_euler('ZYX', ext[:,3:], degrees=True)
rot_m = rot.as_matrix()
tr = np.zeros((N,4,4))
tr[:,:3,:3] = rot_m
tr[:,:3, 3] = ext[:,:3]
tr[:, 3, 3] = 1
if ext.ndim==1:
rot = R.from_euler('ZYX', ext[3:], degrees=True)
rot_m = rot.as_matrix()
tr = np.zeros((4,4))
tr[:3,:3] = rot_m
tr[:3, 3] = ext[:3]
tr[ 3, 3] = 1
return tr
def xyz2blh(x, y, z):
"""Convert XYZ coordinates to BLH,
return tuple(latitude, longitude, height).
"""
A = 6378137.0
B = 6356752.314245
e = np.sqrt(1 - (B**2)/(A**2))
# calculate longitude, in radians
longitude = np.arctan2(y, x)
# calculate latitude, in radians
xy_hypot = np.hypot(x, y)
lat0 = 0
latitude = np.arctan(z / xy_hypot)
while abs(latitude - lat0) > 1E-9:
lat0 = latitude
N = A / np.sqrt(1 - e**2 * np.sin(lat0)**2)
latitude = np.arctan((z + e**2 * N * np.sin(lat0)) / xy_hypot)
# calculate height, in meters
N = A / np.sqrt(1 - e**2 * np.sin(latitude)**2)
if abs(latitude) < np.pi / 4:
R, phi = np.hypot(xy_hypot, z), np.arctan(z / xy_hypot)
height = R * np.cos(phi) / np.cos(latitude) - N
else:
height = z / np.sin(latitude) - N * (1 - e**2)
# convert angle unit to degrees
longitude = np.degrees(longitude)
latitude = np.degrees(latitude)
return latitude, longitude, height
def blh2xyz(latitude, longitude, height):
"""Convert BLH coordinates to XYZ.
return [X, Y, Z].
"""
A = 6378137.0
B = 6356752.314245
# convert angle unit to radians
latitude = np.radians(latitude)
longitude = np.radians(longitude)
e = np.sqrt(1 - (B**2)/(A**2))
    N = A / np.sqrt(1 - e**2 * np.sin(latitude)**2)
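    # The remainder of this function is reconstructed (the source is truncated
    # here), assuming the standard geodetic-to-ECEF conversion using the prime
    # vertical radius N above, matching the [X, Y, Z] return promised in the
    # docstring.
    X = (N + height) * np.cos(latitude) * np.cos(longitude)
    Y = (N + height) * np.cos(latitude) * np.sin(longitude)
    Z = (N * (1 - e**2) + height) * np.sin(latitude)
    return [X, Y, Z]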
import numpy as np
from astropy.wcs.wcsapi.wrappers.base import BaseWCSWrapper
__all__ = ['ResampledLowLevelWCS']
class ResampledLowLevelWCS(BaseWCSWrapper):
"""
A wrapper for a low-level WCS object that has down- or
up-sampled pixel axes.
Parameters
----------
wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
The original WCS for which to reorder axes
factor : `int` or `float` or iterable of the same
The factor by which to increase the pixel size for each pixel
axis. If a scalar, the same factor is used for all axes.
offset: `int` or `float` or iterable of the same
The location on the underlying pixel grid which corresponds
to zero on the top level pixel grid. If a scalar, the grid will be
shifted by the same amount in all dimensions.
"""
def __init__(self, wcs, factor, offset=0):
self._wcs = wcs
if np.isscalar(factor):
factor = [factor] * self.pixel_n_dim
self._factor = np.array(factor)
if len(self._factor) != self.pixel_n_dim:
raise ValueError(f"Length of factor must equal number of dimensions {self.pixel_n_dim}.")
if np.isscalar(offset):
offset = [offset] * self.pixel_n_dim
        self._offset = np.array(offset)
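    # Minimal usage sketch (assuming `wcs` is any BaseLowLevelWCS instance):
    #     resampled = ResampledLowLevelWCS(wcs, factor=2, offset=1)
    # Per the parameter definitions above, top-level pixel coordinate p then
    # corresponds to p * factor + offset on the underlying pixel grid.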
#global: dictionary, docs
#type of model: z, nd, nw
#pull: subset of z, nd, nw
#push: diff of subset of z, nd, nw
import numpy as np
import random
import time
from joblib import Parallel, delayed
"""
Configuration
"""
K = 10
class LDADictionary:
def __init__(self):
self.word2id = dict()
self.id2word = dict()
self.stop_list = set('for a of the and to in'.split())
def num_words(self):
return len(self.word2id)
def get_word(self, word_id):
return self.id2word[word_id]
def get_id(self, word):
return self.word2id[word]
def contains_word(self, word):
return True if word in self.word2id else False
def contains_id(self, word_id):
return True if word_id in self.id2word else False
def add_word(self, word):
if not self.contains_word(word) and word not in self.stop_list:
word_id = len(self.word2id)
self.word2id[word] = word_id
self.id2word[word_id] = word
return word_id
else:
return self.get_id(word)
"""
Preprocess Data
"""
#====================
# Dataset 1
dictionary = LDADictionary()
raw_docs = []
with open('data/lda_data.txt', 'r') as f:
for line in f:
raw_docs.append(line)
D = int(raw_docs[0])
raw_docs = raw_docs[1:]
docs = [None] * D
def set_doc(raw_str, idx):
words = raw_str.lower().split()
tokens = []
for w in words:
if dictionary.contains_word(w):
wid = dictionary.get_id(w)
else:
wid = dictionary.add_word(w)
tokens.append(wid)
docs[idx] = tokens
for idx, raw_doc in enumerate(raw_docs):
set_doc(raw_doc, idx)
W = dictionary.num_words()
# W = 8211, D = 10768
#====================
#====================
# Dataset 2
# Src: https://towardsdatascience.com/topic-modeling-for-the-new-york-times-news-dataset-1f643e15caac
"""
docs = []
with open('data/lda_nyt_small.txt', 'r') as f:
for l in f.readlines():
line = [int(x.split(':')[0]) - 1 for x in l.split(',')]
docs.append(line)
D = 8447
W = 3012
"""
#====================
#====================
# Dataset 3: Blei
# Src: http://www.cs.columbia.edu/~blei/lda-c/
"""
docs = []
with open('data/lda_ap.txt', 'r') as f:
for l in f.readlines():
line = [int(x.split(':')[0]) for x in l.split(' ')[1:]]
docs.append(line)
D = 2246
W = 10473
"""
#====================
batch_size = int(D / 32)
alpha = 50. / K
beta = 200. / W
# Reference: https://blog.csdn.net/pipisorry/article/details/42129099
v_beta = float(W * beta)
k_alpha = float(K * alpha)
"""
Utilities
"""
train_len = len(docs)
def get_next_batch(i, n, clock):
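    """
    Return the list of document indices assigned to worker `i` out of `n`
    workers for this step. The corpus is cut into `n` equal slices and a
    fixed run of `batch_size + 1` consecutive documents is taken from the
    start of worker `i`'s slice (`clock` is only used by the alternative
    selection schemes commented out below).
    """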
size = batch_size
slicelen = int((train_len - size - 1) / n)
#idx = i * slicelen + (clock * data_select_step) % slicelen
#idx = random.randint(i * slicelen, i * slicelen + slicelen - 1)
idx = i * slicelen
return list(range(idx, idx+size+1))
"""
Interfaces
"""
def make_optimiser():
return None
def build_model(opt, accuracy=True):
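    """
    Randomly initialise the collapsed Gibbs sampling state: topic assignment
    `z` for every token, per-document topic counts `nd`, per-word topic
    counts `nw`, and per-topic totals `nwsum`.
    """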
nd = dict()
nw = dict()
z = dict()
t_init = [0 for x in range(K)]
for i in range(D): nd[i] = t_init.copy()
for i in range(W): nw[i] = t_init.copy()
for i in range(D): z[i] = []
nwsum = [0 for x in range(K)]
for d in range(D):
l = len(docs[d])
for i in range(l):
topic = random.randint(0, K - 1)
z[d].append(topic)
tok_i = docs[d][i]
nw[tok_i][topic] += 1
nd[d][topic] += 1
nwsum[topic] += 1
return z, nd, nw, nwsum
def build_update():
return {}, {}, {}, [0] * K
def update_model(model, u):
(z0, nd0, nw0, nws0) = model
(z1, nd1, nw1, nws1) = u
for k, v in z1.items():
z0[k] = np.add(z0[k], v)
for k, v in nd1.items():
nd0[k] = np.add(nd0[k], v)
for k, v in nw1.items():
nw0[k] = np.add(nw0[k], v)
nws0 = np.add(nws0, nws1)
return (z0, nd0, nw0, nws0)
def compute_updates(model, i, n, step):
z, nd, nw, nwsum = model
    # Copy the nested count lists as well: a plain dict.copy() shares the
    # inner lists with the global model, which would make the diffs computed
    # below all zero.
    local_z = {k: list(v) for k, v in z.items()}
    local_nd = {k: list(v) for k, v in nd.items()}
    local_nw = {k: list(v) for k, v in nw.items()}
    local_nwsum = list(nwsum)
docs_index = get_next_batch(i, n, step)
for m in docs_index:
ndsum_m = len(docs[m])
for n in range(len(docs[m])):
topic = local_z[m][n]
w = docs[m][n]
local_nw[w][topic] -= 1
local_nd[m][topic] -= 1
local_nwsum[topic] -= 1
p = [0.0 for _ in range(K)]
for k in range(K):
#p[k] = (local_nw[w][k] + beta) / (nwsum[k] + v_beta) * \
p[k] = (local_nw[w][k] + beta) / (local_nwsum[k] + v_beta) * \
(local_nd[m][k] + alpha) / (ndsum_m + k_alpha)
t = np.random.multinomial(1, np.divide(p, np.sum(p))).argmax()
local_nw[w][t] += 1
local_nd[m][t] += 1
local_nwsum[t] += 1
local_z[m][n] = t
diff_z = {}; diff_nd = {}; diff_nw = {}
diff_nwsum = np.subtract(local_nwsum, nwsum)
for m in docs_index:
        diff_z[m] = np.subtract(local_z[m], z[m])
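        # Assumed continuation (the source is truncated here): emit per-key
        # count differences in the same dict-of-arrays layout consumed by
        # update_model() above.
        diff_nd[m] = np.subtract(local_nd[m], nd[m])
        for w in docs[m]:
            diff_nw[w] = np.subtract(local_nw[w], nw[w])
    return diff_z, diff_nd, diff_nw, diff_nwsum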
import numpy as np
import pytest
import snc.agents.hedgehog.strategic_idling.strategic_idling_utils
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_and_arrivals \
import ComputeAsymptoticCovBernoulliServiceAndArrivals
import snc.agents.hedgehog.strategic_idling.hedging_utils as hedging_utils
import snc.agents.hedgehog.workload.workload as wl
from snc.agents.hedgehog.params import StrategicIdlingParams
from snc.agents.hedgehog.strategic_idling.strategic_idling import StrategicIdlingCore
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedgehog_gto import \
StrategicIdlingGTO, StrategicIdlingHedgehogGTO
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedging import StrategicIdlingHedging
from snc.agents.hedgehog.strategic_idling.strategic_idling_utils import get_dynamic_bottlenecks
import snc.environments.examples as examples
import snc.utils.alt_methods_test as alt_methods_test
import snc.utils.exceptions as exceptions
def test_create_strategic_idling_get_dynamic_bottlenecks():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.69, mu2=0.35, mu3=0.69,
cost_per_buffer=np.array([1, 1, 1])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
gto_object = StrategicIdlingGTO(workload_mat=workload_mat,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
x = np.array([[158], [856], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
x = np.array([[493], [476], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
x = np.array([[631], [338], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([1])
def test_create_strategic_idling_hedgehog_gto_normal_hedging():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.69, mu2=0.35, mu3=0.69,
cost_per_buffer=np.array([1.5, 1, 2])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
workload_cov = np.array([[2, 0.5], [0.5, 3]])
hgto_object = StrategicIdlingHedgehogGTO(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
# this case corresponds to normal hedging regime below hedging threshold
x = np.array([[631], [338], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
# this case corresponds to normal hedging regime above hedging threshold
x = np.array([[969],
[ 0],
[351]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([1])
# this case corresponds to monotone region
x = np.array([[493],
[476],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
# this case corresponds to monotone region
x = np.array([[100],
[476],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert hgto_object._min_drain_lp is None
def test_create_strategic_idling_hedgehog_gto_switching_curve():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.7, mu2=0.345, mu3=0.7,
cost_per_buffer=np.array([1.5, 1, 2])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
workload_cov = np.array([[2, 0.5], [0.5, 3]])
h_object = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
hgto_object = StrategicIdlingHedgehogGTO(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
# This case corresponds to switching curve regime, i.e. minimum cost
# effective state can only be reached by extending the minimum draining time.
# `w` is below the hedging threshold so standard Hedgehog would allow one
# resource to idle, but it turns out that this resource is a dynamic
# bottleneck for the current `w`.
x = np.array(([[955],
[ 0],
[202]]))
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# This case corresponds to switching curve regime (i.e., drift @ psi_plus < 0),
# `w` is below the hedging threshold so standard Hedgehog would allow one resource to idle.
# Since this resource is not a dynamic bottleneck the GTO constraint also allows it to idle.
x = np.array([[ 955],
[ 0],
[1112]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# This case corresponds to switching curve regime (i.e., drift @ psi_plus < 0),
# `w` is below the hedging threshold so standard Hedgehog would allow the
# less loaded resource to idle. This is similar to the first case, but when both
# resources are dynamic bottlenecks for the current `w`.
x = np.array([[759],
[ 0],
[595]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# this case corresponds to monotone region so both bottlenecks are not
# allowed to idle under both standard Hedgehog and GTO policy
x = np.array([[283],
[672],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert hgto_object._min_drain_lp is not None
def test_create_strategic_idling_no_hedging_object_with_no_asymptotic_covariance():
"""
Raise exception if asymptotic covariance is tried to be updated.
"""
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
x = np.array([[413],
[ 0],
[100]])
si_object = StrategicIdlingCore(workload_mat=workload_mat, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
# these methods should not fail
si_object.get_allowed_idling_directions(x)
def test_create_strategic_idling_object_with_no_asymptotic_covariance():
"""
Check asymptotic covariance is passed before querying the idling decision
"""
neg_log_discount_factor = - np.log(0.95)
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
x = np.array([[413],
[ 0],
[100]])
si_object = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
with pytest.raises(AssertionError):
si_object._verify_offline_preliminaries()
with pytest.raises(AssertionError):
si_object.get_allowed_idling_directions(x)
def create_strategic_idling_object(
workload_mat=np.ones((2, 2)),
workload_cov=None,
neg_log_discount_factor=None,
load=None,
cost_per_buffer=np.ones((2, 1)),
model_type='push',
strategic_idling_params=None):
if strategic_idling_params is None:
strategic_idling_params = StrategicIdlingParams()
return StrategicIdlingHedging(workload_mat=workload_mat,
workload_cov=workload_cov,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=cost_per_buffer,
model_type=model_type,
strategic_idling_params=strategic_idling_params)
def test_create_strategic_idling_object_without_strategic_idling_params():
"""
Check assert `strategic_idling_params is not None` in constructor.
"""
neg_log_discount_factor = - np.log(0.95)
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
with pytest.raises(AssertionError):
_ = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type)
def test_is_negative_orthant_true():
w = np.zeros((3, 1))
w[0] = -1
assert StrategicIdlingHedging._is_negative_orthant(w)
def test_is_negative_orthant_false():
w = np.zeros((3, 1))
w[0] = 1
assert not StrategicIdlingHedging._is_negative_orthant(w)
def test_is_negative_orthant_false_since_zero_w():
w = np.zeros((3, 1))
assert not StrategicIdlingHedging._is_negative_orthant(w)
def check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer):
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
barc_a, _, eff_cost_a_1 = si_object.c_bar_solver.solve(w)
_, x_a, eff_cost_a_2 = alt_methods_test.compute_effective_cost_scipy(w, workload_mat,
cost_per_buffer)
barc_b, x_b, eff_cost_b = alt_methods_test.compute_effective_cost_cvxpy(w, workload_mat,
cost_per_buffer)
barc_c, x_c, eff_cost_c = alt_methods_test.compute_dual_effective_cost_cvxpy(w, workload_mat,
cost_per_buffer)
np.testing.assert_almost_equal(barc_a, barc_b)
np.testing.assert_almost_equal(barc_a, barc_c)
np.testing.assert_almost_equal(x_a, x_b)
np.testing.assert_almost_equal(x_a, x_c)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_b)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_c)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_a_2)
return barc_a
def test_effective_cost_superfluous_inequalities():
"""We check that Scipy linprog() used in compute_dual_effective_cost() does not return a status
4 (encountered numerical difficulties)"""
# This example was known to return this status 4 before the fix
env = examples.simple_reentrant_line_with_demand_model(alpha_d=2, mu1=3, mu2=2.5, mu3=3,
mus=1e3, mud=1e3,
cost_per_buffer=np.ones((5, 1)),
initial_state=np.array([10, 25,
55, 0,
100])[:, None],
capacity=np.ones((5, 1)) * np.inf,
job_conservation_flag=True)
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec=2,
load_threshold=None)
w = np.array([[1.], [0.]])
try:
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,)
c_bar, _, eff_cost = si_object.c_bar_solver.solve(w)
except exceptions.ScipyLinprogStatusError:
pytest.fail()
def test_effective_cost_ksrs_network_model_case_1():
"""Example 5.3.3 case 1 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
barc_1 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, 0]
np.testing.assert_almost_equal(barc_1, 1 / 3 * np.array([[0], [1]]))
# Region 2 = = {0 < w1 < 3 * w2 < 9 * w1}
w1 = 2
w2 = 1
w = np.array([[w1], [w2]])
barc_2 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
np.testing.assert_almost_equal(barc_2, 1 / 4 * np.ones((2, 1)))
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 1
w = np.array([[w1], [w2]])
barc_3 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, 0]
np.testing.assert_almost_equal(barc_3, 1 / 3 * np.array([[1], [0]]))
def test_effective_cost_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
barc_1 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, -2]
np.testing.assert_almost_equal(barc_1, np.array([[-2], [1]]))
# Region 2 = = {0 < w1 < 3 * w2 < 9 * w1}
w1 = 2
w2 = 1
w = np.array([[w1], [w2]])
barc_2 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
np.testing.assert_almost_equal(barc_2, 1 / 4 * np.ones((2, 1)))
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 1
w = np.array([[w1], [w2]])
barc_3 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [-2, 1]
np.testing.assert_almost_equal(barc_3, np.array([[1], [-2]]))
def test_all_effective_cost_vectors_ksrs_network_model_case_1():
"""Example 5.3.3 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Compute cost vectors.
barc_vectors = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat,
cost_per_buffer)
barc_vectors_theory = np.array([[1 / 3, 0],
[0, 1 / 3],
[0.25, 0.25]])
# Due to numerical noise, different computers can obtain the barc vectors in different order.
# So we will compare sets instead of ndarrays.
np.around(barc_vectors, decimals=7, out=barc_vectors)
np.around(barc_vectors_theory, decimals=7, out=barc_vectors_theory)
barc_vectors_set = set(map(tuple, barc_vectors))
barc_vectors_theory_set = set(map(tuple, barc_vectors_theory))
assert barc_vectors_set == barc_vectors_theory_set
def test_all_effective_cost_vectors_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Compute cost vectors.
barc_vectors = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat,
cost_per_buffer)
# Order of the vectors not relevant, just made up for easy comparison.
barc_vectors_theory = np.array([[1, -2],
[-2, 1],
[0.25, 0.25]])
# Due to numerical noise, different computers can obtain the barc vectors in different order.
# So we will compare sets instead of ndarrays.
np.around(barc_vectors, decimals=7, out=barc_vectors)
np.around(barc_vectors_theory, decimals=7, out=barc_vectors_theory)
barc_vectors_set = set(map(tuple, barc_vectors))
barc_vectors_theory_set = set(map(tuple, barc_vectors_theory))
assert barc_vectors_set == barc_vectors_theory_set
def test_get_vector_defining_possible_idling_direction_1():
w = np.array([[1], [0]])
w_star = np.array([[1], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[0], [1]]))
def test_get_vector_defining_possible_idling_direction_2():
w = np.array([[0], [1]])
w_star = np.array([[1], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[1], [0]]))
def test_get_vector_defining_possible_idling_direction_3():
# Although this w_star is impossible since w_star >= w, we can still calculate v_star.
w = np.array([[1], [1]])
w_star = np.array([[1], [0]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[0], [-1]]))
def test_get_vector_defining_possible_idling_direction_4():
# Although this w_star is impossible since w_star >= w, we can still calculate v_star.
w = np.array([[1], [1]])
w_star = np.array([[0], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[-1], [0]]))
def test_project_workload_on_monotone_region_along_minimal_cost_negative_w():
"""We use the single server queue with demand model. The expected result when we project
negative workload with the effective cost LP is zero."""
env = examples.single_station_demand_model(alpha_d=9, mu=10, mus=1e3, mud=1e2)
_, workload_mat, _ = wl.compute_load_workload_matrix(env)
num_wl = workload_mat.shape[0]
w = - np.ones((num_wl, 1))
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.zeros((num_wl, 1)))
def test_project_workload_on_monotone_region_along_minimal_cost_w_equal_w_star_ksrs_region_2():
"""We use the KSRS model, for which we know the boundary of the monotone region. Therefore, if
    we set w on the boundary, we should get w_star = w."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 1 = {0 < 3 * w1 < w2 < inf}, and Region 2 = {0 < w1 < 3 * w2 < 9 * w1}, so w = (1, 3)
    # is already right on the boundary.
w1 = 1
w2 = 3
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w, w_star)
def test_project_workload_on_monotone_region_along_minimal_cost_ksrs_region_1():
"""We use the KSRS model, for which we know the boundary of the monotone region."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 1 = {0 < 3 * w1 < w2 < inf}, so w = (0.5, 3) should be projected to w_star = (1, 3)
w1 = 0.5
w2 = 3
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.array([[1], [3]]))
def test_project_workload_on_monotone_region_along_minimal_cost_ksrs_region_3():
"""We use the KSRS model, for which we know the boundary of the monotone region."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 3 = {0 < 3 * w2 < w1}, so w = (3, 0.5) should be projected to w_star = (3, 1)
w1 = 3
w2 = 0.5
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.array([[3], [1]]))
def test_project_workload_on_monotone_region_along_minimal_cost_pseudorandom_values():
"""Since this uses random values, it could happen that the simplex (SciPy-LinProg) and SCS (CVX)
solvers give different solutions. This is uncommon, but possible."""
np.random.seed(42)
num_buffers = 4
num_wl = 3
num_tests = 1e3
strategic_idling_params = StrategicIdlingParams()
discrepancy = 0
for i in range(int(num_tests)):
w = np.random.random_sample((num_wl, 1))
cost_per_buffer = np.random.random_sample((num_buffers, 1))
workload_mat = np.random.random_sample((num_wl, num_buffers))
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
if not np.allclose(w_star, w_star_b):
discrepancy += 1
assert discrepancy < 5
def test_project_workload_when_monotone_region_is_a_ray():
"""We use the simple re-entrant line model."""
c_1 = 1
c_2 = 2
c_3 = 3
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)],
[mu_2 * c_2 + (mu_1 * mu_2) / mu_3 * (c_2 - c_1)]])
c_minus = np.array([[c_3 * mu_3],
[mu_2 * c_1 - c_3 * mu_2 * (mu_3 / mu_1 + 1)]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
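    # By construction psi_plus^T w_star_theory = 0, i.e. the projection lands on the ray
    # that forms the monotone region while keeping the first workload component of w.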
np.testing.assert_almost_equal(w_star, w_star_theory)
def test_project_workload_when_idling_direction_lies_in_c_plus_level_set_zero_penalty():
"""We use the simple re-entrant line model."""
c_1 = 2
c_2 = 1
c_3 = 2
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)], [mu_2 * (c_2 * (1 + mu_1/mu_3) - c_1 * mu_1 / mu_3)]])
c_minus = np.array([[mu_3 * c_3], [mu_2 * (c_1 - c_3 * (1 + mu_3/mu_1))]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
strategic_idling_params = StrategicIdlingParams(penalty_coeff_w_star=0)
si_object = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
with pytest.raises(AssertionError):
np.testing.assert_almost_equal(w_star, w_star_theory)
def test_project_workload_when_idling_direction_lies_in_c_plus_level_set():
"""We use the simple re-entrant line model."""
c_1 = 2
c_2 = 1
c_3 = 2
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)], [mu_2 * (c_2 * (1 + mu_1/mu_3) - c_1 * mu_1 / mu_3)]])
c_minus = np.array([[mu_3 * c_3], [mu_2 * (c_1 - c_3 * (1 + mu_3/mu_1))]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
si_object = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=cost_per_buffer,
strategic_idling_params=StrategicIdlingParams(penalty_coeff_w_star=1e-5))
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
np.testing.assert_almost_equal(w_star, w_star_theory, decimal=5)
def test_is_w_inside_monotone_region_ksrs_network_model_case_1():
"""Example 5.3.3 case 1 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Since w is already in \W^+ in any of the 3 regions, any increment in w will increase the cost,
# so w_star should equal w. Thus, v_star should be a vector of nan, in every case.
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1_1 = 1
w1_2 = 4
w_1 = np.array([[w1_1], [w1_2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w_1)
c_bar_1 = si_object_1._get_level_set_for_current_workload(w_1)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_1, w_star_1, c_bar_1)
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w2_1 = 2
w2_2 = 1
w_2 = np.array([[w2_1], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w3_1 = 4
w3_2 = 0.05
w_3 = np.array([[w3_1], [w3_2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w_3)
c_bar_3 = si_object_3._get_level_set_for_current_workload(w_3)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_3, w_star_3, c_bar_3)
def test_closest_face_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
strategic_idling_params = StrategicIdlingParams()
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1_1 = 1
w1_2 = 4
w_1 = np.array([[w1_1], [w1_2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w_1)
w_star_1b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_1, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_1, w_star_1b)
v_star_1 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_1, w_1)
psi_plus_1, c_plus_1, c_minus_1 = si_object_1._get_closest_face_and_level_sets(w_star_1,
v_star_1)
np.testing.assert_almost_equal(c_minus_1, np.array([[-2], [1]]), decimal=5)
np.testing.assert_almost_equal(c_plus_1, np.array([[0.25], [0.25]]), decimal=5)
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w2_1 = 2
w2_2 = 1
w_2 = np.array([[w2_1], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
w_star_2b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_2, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_2, w_star_2b)
# Region 2 is in the monotone region W^+
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w3_1 = 4
w3_2 = 0.05
w_3 = np.array([[w3_1], [w3_2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w_3)
w_star_3b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_3, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_3, w_star_3b)
v_star_3 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_3, w_3)
psi_plus_3, c_plus_3, c_minus_3 = si_object_3._get_closest_face_and_level_sets(w_star_3,
v_star_3)
np.testing.assert_almost_equal(c_minus_3, np.array([[1], [-2]]), decimal=5)
np.testing.assert_almost_equal(c_plus_3, np.array([[0.25], [0.25]]), decimal=5)
def test_is_monotone_region_a_ray_negative_c_plus():
c_plus = - np.ones((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_nonpositive_c_plus():
c_plus = np.array([[-1], [-1], [0]])
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_zero_c_plus():
c_plus = np.zeros((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_positive_c_plus():
c_plus = np.ones((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_c_plus_with_positive_negative_and_zero_components():
c_plus = np.array([[1], [-1], [0]])
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_c_plus_with_positive_and_negative_components():
c_plus = np.array([[1], [-1], [-1]])
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_simple_reentrant_line():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray."""
w = np.array([[1], [0]])
env = examples.simple_reentrant_line_model(mu1=2, mu2=1, mu3=2,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_infeasible_with_real_c_plus():
c_plus = np.array([[1], [-1], [-1]])
assert not StrategicIdlingHedging._is_infeasible(c_plus)
def test_is_monotone_region_infeasible():
c_plus = None
assert StrategicIdlingHedging._is_infeasible(c_plus)
def test_is_w_inside_monotone_region_when_small_tolerance():
w = np.random.random_sample((3, 1))
w_star = w + 1e-4
c_bar = np.ones((3, 1))
assert StrategicIdlingHedging._is_w_inside_monotone_region(w, w_star, c_bar)
def test_is_w_inside_monotone_region_false():
w = np.random.random_sample((3, 1))
w_star = w + 1e-2
c_bar = np.ones((3, 1))
assert not StrategicIdlingHedging._is_w_inside_monotone_region(w, w_star, c_bar)
def check_lambda_star(w, c_plus, psi_plus, w_star, test_strong_duality_flag=True):
lambda_star = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
lambda_star_b = alt_methods_test.get_price_lambda_star_lp_1_cvxpy(w, c_plus, psi_plus)
lambda_star_c = alt_methods_test.get_price_lambda_star_lp_2_cvxpy(w, c_plus, psi_plus)
lambda_star_d = alt_methods_test.get_price_lambda_star_lp_scipy(w, c_plus, psi_plus)
if test_strong_duality_flag:
lambda_star_a = alt_methods_test.get_price_lambda_star_strong_duality(w, w_star, c_plus,
psi_plus)
np.testing.assert_almost_equal(lambda_star, lambda_star_a, decimal=5)
if lambda_star_b is not None: # If primal is not accurately solved with CVX
np.testing.assert_almost_equal(lambda_star, lambda_star_b, decimal=5)
if lambda_star_c is not None:
np.testing.assert_almost_equal(lambda_star, lambda_star_c, decimal=5)
np.testing.assert_almost_equal(lambda_star, lambda_star_d)
return lambda_star
def test_get_price_lambda_star_when_c_plus_is_positive():
"""lambda_star depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[1], [1]])
w = np.array([[3], [0.1]])
psi_plus = np.array([[-.1], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_c_plus_is_negative():
"""c_plus should always be nonnegative"""
c_plus = np.array([[-1], [1]])
psi_plus = np.array([[-1], [0.5]])
with pytest.raises(exceptions.ArraySignError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert (excinfo.value.array_name == "c_plus" and excinfo.value.all_components and
excinfo.value.positive and not excinfo.value.strictly)
def test_get_price_lambda_star_when_c_plus_is_zero():
"""c_plus should always have at least one strictly positive component"""
c_plus = np.array([[0], [0]])
psi_plus = np.array([[-1], [0.5]])
with pytest.raises(exceptions.ArraySignError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert (excinfo.value.array_name == "c_plus" and not excinfo.value.all_components and
excinfo.value.positive and excinfo.value.strictly)
def test_get_price_lambda_star_when_c_plus_has_zero_components():
"""lambda_star only depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[0], [1]])
w = np.array([[3], [0.1]])
psi_plus = np.array([[-.1], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_c_plus_has_zero_components_with_positive_psi_plus():
"""lambda_star only depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[0], [1]])
w = np.array([[-3], [0.1]])
psi_plus = np.array([[0.5], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_psi_plus_is_negative():
    c_plus = np.array([[1], [1]])
import cv2
import numpy as np
from progress.bar import Bar
from mtcnn import p_net, o_net, r_net
from .log import logger
from .utils import load_weights, process_image, generate_bbox, py_nms, bbox_2_square, pad, calibrate_bbox
class Detector:
def __init__(self, weight_dir,
min_face_size=24,
threshold=None,
scale_factor=0.79,
mode=3):
assert mode in [1, 2, 3]
assert scale_factor < 1
self.min_face_size = min_face_size
self.threshold = [0.6, 0.7, 0.7] if threshold is None else threshold
self.scale_factor = scale_factor
self.p_net = None
self.r_net = None
self.o_net = None
self.init_network(mode, weight_dir)
def init_network(self, mode, weight_dir):
p_weights, r_weights, o_weights = load_weights(weight_dir)
        logger.info('Loading PNet weight file: {}'.format(p_weights))
self.p_net = p_net()
self.p_net.load_weights(p_weights)
if mode > 1:
logger.info('Loading RNet weight file: {}'.format(r_weights))
self.r_net = r_net()
self.r_net.load_weights(r_weights)
if mode > 2:
logger.info('Loading ONet weight file: {}'.format(o_weights))
self.o_net = o_net()
self.o_net.load_weights(o_weights)
def predict(self, np_images, verbose=False):
all_boxes = [] # save each image's bboxes
landmarks = []
bar = None
if verbose:
bar = Bar('Detecting...', max=len(np_images))
for im in np_images:
if bar:
bar.next()
boxes, boxes_c, landmark = self.predict_with_p_net(im)
if boxes_c is None:
                logger.info('Bounding boxes are None after PNet')
all_boxes.append(np.array([]))
                landmarks.append(np.array([]))
import numpy as np
import pycuda.autoinit
import pycuda.driver as cu
import pycuda.gpuarray as gpu
import pycuda.compiler as nvcc
import matplotlib.image as img
from pycuda.elementwise import ElementwiseKernel
import matplotlib
from PIL import Image, ImageDraw
import copy
import scipy
import time
pycuda_seaming = \
"""
__global__ void convolution(int* red, int* green, int* blue, int* filter, int* energy_map, int height, int width){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index_pixel = row * width + col;
int size = height * width;
if(col < width - 1 && row < height - 1 && col > 0 && row > 0){
int r = (red[index_pixel] * filter[4]) + (red[index_pixel - 1] * filter[3]) + (red[index_pixel + 1] * filter[5]) + (red[index_pixel - width] * filter[1]) + (red[index_pixel + width] * filter[7]) + (red[index_pixel - width - 1] * filter[0]) + (red[index_pixel - width + 1] * filter[2]) + (red[index_pixel + width - 1] * filter[6]) + (red[index_pixel + width + 1] * filter[8]);
int g = (green[index_pixel] * filter[4]) + (green[index_pixel - 1] * filter[3]) + (green[index_pixel + 1] * filter[5]) + (green[index_pixel - width] * filter[1]) + (green[index_pixel + width] * filter[7]) + (green[index_pixel - width - 1] * filter[0]) + (green[index_pixel - width + 1] * filter[2]) + (green[index_pixel + width - 1] * filter[6]) + (green[index_pixel + width + 1] * filter[8]);
int b = (blue[index_pixel] * filter[4]) + (blue[index_pixel - 1] * filter[3]) + (blue[index_pixel + 1] * filter[5]) + (blue[index_pixel - width] * filter[1]) + (blue[index_pixel + width] * filter[7]) + (blue[index_pixel - width - 1] * filter[0]) + (blue[index_pixel - width + 1] * filter[2]) + (blue[index_pixel + width - 1] * filter[6]) + (blue[index_pixel + width + 1] * filter[8]);
energy_map[index_pixel] = r;
energy_map[size+index_pixel] = g;
energy_map[size*2+index_pixel] = b;
}
}
__global__ void suma(int* x, int* y, int height, int width){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index_pixel = row * width + col;
int new_index_pixel = (row-1)*(width-2) + (col-1);
int size = height * width;
if(col<width-1 && row<height-1 && col>0 && row>0){
y[new_index_pixel]=x[index_pixel]+x[size+index_pixel]+x[2*size + index_pixel];
}
}
__global__ void find_min(int* energy_map, int height, int width, int* backtrack){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index_pixel = row * width + col;
int size = height * width;
int val = 9999999;
int idx;
if(col<width && row<height-1){
if(col == 0){
for(int i = 0; i < 2; i = i + 1){
if(val > energy_map[index_pixel + width + i]){
idx = i;
val = energy_map[index_pixel + i + width];
}
}
backtrack[index_pixel] = index_pixel+width+idx;
}
else if(col==width-1){
for(int i = 0; i < 2; i = i + 1){
if(val > energy_map[index_pixel + width + i - 1]){
idx = i;
val = energy_map[index_pixel + i + width - 1];
}
}
backtrack[index_pixel] = index_pixel+width+idx-1;
}
else{
for(int i = 0; i < 3; i = i + 1){
if(val > energy_map[index_pixel + width + i - 1]){
idx = i;
val = energy_map[index_pixel + i + width - 1];
}
}
backtrack[index_pixel] = index_pixel+width+idx-1;
}
}
}
__global__ void get_sum_map(int* sum_map, int* next_sum_map, int* offset_map, int* next_offset,int height, int width, int n){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index_pixel = row * width + col;
int size=height*width;
if(col < width && row < height && row%(2*n)==0 && offset_map[index_pixel]>=0 && offset_map[index_pixel]<=width*height){
next_sum_map[index_pixel] = sum_map[index_pixel]+sum_map[offset_map[index_pixel]];
__syncthreads();
next_offset[index_pixel]=offset_map[offset_map[index_pixel]];
__syncthreads();
}
}
__global__ void extract_seam_path(int* backtrack, int index,int height, int width, int* path){
int tid=blockIdx.x * blockDim.x + threadIdx.x;
if(tid==0){
path[0]=index;
path[1]=index;
for(int i=1;i<height-1;++i){
path[i+1]=backtrack[index]%(width-2);
index=backtrack[index];
}
path[height-1]=path[height-2];
}
}
__global__ void remove_seam(int* red, int* green, int* blue, int* new_red, int* new_green, int* new_blue,int* path, int height, int width){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int new_index_pixel = row *(width-1) + col;
int index_pixel = row*width+col;
if(col<width-1 && row<height){
if(col>=path[row]){
new_red[new_index_pixel]=red[index_pixel+1];
new_green[new_index_pixel]=green[index_pixel+1];
new_blue[new_index_pixel]=blue[index_pixel+1];
}
else{
new_red[new_index_pixel] = red[index_pixel];
new_green[new_index_pixel] = green[index_pixel];
new_blue[new_index_pixel] = blue[index_pixel];
}
}
}
"""
if __name__ == '__main__':
start = time.time()
    # CUDA kernels
module = nvcc.SourceModule(pycuda_seaming)
convolution_kernel = module.get_function("convolution")
suma=module.get_function("suma")
find_min = module.get_function("find_min")
get_sum_map = module.get_function("get_sum_map")
extract_seam_path = module.get_function("extract_seam_path")
remove_seam = module.get_function("remove_seam")
    # Read the input image
h_img_in = np.array(img.imread('img.jpg'), dtype=np.int32)
height, width, channels = np.int32(h_img_in.shape)
block = (32, 32, 1)
grid = (int(np.ceil(width/block[0])),
int(np.ceil(height/block[1])))
    # Split the image into its channels and transfer them to the GPU (this simplifies the algorithm)
red_cpu = np.array(copy.deepcopy(h_img_in[:,:,0]), dtype=np.int32)
green_cpu = np.array(copy.deepcopy(h_img_in[:,:,1]), dtype=np.int32)
blue_cpu = np.array(copy.deepcopy(h_img_in[:,:,2]), dtype=np.int32)
red_gpu = gpu.to_gpu(red_cpu)
green_gpu = gpu.to_gpu(green_cpu)
blue_gpu = gpu.to_gpu(blue_cpu)
filterdu_cpu = np.array([
[1.0, 2.0, 1.0],
[0.0, 0.0, 0.0],
[-1.0, -2.0, -1.0],
], dtype = np.int32)
filterdv_cpu = np.array([
[1.0, 0.0, -1.0],
[2.0, 0.0, -2.0],
[1.0, 0.0, -1.0]
], dtype = np.int32)
filterdu_gpu = gpu.to_gpu(filterdu_cpu)
filterdv_gpu = gpu.to_gpu(filterdv_cpu)
    # Transfer to the GPU
d_img_in = gpu.to_gpu(h_img_in)
seams=600
for i in range(seams):
#print(i)
        # Apply the convolutions to compute the energies
width2=np.int32(width-2)
height2=np.int32(height-2)
energy_mapdu = gpu.empty((3,height, width), dtype=np.int32)
energy_mapdv = gpu.empty((3,height, width), dtype=np.int32)
convolution_kernel(red_gpu, green_gpu, blue_gpu, filterdu_gpu, energy_mapdu, height, width, block = block, grid = grid)
convolution_kernel(red_gpu, green_gpu, blue_gpu, filterdv_gpu, energy_mapdv, height, width, block = block, grid = grid)
energy_map = energy_mapdu.__abs__() + energy_mapdv.__abs__()
        # The borders are removed by the convolution, so the energy map has these dimensions
energy_map2 = gpu.empty((height2, width2), dtype=np.int32)
suma(energy_map, energy_map2, height, width, block = block, grid = grid)
        # Store in backtrack the candidate paths to follow with a greedy approach (always taking the lowest energy)
backtrack = gpu.empty((height2, width2), dtype = np.int32)
find_min(energy_map2, height2, width2, backtrack, block = block, grid = grid)
offset_map = backtrack.copy()
sum_map = energy_map2.copy()
        # Compute sum_top, the cumulative energies of all paths, by reduction
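        # This is a pointer-jumping (parallel prefix) reduction: on each pass, rows at
        # multiples of 2*n add the partial sum they point to and then re-point one hop
        # further, so roughly log2(height) passes accumulate the full path energies.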
n=np.int32(1)
while n<height2/2:
next_sum_map = gpu.empty_like(sum_map)
next_offset = gpu.empty_like(offset_map)
get_sum_map(sum_map, next_sum_map,offset_map, next_offset,height2, width2, n,block = block, grid = grid)
#print(next_offset)
sum_map = next_sum_map.copy()
offset_map = next_offset.copy()
n=n*2
        # Minimum index of sum_top to start the cut; this will be our optimal seam
indexs=gpu.arange(0,width2, dtype=np.int32)
sum_top = gpu.take(sum_map, indexs)
sum_cpu = np.zeros(width2, dtype=np.int32)
cu.memcpy_dtoh(sum_cpu, sum_top.gpudata)
indice=np.argmin(sum_cpu)
        # Extract and remove the optimal seam found
path = gpu.empty(height, dtype=np.int32)
extract_seam_path(backtrack, np.int32(indice), height, width, path, block = (1,1,1), grid = (1,1))
new_red_gpu=gpu.empty((height,width-1), dtype=np.int32)
new_green_gpu=gpu.empty((height,width-1), dtype=np.int32)
new_blue_gpu=gpu.empty((height,width-1), dtype=np.int32)
remove_seam(red_gpu, green_gpu, blue_gpu, new_red_gpu, new_green_gpu, new_blue_gpu, path, height, width, block = block, grid = grid)
        # The width decreases by one after removing the seam; keep the results for the next iteration
width=np.int32(width-1)
red_gpu, green_gpu, blue_gpu = new_red_gpu, new_green_gpu, new_blue_gpu
    # Transfer the channels back to the host
red_cpu=np.zeros((height,width), dtype=np.int32)
    green_cpu=np.zeros((height,width), dtype=np.int32)
import CandyCrashSimulation3X3_2 as f
import random
import copy
import numpy as np
width = height = 3
s = 2 ** (width * height)
num_candy = 2
N = 100000
print("size: {} * {}".format(width,height))
w_score = np.zeros(s*N).reshape(s,N)
for i in range(s):
board = f.buildboard(i, height, width)
for c in range(N):
newboard,tmp,score = f.eliminate_to_static(board,height,width)
w_score[i][c] = score
mean = np.mean(w_score,axis=1)
'''
function result = johansen(x,p,k)
% PURPOSE: perform Johansen cointegration tests
% -------------------------------------------------------
% USAGE: result = johansen(x,p,k)
% where: x = input matrix of time-series in levels, (nobs x m)
% p = order of time polynomial in the null-hypothesis
% p = -1, no deterministic part
% p = 0, for constant term
% p = 1, for constant plus time-trend
% p > 1, for higher order polynomial
% k = number of lagged difference terms used when
% computing the estimator
% -------------------------------------------------------
% RETURNS: a results structure:
% result.eig = eigenvalues (m x 1)
% result.evec = eigenvectors (m x m), where first
% r columns are normalized coint vectors
% result.lr1 = likelihood ratio trace statistic for r=0 to m-1
% (m x 1) vector
% result.lr2 = maximum eigenvalue statistic for r=0 to m-1
% (m x 1) vector
% result.cvt = critical values for trace statistic
% (m x 3) vector [90% 95% 99%]
% result.cvm = critical values for max eigen value statistic
% (m x 3) vector [90% 95% 99%]
% result.ind = index of co-integrating variables ordered by
% size of the eigenvalues from large to small
% -------------------------------------------------------
% NOTE: c_sja(), c_sjt() provide critical values generated using
% a method of MacKinnon (1994, 1996).
% critical values are available for n<=12 and -1 <= p <= 1,
% zeros are returned for other cases.
% -------------------------------------------------------
% SEE ALSO: prt_coint, a function that prints results
% -------------------------------------------------------
% References: Johansen (1988), 'Statistical Analysis of Co-integration
% vectors', Journal of Economic Dynamics and Control, 12, pp. 231-254.
% MacKinnon, <NAME> (1996) 'Numerical distribution
% functions of likelihood ratio tests for cointegration',
% Queen's University Institute for Economic Research Discussion paper.
% (see also: MacKinnon's JBES 1994 article
% -------------------------------------------------------
% written by:
% <NAME>, Dept of Economics
% University of Toledo
% 2801 <NAME> St,
% Toledo, OH 43606
% <EMAIL>
% ****************************************************************
% NOTE: <NAME> provided some bug fixes and corrections that
% she notes below in comments. 4/10/2000
% ****************************************************************
'''
import numpy as np
from numpy import zeros, ones, flipud, log
from numpy.linalg import inv, eig, cholesky as chol
from statsmodels.regression.linear_model import OLS
tdiff = np.diff
class Holder(object):
pass
def rows(x):
return x.shape[0]
def trimr(x, front, end):
if end > 0:
return x[front:-end]
else:
return x[front:]
import statsmodels.tsa.tsatools as tsat
mlag = tsat.lagmat
def mlag_(x, maxlag):
'''return all lags up to maxlag
'''
    return x[:-maxlag]
def lag(x, lag):
return x[:-lag]
def detrend(y, order):
if order == -1:
return y
return OLS(y, np.vander(np.linspace(-1, 1, len(y)), order + 1)).fit().resid
def resid(y, x):
r = y - np.dot(x, np.dot(np.linalg.pinv(x), y))
return r
def coint_johansen(x, p, k, print_on_console=True):
# % error checking on inputs
# if (nargin ~= 3)
# error('Wrong # of inputs to johansen')
# end
nobs, m = x.shape
# why this? f is detrend transformed series, p is detrend data
if (p > -1):
f = 0
else:
f = p
x = detrend(x, p)
dx = tdiff(x, 1, axis=0)
# dx = trimr(dx,1,0)
z = mlag(dx, k) # [k-1:]
# print z.shape
z = trimr(z, k, 0)
z = detrend(z, f)
# print dx.shape
dx = trimr(dx, k, 0)
dx = detrend(dx, f)
# r0t = dx - z*(z\dx)
r0t = resid(dx, z) # diff on lagged diffs
# lx = trimr(lag(x,k),k,0)
lx = lag(x, k)
lx = trimr(lx, 1, 0)
dx = detrend(lx, f)
# print 'rkt', dx.shape, z.shape
# rkt = dx - z*(z\dx)
rkt = resid(dx, z) # level on lagged diffs
skk = np.dot(rkt.T, rkt) / rows(rkt)
sk0 = np.dot(rkt.T, r0t) / rows(rkt)
s00 = np.dot(r0t.T, r0t) / rows(r0t)
sig = np.dot(sk0, np.dot(inv(s00), (sk0.T)))
tmp = inv(skk)
# du, au = eig(np.dot(tmp, sig))
au, du = eig(np.dot(tmp, sig)) # au is eval, du is evec
# orig = np.dot(tmp, sig)
# % Normalize the eigen vectors such that (du'skk*du) = I
temp = inv(chol(np.dot(du.T, np.dot(skk, du))))
dt = np.dot(du, temp)
# JP: the next part can be done much easier
# % NOTE: At this point, the eigenvectors are aligned by column. To
# % physically move the column elements using the MATLAB sort,
# % take the transpose to put the eigenvectors across the row
# dt = transpose(dt)
# % sort eigenvalues and vectors
# au, auind = np.sort(diag(au))
auind = np.argsort(au)
# a = flipud(au)
aind = flipud(auind)
a = au[aind]
# d = dt[aind,:]
d = dt[:, aind]
# %NOTE: The eigenvectors have been sorted by row based on auind and moved to array "d".
# % Put the eigenvectors back in column format after the sort by taking the
# % transpose of "d". Since the eigenvectors have been physically moved, there is
# % no need for aind at all. To preserve existing programming, aind is reset back to
# % 1, 2, 3, ....
# d = transpose(d)
# test = np.dot(transpose(d), np.dot(skk, d))
# %EXPLANATION: The MATLAB sort function sorts from low to high. The flip realigns
# %auind to go from the largest to the smallest eigenvalue (now aind). The original procedure
# %physically moved the rows of dt (to d) based on the alignment in aind and then used
# %aind as a column index to address the eigenvectors from high to low. This is a double
# %sort. If you wanted to extract the eigenvector corresponding to the largest eigenvalue by,
# %using aind as a reference, you would get the correct eigenvector, but with sorted
# %coefficients and, therefore, any follow-on calculation would seem to be in error.
# %If alternative programming methods are used to evaluate the eigenvalues, e.g. Frame method
# %followed by a root extraction on the characteristic equation, then the roots can be
# %quickly sorted. One by one, the corresponding eigenvectors can be generated. The resultant
# %array can be operated on using the Cholesky transformation, which enables a unit
# %diagonalization of skk. But nowhere along the way are the coefficients within the
# %eigenvector array ever changed. The final value of the "beta" array using either method
# %should be the same.
# % Compute the trace and max eigenvalue statistics */
lr1 = zeros(m)
lr2 = zeros(m)
cvm = zeros((m, 3))
cvt = zeros((m, 3))
iota = ones(m)
t, junk = rkt.shape
for i in range(0, m):
tmp = trimr(log(iota - a), i , 0)
lr1[i] = -t * np.sum(tmp, 0) # columnsum ?
# tmp = np.log(1-a)
# lr1[i] = -t * np.sum(tmp[i:])
        lr2[i] = -t * log(1 - a[i])
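        # These are the Johansen statistics: the trace statistic
        # lr1[i] = -T * sum_{j >= i} ln(1 - lambda_j) and the maximum-eigenvalue
        # statistic lr2[i] = -T * ln(1 - lambda_i), evaluated for r = 0..m-1.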
# -*- coding: utf-8 -*-
"""
.. Authors
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
A set of mathematical utilities and vector convenience functions for XICSRT.
"""
import numpy as np
def distance_point_to_line(origin, normal, point):
o = origin
n = normal
p = point
t = np.dot(p - o, n) / np.dot(n, n)
d = np.linalg.norm(np.outer(t, n) + o - p, axis=1)
return d
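# Example (sketch): for a line through the origin along z-hat, the point (1, 0, 5) is at
# perpendicular distance 1, e.g.
#   distance_point_to_line(np.zeros(3), np.array([0., 0., 1.]), np.array([[1., 0., 5.]]))
#   # -> array([1.])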
def toarray_1d(a):
"""
Convert the input to a ndarray with at least 1 dimension.
This is similar to the numpy function atleast_1d, but has less overhead
and is jax compatible.
"""
a = np.asarray(a)
if a.ndim == 0:
a = a.reshape(1)
return a
def vector_angle(a, b):
"""
Find the angle between two vectors.
"""
a_mod = np.linalg.norm(a)
b_mod = np.linalg.norm(b)
    if a.ndim == 2 and b.ndim == 2:
        dot = np.einsum('ij,ik->i', a/a_mod, b/b_mod, optimize=True)
    elif a.ndim == 1 and b.ndim == 1:
dot = np.dot(a/a_mod, b/b_mod)
else:
raise Exception('Input must have 1 or 2 dimensions.')
angle = np.arccos(dot)
return angle
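# Example (sketch): orthogonal unit vectors are separated by pi/2, e.g.
#   vector_angle(np.array([1., 0., 0.]), np.array([0., 1., 0.]))  # -> ~1.5708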
def vector_rotate(a, b, theta):
"""
Rotate vector a around vector b by an angle theta (radians)
Programming Notes:
u: parallel projection of a on b_hat.
v: perpendicular projection of a on b_hat.
w: a vector perpendicular to both a and b.
"""
if a.ndim == 2:
b_hat = b / np.linalg.norm(b)
dot = np.einsum('ij,j->i', a, b_hat, optimize=True)
u = np.einsum('i,j->ij', dot, b_hat, optimize=True)
v = a - u
w = np.cross(b_hat, v)
c = u + v * np.cos(theta) + w * np.sin(theta)
elif a.ndim == 1:
b_hat = b / np.linalg.norm(b)
u = b_hat * np.dot(a, b_hat)
v = a - u
w = np.cross(b_hat, v)
c = u + v * np.cos(theta) + w * np.sin(theta)
else:
raise Exception('Input array must be 1d (vector) or 2d (array of vectors)')
return c
def magnitude(vector):
"""
Calculate magnitude of a vector or array of vectors.
"""
if vector.ndim > 1:
mag = np.linalg.norm(vector, axis=1)
else:
mag = np.linalg.norm(vector)
return mag
def normalize(vector):
"""
Normalize a vector or an array of vectors.
If an array of vectors is given it should have the shape (N,M) where
| N: Number of vectors
| M: Vector length
"""
if vector.ndim > 1:
norm = np.linalg.norm(vector, axis=1)
vector /= np.expand_dims(norm, 1)
else:
norm = np.linalg.norm(vector)
vector /= norm
return vector
def sinusoidal_spiral(phi, b, r0, theta0):
r = r0 * (np.sin(theta0 + (b-1)*phi)/np.sin(theta0))**(1/(b-1))
return r
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis/np.sqrt(np.dot(axis, axis))
a = np.cos(theta/2.0)
b, c, d = -axis*np.sin(theta/2.0)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
matrix = np.array(
[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)]
,[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)]
,[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
return matrix
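# Example (sketch): a 90-degree counterclockwise rotation about z maps x-hat onto y-hat, e.g.
#   R = rotation_matrix([0, 0, 1], np.pi / 2)
#   np.allclose(R @ np.array([1., 0., 0.]), [0., 1., 0.])  # -> True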
def bragg_angle(wavelength, crystal_spacing):
"""
The Bragg angle calculation is used so often that it deserves its own
function.
.. Note::
The crystal_spacing here is the true spacing, not the 2d spacing that
is sometimes used in the literature.
"""
bragg_angle = np.arcsin(wavelength / (2 * crystal_spacing))
return bragg_angle
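# Example (sketch): with the wavelength equal to the crystal spacing,
#   bragg_angle(1.0, 1.0) = arcsin(0.5) = pi/6 ~ 0.5236 rad (30 degrees).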
def cyl_from_car(point_car):
"""
Convert from cartesian to cylindrical coordinates.
"""
if not isinstance(point_car, np.ndarray):
point_car = np.array(point_car)
point_cyl = np.empty(point_car.shape)
if point_car.ndim == 2:
point_cyl[:, 0] = np.sqrt(np.sum(point_car[:, 0:2]**2, 1))
point_cyl[:, 1] = np.arctan2(point_car[:, 1], point_car[:, 0])
point_cyl[:, 2] = point_car[:, 2]
else:
point_cyl[0] = np.sqrt(np.sum(point_car[0:2]**2))
point_cyl[1] = np.arctan2(point_car[1], point_car[0])
point_cyl[2] = point_car[2]
return point_cyl
def car_from_cyl(point_cyl):
"""
Convert from cylindrical to cartesian coordinates.
"""
if not isinstance(point_cyl, np.ndarray):
point_cyl = np.array(point_cyl)
point_car = np.empty(point_cyl.shape)
if point_cyl.ndim == 2:
point_car[:, 0] = point_cyl[:, 0]*np.cos(point_cyl[:, 1])
point_car[:, 1] = point_cyl[:, 0]*np.sin(point_cyl[:, 1])
point_car[:, 2] = point_cyl[:, 2]
else:
point_car[0] = point_cyl[0]*np.cos(point_cyl[1])
point_car[1] = point_cyl[0]*np.sin(point_cyl[1])
point_car[2] = point_cyl[2]
return point_car
def tor_from_car(point_car, major_radius):
"""
Convert from cartesian to toroidal coordinates.
Arguments
---------
point_car: array [meters]
Cartesian coordinates [x,y,z]
major_radius: float [meters]
Torus Major Radius
Returns
-------
point_tor: array [meters]
Toroidal coordinates [r_min, theta_poloidal, theta_toroidal]
"""
if not isinstance(point_car, np.ndarray):
point_car = np.array(point_car)
point_tor = np.empty(point_car.shape)
if point_tor.ndim == 2:
d = np.linalg.norm(point_car[:, 0:2], axis=1) - major_radius
point_tor[:, 2] = np.arctan2(point_car[:, 1], point_car[:, 0])
point_tor[:, 1] = np.arctan2(point_car[:, 2], d)
point_tor[:, 0] = np.sqrt(np.power(point_car[:, 2], 2) + np.power(d, 2))
else:
d = np.linalg.norm(point_car[0:2]) - major_radius
point_tor[2] = np.arctan2(point_car[1], point_car[0])
point_tor[1] = np.arctan2(point_car[2], d)
point_tor[0] = np.sqrt(np.power(point_car[2], 2) + np.power(d, 2))
return point_tor
def car_from_tor(point_tor, major_radius):
"""
Convert from toroidal to cartesian coordinates.
Arguments
---------
point_tor: array [meters]
Toroidal coordinates [r_min, theta_poloidal, theta_toroidal]
major_radius: float [meters]
Torus Major Radius
Returns
-------
point_car: array [meters]
Cartesian coordinates [x,y,z]
"""
if not isinstance(point_tor, np.ndarray):
point_tor = np.array(point_tor)
point_car = np.empty(point_tor.shape)
if point_tor.ndim == 2:
point_car[:, 0] = major_radius*np.cos(point_tor[:, 2])
point_car[:, 1] = major_radius*np.sin(point_tor[:, 2])
vector = point_car[:, 0:2]/np.linalg.norm(point_car[:, 0:2], axis=1)
point_car[:, 0] = point_car[:, 0] + vector[:, 0]*point_tor[:, 0]*np.cos(point_tor[:, 1])
point_car[:, 1] = point_car[:, 1] + vector[:, 1]*point_tor[:, 0]*np.cos(point_tor[:, 1])
point_car[:, 2] = point_tor[:, 0]*np.sin(point_tor[:, 1])
else:
        point_car[0] = major_radius*np.cos(point_tor[2])
import numpy as np, pandas as pd
import torch, dataclasses, warnings, operator, builtins, numbers, os
from typing import List
from torch.utils.data import DataLoader
import scipy.sparse as sps
def get_batch_size(shape, frac=float(os.environ.get("BATCH_SIZE_FRAC", 0.1))):
""" round to similar batch sizes """
n_users, n_items = shape
if torch.cuda.device_count():
total_memory = torch.cuda.get_device_properties(0).total_memory
else:
total_memory = 16e9
max_batch_size = total_memory / 8 / n_items * frac
n_batches = int(n_users / max_batch_size) + 1
return int(np.ceil(n_users / n_batches))
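# Worked example (sketch, assuming no GPU and the default frac=0.1 with BATCH_SIZE_FRAC unset):
# for shape = (1_000_000, 50_000), max_batch_size = 16e9 / 8 / 50_000 * 0.1 = 4000,
# so n_batches = 251 and get_batch_size returns ceil(1_000_000 / 251) = 3985.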
def matrix_reindex(csr, old_index, new_index, axis, fill_value=0):
""" pandas.reindex functionality on sparse or dense matrices as well as 1d arrays """
if axis == 1:
return matrix_reindex(csr.T, old_index, new_index, 0, fill_value).T.copy()
assert axis == 0, "axis must be 0 or 1"
assert csr.shape[0] == len(old_index), "shape must match between csr and old_index"
if sps.issparse(csr):
csr = sps.vstack([csr, csr[:1] * 0 + fill_value], "csr")
csr.eliminate_zeros()
else:
csr = np.concatenate([csr, csr[:1] * 0 + fill_value], axis=0)
iloc = pd.Series(
np.arange(len(old_index)), index=old_index
).reindex(new_index, fill_value=-1).values
return csr[iloc].copy()
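# Example (sketch): rows present in new_index are copied, missing rows get fill_value, e.g.
#   matrix_reindex(np.arange(6).reshape(3, 2), ['a', 'b', 'c'], ['b', 'd'], axis=0)
#   # -> [[2, 3], [0, 0]]  ('d' is not in old_index, so its row is filled with 0)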
def sps_to_torch(x, device):
""" convert scipy.sparse to torch.sparse """
coo = x.tocoo()
values = coo.data
indices = np.vstack((coo.row, coo.col))
return torch.sparse_coo_tensor(indices, values, coo.shape, device=device)
class LazyScoreBase:
""" Lazy element-wise A*B+C for sparse and low-rank matrices.
The base class wraps over scalar, scipy.sparse, and numpy dense.
Methods to overload include: eval, T, __getitem__, collate_fn.
Method `reindex` is only supported in the derived LowRankDataFrame subclass.
"""
def eval(self, device):
""" LazyScoreBase -> scalar, numpy (device is None), or torch (device) """
raise NotImplementedError
def T(self):
""" LazyScoreBase -> LazyScoreBase(transposed) """
raise NotImplementedError
def __getitem__(self, key):
""" LazyScoreBase -> LazyScoreBase(sub-rows); used in pytorch dataloader """
raise NotImplementedError
@classmethod
def collate_fn(cls, D):
""" List[LazyScoreBase] -> LazyScoreBase; used in pytorch dataloader """
raise NotImplementedError
def __len__(self):
return self.shape[0]
@property
def size(self):
return np.prod(self.shape)
@property
def batch_size(self):
return get_batch_size(self.shape)
def _wrap_and_check(self, other):
other = auto_cast_lazy_score(other)
if not isinstance(self, _LazyScoreScalar) and not isinstance(other, _LazyScoreScalar):
assert np.allclose(self.shape, other.shape), "shape must be compatible"
return other
def __add__(self, other):
other = self._wrap_and_check(other)
return LazyScoreExpression(operator.add, [self, other])
def __mul__(self, other):
other = self._wrap_and_check(other)
return LazyScoreExpression(operator.mul, [self, other])
def auto_cast_lazy_score(other):
if other is None:
return None # prior_score=None -> None
elif isinstance(other, LazyScoreBase):
return other
elif isinstance(other, numbers.Number):
return _LazyScoreScalar(other)
elif sps.issparse(other):
return LazyScoreSparseMatrix(other)
elif isinstance(other, pd.DataFrame):
return LazyScoreDenseMatrix(other.values)
elif np.ndim(other) == 2:
return LazyScoreDenseMatrix(other)
else:
raise NotImplementedError(f"type {type(other)} is not supported")
class _LazyScoreScalar(LazyScoreBase):
def __init__(self, c):
self.c = c
self.shape = ()
def eval(self, device):
return self.c
@property
def T(self):
return self
def __getitem__(self, key):
return self
@classmethod
def collate_fn(cls, D):
return D[0]
class LazyScoreSparseMatrix(LazyScoreBase):
def __init__(self, c):
self.c = c.tocsr()
self.shape = c.shape
def eval(self, device):
return self.c.toarray() if device is None else \
sps_to_torch(self.c, device).to_dense()
@property
def T(self):
return self.__class__(self.c.T)
def __getitem__(self, key):
if np.isscalar(key):
slc = slice(self.c.indptr[key], self.c.indptr[key + 1])
_dict = {
"values": self.c.data[slc],
"keys": self.c.indices[slc],
"shape": self.c.shape[1],
}
return _LazyScoreSparseDictFast(_dict)
else:
return self.__class__(self.c[key])
@classmethod
def collate_fn(cls, D):
return cls(sps.vstack([d.c for d in D]))
class _LazyScoreSparseDictFast(LazyScoreBase):
def __init__(self, c):
self.c = c
self.shape = (1, self.c['shape'])
@classmethod
def collate_fn(cls, D):
C = [d.c for d in D]
csr = sps.csr_matrix((
np.hstack([c['values'] for c in C]), # data
np.hstack([c['keys'] for c in C]), # indices
np.hstack([[0], np.cumsum([len(c['keys']) for c in C])]), # indptr
), shape=(len(C), C[0]['shape']))
return LazyScoreSparseMatrix(csr)
class LazyScoreDenseMatrix(LazyScoreBase):
def __init__(self, c):
self.c = c
self.shape = c.shape
def eval(self, device):
return self.c if device is None else torch.as_tensor(self.c, device=device)
@property
def T(self):
return self.__class__(self.c.T)
def __getitem__(self, key):
if np.isscalar(key):
key = [key] # list or slice
return self.__class__(self.c[key])
@classmethod
def collate_fn(cls, D):
return cls(np.vstack([d.c for d in D]))
class LazyScoreExpression(LazyScoreBase):
""" Tree representation of score expression until final eval """
def __init__(self, op, children):
self.op = op
self.children = children
for c in children:
assert isinstance(c, LazyScoreBase), f"please wrap {c} in LazyScoreBase"
self.shape = children[0].shape
def eval(self, device=None):
children = [c.eval(device) for c in self.children]
return self.op(*children)
@property
def T(self):
children = [c.T for c in self.children]
return self.__class__(self.op, children)
def __getitem__(self, key):
children = [c[key] for c in self.children]
return self.__class__(self.op, children)
@classmethod
def collate_fn(cls, batch):
first = batch[0]
data = zip(*[b.children for b in batch])
children = [c.collate_fn(D) for c, D in zip(first.children, data)]
return cls(first.op, children)
@dataclasses.dataclass(repr=False)
class RandScore(LazyScoreBase):
""" add random noise to break ties """
row_seeds: list # np.array for fast indexing
col_seeds: list # np.array for fast indexing
@property
def shape(self):
return (len(self.row_seeds), len(self.col_seeds))
@classmethod
def like(cls, other):
return cls(np.arange(other.shape[0]), np.arange(other.shape[1]))
def eval(self, device=None):
d1 = len(self.col_seeds)
if device is None:
return np.vstack([
np.random.RandomState(int(s)).rand(d1)
for s in self.row_seeds])
else:
rows = []
for s in self.row_seeds:
generator = torch.Generator(device).manual_seed(int(s))
new = torch.rand(d1, device=device, generator=generator)
rows.append(new)
return torch.vstack(rows)
@property
def T(self):
warnings.warn("transpose changes rand seed; only for evaluate_user_rec")
return self.__class__(self.col_seeds, self.row_seeds)
def __getitem__(self, key):
if np.isscalar(key):
key = [key]
row_seeds = self.row_seeds[key]
return self.__class__(row_seeds, self.col_seeds)
@classmethod
def collate_fn(cls, batch):
return cls(np.hstack([b.row_seeds for b in batch]), batch[0].col_seeds)
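# Example (sketch): the tie-breaking noise is reproducible because each row is drawn from
# its own seed-keyed generator, e.g.
#   rs = RandScore(np.arange(3), np.arange(5))
#   np.allclose(rs.eval(), rs.eval())  # -> True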
@dataclasses.dataclass(repr=False)
class LowRankDataFrame(LazyScoreBase):
""" mimics a pandas dataframe with low-rank structures and
nonnegative exp / softplus / sigmoid activation
"""
ind_logits: List[list]
col_logits: List[list]
index: list # np.array for fast indexing
columns: list # np.array for fast indexing
act: str
ind_default: list = None
col_default: list = None
def __post_init__(self):
if self.ind_default is None:
self.ind_default = np.zeros_like(self.ind_logits[0])
if self.col_default is None:
self.col_default = np.zeros_like(self.col_logits[0])
assert self.ind_logits.shape[1] == self.col_logits.shape[1], "check hidden"
assert self.ind_logits.shape[0] == len(self.index), "check index"
assert self.col_logits.shape[0] == len(self.columns), "check columns"
assert self.act in ['exp', 'softplus', 'sigmoid', '_nnmf'], \
"requires nonnegative act to model intensity score"
def eval(self, device=None):
if device is None:
z = self.ind_logits @ self.col_logits.T
assert not np.isnan(z).any(), "low rank score must be valid"
if self.act == 'exp':
return np.exp(z)
elif self.act == 'softplus':
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'overflow encountered in exp')
return np.where(z > 0, z + np.log(1 + np.exp(-z)), np.log(1 + np.exp(z)))
elif self.act == 'sigmoid':
return 1. / (1 + np.exp(-z))
elif self.act == '_nnmf':
return z
else:
raise NotImplementedError
else:
ind_logits = torch.as_tensor(self.ind_logits, device=device)
col_logits = torch.as_tensor(self.col_logits, device=device)
z = ind_logits @ col_logits.T
assert not torch.isnan(z).any(), "low rank score must be valid"
if self.act == 'exp':
return z.exp()
elif self.act == 'softplus':
return torch.nn.functional.softplus(z)
elif self.act == 'sigmoid':
return z.sigmoid()
elif self.act == '_nnmf':
return z
else:
raise NotImplementedError
@property
def shape(self):
return (len(self.ind_logits), len(self.col_logits))
def __getitem__(self, key):
if np.isscalar(key):
key = [key]
return self.__class__(
self.ind_logits[key], self.col_logits,
self.index[key], self.columns, self.act,
self.ind_default, self.col_default)
@property
def T(self):
return self.__class__(
self.col_logits, self.ind_logits,
self.columns, self.index, self.act, self.col_default, self.ind_default)
@classmethod
def collate_fn(cls, batch):
first = batch[0]
ind_logits = []
index = []
for elm in batch:
ind_logits.append(elm.ind_logits)
index.extend(elm.index)
return cls(
            np.vstack(ind_logits), first.col_logits,
            np.array(index), first.columns, first.act,
            first.ind_default, first.col_default)  # shared fields taken from the first element, as in the other collate_fn implementations
"""
fastspecfit.templates.qa
========================
QA for templates
"""
import pdb
import os
import numpy as np
from astropy.table import Table
from scipy.ndimage import median_filter
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.templates import rebuild_fastspec_spectrum
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from desiutil.log import get_logger
log = get_logger()
def plot_style(font_scale=1.2):
import seaborn as sns
sns.set(context='talk', style='ticks', palette='deep', font_scale=font_scale)#, rc=rc)
colors = sns.color_palette()
return sns, colors
def qa_bpt(targetclass, fastspecfile=None, png=None):
"""QA of the fastspec emission-line spectra.
"""
from fastspecfit.templates.templates import remove_undetected_lines, read_stacked_fastspec
sns, _ = plot_style()
fastmeta, _fastspec = read_stacked_fastspec(fastspecfile, read_spectra=False)
fastspec = remove_undetected_lines(_fastspec)
nobj = len(fastmeta)
def oplot_class(ax, kewley=False, **kwargs):
if kewley:
niiha = np.linspace(-1.9, 0.4, 1000)
oiiihb = 0.61 / (niiha-0.47) + 1.19
else:
niiha = np.linspace(-1.9, -0.1, 1000)
oiiihb = 0.61 / (niiha-0.05) + 1.3
ax.plot(niiha, oiiihb, **kwargs)
def _bpt(cc, cclabel='Redshift', vmin=None, vmax=None, png=None):
fig, ax = plt.subplots(figsize=(10, 7))
cb = ax.scatter(niiha, oiiihb, c=cc, cmap='jet', vmin=vmin, vmax=vmax)
oplot_class(ax, kewley=True, color='k', ls='--', lw=3, label='Kewley+01')
oplot_class(ax, kewley=False, color='k', lw=3, label='Kauffmann+03')
plt.colorbar(cb, label=cclabel)
ax.set_xlim(-1.9, 0.7)
ax.set_ylim(-1.2, 1.5)
ax.set_xlabel(r'$\log_{10}$ ([NII] $\lambda6584$ / H$\alpha$)')
ax.set_ylabel(r'$\log_{10}$ ([OIII] $\lambda5007$ / H$\beta$)')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.legend(fontsize=16, loc='lower left')#, ncol=2)
plt.subplots_adjust(bottom=0.15, left=0.18, top=0.95, right=0.95)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
good = np.where(
(fastspec['HALPHA_FLUX'] > 0) *
(fastspec['HBETA_FLUX'] > 0) *
(fastspec['NII_6584_FLUX'] > 0) *
(fastspec['OIII_5007_FLUX'] > 0)
#(fastspec['HALPHA_CHI2'] < 1e4)
)[0]
niiha = np.log10(fastspec['NII_6584_FLUX'][good] / fastspec['HALPHA_FLUX'][good])
oiiihb = np.log10(fastspec['OIII_5007_FLUX'][good] / fastspec['HBETA_FLUX'][good])
ww = np.where((niiha > -0.05) * (niiha < 0.05) * (oiiihb < -0.5))[0]
#log.info(fastspec[good][ww]['HALPHA_FLUX', 'NII_6584_FLUX'])
zz = fastspec['CONTINUUM_Z'][good]
ewhb = fastspec['HBETA_EW'][good]
#rW1 = fastmeta['RW1'][good]
#gr = fastmeta['GR'][good]
_bpt(zz, 'Redshift', vmin=0, vmax=0.5, png=png.replace('.png', '-redshift.png'))
_bpt(np.log10(ewhb), r'$\log_{10}\,\mathrm{EW}(\mathrm{H}\beta)$',
png=png.replace('.png', '-ewhb.png'))
#_bpt(rW1, r'$r-W1$', vmin=-0.3, vmax=0.9, png=png.replace('.png', '-rW1.png'))
#_bpt(gi, r'$g-i$', vmin=0.6, vmax=1.3, png=png.replace('.png', '-gi.png'))
def qa_fastspec_fullspec(targetclass, fastwave=None, fastflux=None, fastivar=None,
fastmeta=None, fastspec=None, fastspecfile=None, CFit=None,
EMFit=None, ncol=3, nrow=5, photometric_models=False,
pdffile=None):
"""Full-spectrum QA.
photometric_models - use the fits to the broadband continuum
"""
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.sample import SAMPLE_PROPERTIES as props
from fastspecfit.templates.templates import rebuild_fastspec_spectrum, read_stacked_fastspec
sns, _ = plot_style()
if CFit is None or EMFit is None:
from fastspecfit.continuum import ContinuumFit
from fastspecfit.emlines import EMLineFit
CFit = ContinuumFit()
EMFit = EMLineFit()
if fastwave is None:
fastwave, fastflux, fastivar, fastmeta, fastspec = read_stacked_fastspec(fastspecfile)
#fastspec = remove_undetected_lines(fastspec, EMFit.linetable, devshift=False)
absmaglabel = props[targetclass]['absmag_label']
colorlabel = props[targetclass]['color_label']
nobj = len(fastmeta)
icam = 0
zobj = np.unique(fastmeta['ZOBJ'])
npage = len(zobj)
inches_wide_perpanel = 4.0
inches_tall_perpanel = 3.0
if npage == 1:
png = True
else:
png = False
if pdffile:
if png:
pdffile = pdffile.replace('.pdf', '.png')
else:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(pdffile)
for ipage in [0]:#np.arange(npage):
log.info('Building page {}/{}'.format(ipage+1, npage))
pageindx = np.where(zobj[ipage] == fastmeta['ZOBJ'])[0]
absmag = sorted(set(fastmeta['ABSMAG'][pageindx])) # subpage
nsubpage = len(absmag)
for isubpage in [6]:#np.arange(nsubpage):
subpageindx = np.where((absmag[isubpage] == fastmeta['ABSMAG'][pageindx]))[0]
fig, allax = plt.subplots(nrow, ncol, figsize=(inches_wide_perpanel*ncol, inches_tall_perpanel*nrow),
sharex=True, sharey=False)#True)
for iplot, (indx, ax) in enumerate(zip(pageindx[subpageindx], allax.flatten())):
#log.info(ipage, isubpage, iplot, len(pageindx), len(subpageindx))
# rebuild the best-fitting spectrum; these models have been
# normalized already in iterative_stack
modelwave, continuum, smooth_continuum, emlinemodel, data = rebuild_fastspec_spectrum(
fastspec[indx], fastwave, fastflux[indx, :], fastivar[indx, :], CFit, EMFit)
# rest-frame
if photometric_models:
modelwave_phot, continuum_phot = rebuild_fastspec_spectrum(fastspec[indx], _, _, _, CFit,
EMFit, full_resolution=True,
normalize_wave=props[targetclass]['normwave'])
#modelwave_phot *= (1 + data['zredrock'])
#continuum_phot /= (1 + data['zredrock'])
zfact = (1 + data['zredrock'])
#ax.plot(data['wave'][icam]/zfact, data['flux'][icam], color='skyblue')
ax.plot(modelwave_phot, continuum_phot, color='gray')
ax.plot(modelwave/zfact, (continuum+emlinemodel), color='firebrick', alpha=0.7)
xmin, xmax = 900, 4e4
ww = np.where((modelwave_phot > xmin) * (modelwave_phot < xmax))[0]
ymin, ymax = np.min(continuum_phot[ww]), np.max(continuum_phot[ww])
if np.max(emlinemodel) > ymax:
pdb.set_trace()
ymax = np.max(emlinemodel)
else:
# observed frame
ax.plot(data['wave'][icam], data['flux'][icam], color='skyblue')
ax.plot(modelwave, continuum+emlinemodel, color='firebrick', alpha=0.5)
ax.plot(modelwave, continuum, color='blue', alpha=0.5)
#ax.plot(modelwave, continuum+smooth_continuum, color='gray', alpha=0.3)
ax.plot(modelwave, smooth_continuum, color='gray', alpha=0.7)
xmin, xmax = modelwave.min(), modelwave.max()
ymin, ymax = 1e6, -1e6
filtflux = median_filter(data['flux'][icam], 51, mode='nearest')
sigflux = np.std(data['flux'][icam][data['ivar'][icam] > 0])
if -2 * sigflux < ymin:
ymin = -2 * sigflux
if sigflux * 5 > ymax:
ymax = sigflux * 5
if np.max(filtflux) > ymax:
ymax = np.max(filtflux) * 1.4
ax.text(0.96, 0.06, r'${:.2f}<{}<{:.2f}$'.format(
fastmeta['COLORMIN'][indx], colorlabel,
fastmeta['COLORMAX'][indx]),
ha='right', va='bottom', transform=ax.transAxes, fontsize=10,
bbox=dict(boxstyle='round', facecolor='gray', alpha=0.25))
ax.text(0.04, 0.96, '\n'.join(( 'N={}, S/N={:.1f}'.format(
fastmeta['NOBJ'][indx], fastspec['CONTINUUM_SNR_ALL'][indx]), )),
ha='left', va='top', transform=ax.transAxes, fontsize=10,
bbox=dict(boxstyle='round', facecolor='gray', alpha=0.25))
print(ymin, ymax)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xticklabels([])
ax.set_yticklabels([])
if photometric_models:
ax.set_xscale('log')
plt.subplots_adjust(wspace=0.05, hspace=0.05, left=0.07, right=0.95, top=0.95, bottom=0.1)
if iplot == ncol*nrow-1:
break
fig.text(0.52, 0.968, r'${:.2f}<z<{:.2f}\ {:.1f}<{}<{:.1f}$'.format(
fastmeta['ZOBJMIN'][indx], fastmeta['ZOBJMAX'][indx],
fastmeta['{}MIN'.format('ABSMAG')][indx], absmaglabel,
fastmeta['{}MAX'.format('ABSMAG')][indx]),
ha='center', va='center', fontsize=22)
for rem in np.arange(ncol*nrow-iplot-1)+iplot+1:
allax.flatten()[rem].axis('off')
if pdffile and png is False:
pdf.savefig(fig)
plt.close()
if pdffile:
log.info('Writing {}'.format(pdffile))
if png:
fig.savefig(pdffile)
plt.close()
else:
pdf.close()
def qa_fastspec_emlinespec(targetclass, fastwave=None, fastflux=None, fastivar=None,
fastmeta=None, fastspec=None, fastspecfile=None, CFit=None,
EMFit=None, ncol=3, nrow=5, pdffile=None):
"""QA of the fastspec emission-line spectra.
"""
from matplotlib.colors import Normalize
from fastspecfit.templates.templates import remove_undetected_lines
from fastspecfit.util import ivar2var, C_LIGHT
from fastspecfit.templates.sample import SAMPLE_PROPERTIES as props
from fastspecfit.templates.templates import rebuild_fastspec_spectrum, read_stacked_fastspec
sns, _ = plot_style()
if CFit is None or EMFit is None:
from fastspecfit.continuum import ContinuumFit
from fastspecfit.emlines import EMLineFit
CFit = ContinuumFit()
EMFit = EMLineFit()
if fastwave is None:
fastwave, fastflux, fastivar, fastmeta, fastspec = read_stacked_fastspec(fastspecfile)
fastspec_fix = remove_undetected_lines(fastspec, EMFit.linetable, devshift=False)
# plotting preferences
cmap = plt.cm.get_cmap('jet')
#cmap = sns.color_palette(as_cmap=True)
cnorm = Normalize(vmin=np.min(fastmeta['ZOBJ']), vmax=np.max(fastmeta['ZOBJ']))
inches_wide = 16
inches_fullspec = 6
inches_perline = inches_fullspec / 2.0
nlinepanels = 4
nline = len(set(EMFit.linetable['plotgroup']))
nlinerows = np.ceil(nline / nlinepanels).astype(int)
nrows = 1 + nlinerows
height_ratios = np.hstack([1, [0.5]*nlinerows])
plotsig_default = 150.0 # 300.0 # [km/s]
meanwaves, deltawaves, sigmas, linenames = [], [], [], []
for plotgroup in set(EMFit.linetable['plotgroup']):
I = np.where(plotgroup == EMFit.linetable['plotgroup'])[0]
linenames.append(EMFit.linetable['nicename'][I[0]])
meanwaves.append(np.mean(EMFit.linetable['restwave'][I]))
deltawaves.append((np.max(EMFit.linetable['restwave'][I]) -
np.min(EMFit.linetable['restwave'][I])) / 2)
sigmas.append(plotsig_default)
srt = np.argsort(meanwaves)
meanwaves = np.hstack(meanwaves)[srt]
deltawaves = np.hstack(deltawaves)[srt]
sigmas = np.hstack(sigmas)[srt]
linenames = np.hstack(linenames)[srt]
absmaglabel = props[targetclass]['absmag_label']
colorlabel = props[targetclass]['color_label']
# how many pages?
nobj = len(fastmeta)
icam = 0
restcolor = np.unique(fastmeta['COLOR'])
npage = len(restcolor)
if npage == 1:
png = True
else:
png = False
if pdffile:
if png:
pdffile = pdffile.replace('.pdf', '.png')
else:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(pdffile)
# make the plot!
for ipage in np.arange(npage):
log.info('Building page {}/{}'.format(ipage+1, npage))
pageindx = np.where(restcolor[ipage] == fastmeta['COLOR'])[0]
absmag = sorted(set(fastmeta['ABSMAG'][pageindx])) # subpage
nsubpage = len(absmag)
for isubpage in np.arange(nsubpage):#[:1]:#[::2]:
subpageindx = np.where((absmag[isubpage] == fastmeta['ABSMAG'][pageindx]))[0]
fig = plt.figure(figsize=(inches_wide, 2*inches_fullspec + inches_perline*nlinerows))
gs = fig.add_gridspec(nrows, nlinepanels, height_ratios=height_ratios)
bigax = fig.add_subplot(gs[0, :])
ax, irow, icol = [], 1, 0
for iax in np.arange(nline):
icol = iax % nlinepanels
if iax > 0 and iax % nlinepanels == 0:
irow += 1
xx = fig.add_subplot(gs[irow, icol])
ax.append(xx)
bigymin, bigymax = 1e6, -1e6
lineymin, lineymax = np.zeros(nline)+1e6, np.zeros(nline)-1e6
removelabels = np.ones(nline, bool)
for iplot, indx in enumerate(pageindx[subpageindx]):
#log.info(ipage, isubpage, iplot, len(pageindx), len(subpageindx))
modelwave, continuum, smooth_continuum, emlinemodel, data = rebuild_fastspec_spectrum(
fastspec[indx], fastwave, fastflux[indx, :], fastivar[indx, :], CFit, EMFit)
#if fastmeta['IBIN'][indx] == 1262:
# pdb.set_trace()
redshift = data['zredrock']
emlineflux = data['flux'][icam] - continuum - smooth_continuum
modelwave /= (1+redshift) # rest-frame
label = 'z=[{:.2f}-{:.2f}] (N={})'.format(
fastmeta['ZOBJMIN'][indx], fastmeta['ZOBJMAX'][indx],
np.sum(fastmeta['ZOBJ'][pageindx[subpageindx]] == fastmeta['ZOBJ'][indx]))
#bigax.plot(modelwave/(1+redshift), emlineflux, color='gray')
bigax.plot(modelwave, emlinemodel, label=label, color=cmap(cnorm(fastmeta['ZOBJ'][indx])))
if -np.max(emlinemodel)*0.05 < bigymin:
bigymin = -np.max(emlinemodel)*0.05
if np.max(emlinemodel)*1.1 > bigymax:
bigymax = np.max(emlinemodel)*1.1
if np.max(emlinemodel) == 0.0:
bigymin, bigymax = 0.0, 1.0
# zoom in on individual emission lines
for iax, (meanwave, deltawave, sig, linename) in enumerate(zip(
meanwaves, deltawaves, sigmas, linenames)):
wmin = (meanwave - deltawave) - 8 * sig * meanwave / C_LIGHT
wmax = (meanwave + deltawave) + 8 * sig * meanwave / C_LIGHT
lineindx = np.where((modelwave > wmin) * (modelwave < wmax))[0]
if len(lineindx) > 1:
if np.min(emlinemodel[lineindx]) > 0.0: # at least one line kept (snr>3)
removelabels[iax] = False
ax[iax].plot(modelwave[lineindx], emlinemodel[lineindx],
color=cmap(cnorm(fastmeta['ZOBJ'][indx])))
if -np.max(emlinemodel[lineindx])*0.05 < lineymin[iax]:
lineymin[iax] = -np.max(emlinemodel[lineindx])*0.05
if np.max(emlinemodel[lineindx]) * 1.1 > lineymax[iax]:
lineymax[iax] = np.max(emlinemodel[lineindx]) * 1.1
if np.abs(lineymax[iax]-lineymin[iax]) < 1e-2:
removelabels[iax] = False
for iax, xx in enumerate(ax):
xx.text(0.08, 0.89, linenames[iax], ha='left', va='center',
transform=xx.transAxes, fontsize=20)
if removelabels[iax]:
xx.set_ylim(0, 1)
xx.set_xticklabels([])
xx.set_yticklabels([])
else:
if lineymax[iax] == lineymin[iax]:
lineymax[iax] = 1.0
xx.set_ylim(lineymin[iax], lineymax[iax])
xlim = xx.get_xlim()
xx.xaxis.set_major_locator(ticker.MaxNLocator(2))
# don't repeat the legend labels
hand, lab = bigax.get_legend_handles_labels()
ulabels = dict(zip(lab, hand))
bigax.legend(ulabels.values(), ulabels.keys(), fontsize=18, loc='upper left')
#bigax.legend(fontsize=18, loc='upper left')
bigax.set_ylim(bigymin, bigymax)
bigax.set_xlim(2600, 7200) # 3500, 9300)
bigax.set_title(r'${:.2f}<{}<{:.2f}\ {:.1f}<{}<{:.1f}$'.format(
fastmeta['COLORMIN'][indx], colorlabel,
fastmeta['COLORMAX'][indx],
fastmeta['ABSMAGMIN'][indx], absmaglabel,
fastmeta['ABSMAGMAX'][indx]))
#bigax.set_xlabel('Observed-frame Wavelength ($\AA$)')
plt.subplots_adjust(wspace=0.28, left=0.07, right=0.95, top=0.95, bottom=0.1)
if pdffile and png is False:
pdf.savefig(fig)
plt.close()
if pdffile:
log.info('Writing {}'.format(pdffile))
if png:
fig.savefig(pdffile)
plt.close()
else:
pdf.close()
def qa_photometry_templates(targetclass, samplefile=None, templatefile=None,
ntspace=5, png=None):
"""Compare the color-color tracks of the templates to the data.
"""
from fastspecfit.templates.sample import read_parent_sample
from fastspecfit.templates.templates import read_templates
if ntspace == 1:
prefix = 'All '
else:
prefix = ''
sns, _ = plot_style()
cmap = plt.cm.get_cmap('RdYlBu')
mincnt = 1
phot, spec, meta = read_parent_sample(samplefile)
def template_colors_zgrid(templatefile, targetclass):
"""Compute the colors of the templates on a fixed redshift grid.
"""
from speclite import filters
filt = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1')
wave, flux, meta = read_templates(templatefile)
nt = len(meta)
print('Number of templates = {}'.format(nt))
print(wave.min(), wave.max())
dz = 0.1
if targetclass == 'lrg':
zmin, zmax = 0.0, 1.4
elif targetclass == 'elg':
zmin, zmax = 0.0, 1.7
elif targetclass == 'bgs':
zmin, zmax = 0.0, 0.6
else:
raise ValueError('unrecognized targetclass {}'.format(targetclass))
nz = np.round( (zmax - zmin) / dz ).astype('i2')
print('Number of redshift points = {}'.format(nz))
cc = dict(
redshift = np.linspace(zmin, zmax, nz),
gr = np.zeros((nt, nz), 'f4'),
rz = np.zeros((nt, nz), 'f4'),
rW1 = np.zeros((nt, nz), 'f4'),
zW1 = np.zeros((nt, nz), 'f4')
)
for iz, red in enumerate(cc['redshift']):
zwave = wave.astype('float') * (1 + red)
maggies = filt.get_ab_maggies(flux, zwave, mask_invalid=False)
cc['gr'][:, iz] = -2.5 * np.log10(maggies['decam2014-g'] / maggies['decam2014-r'] )
cc['rz'][:, iz] = -2.5 * np.log10(maggies['decam2014-r'] / maggies['decam2014-z'] )
cc['rW1'][:, iz] = -2.5 * np.log10(maggies['decam2014-r'] / maggies['wise2010-W1'] )
cc['zW1'][:, iz] = -2.5 * np.log10(maggies['decam2014-z'] / maggies['wise2010-W1'] )
return cc
# compute colors on a grid
log.info('Reading {}'.format(templatefile))
template_colors = template_colors_zgrid(templatefile, targetclass)
nt, nz = template_colors['gr'].shape
zmin = '{:.1f}'.format(template_colors['redshift'].min())
zmax = '{:.1f}'.format(template_colors['redshift'].max())
dz = '{:.1f}'.format(template_colors['redshift'][1] - template_colors['redshift'][0])
def elg_obs(phot, png=None):
grobslim = (-0.8, 1.8)
rzobslim = (-1, 2.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rz'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rz'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.17, 0.42, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.set_xlabel(r'$(r - z)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.set_xlim(rzobslim)
ax2.set_ylim(grobslim)
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.87, bottom=0.19, wspace=0.05)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def bgs_obs(phot, png=None):
grobslim = (-0.5, 2.5)
rzobslim = (-0.5, 1.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
ax1.grid(True)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rz'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rz'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.2, 0.1, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.set_xlim(rzobslim)
ax2.set_ylim(grobslim)
ax2.set_xlabel(r'$(r - z)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.87, bottom=0.19, wspace=0.05)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def lrg_obs(phot, png=None):
grobslim = (-0.2, 3)
rzobslim = (0.0, 3)
rW1obslim = (-0.3, 5.5)
zW1obslim = (-0.5, 3)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(phot['RMAG']-phot['W1MAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
#norm=LogNorm(vmin=1, vmax=100),
extent=np.hstack((rW1obslim, grobslim)))
ax1.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rW1obslim)
ax1.set_ylim(grobslim)
ax1.text(0.05, 0.9, 'Data', ha='left', va='bottom',
transform=ax1.transAxes, fontsize=14)
for tt in np.arange(0, nt, ntspace):
ax2.plot(template_colors['rW1'][tt, :], template_colors['gr'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax2.scatter(template_colors['rW1'][tt, 0], template_colors['gr'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax2.text(0.1, 0.05, 'z=0.0', ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
ha='left', va='bottom',
transform=ax2.transAxes, fontsize=14)
ax2.yaxis.set_label_position('right')
ax2.yaxis.tick_right()
ax2.set_xlabel(r'$(r - W1)_{\rm obs}$')
ax2.set_ylabel(r'$(g - r)_{\rm obs}$')
ax2.set_xlim(rW1obslim)
ax2.set_ylim(grobslim)
ax3.hexbin(phot['ZMAG']-phot['W1MAG'], phot['RMAG']-phot['ZMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zW1obslim, rzobslim)))
ax3.set_ylabel(r'$(r - z)_{\rm obs}$')
ax3.set_xlabel(r'$(z - W1)_{\rm obs}$')
ax3.set_xlim(zW1obslim)
ax3.set_ylim(rzobslim)
ax3.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax3.yaxis.set_major_locator(ticker.MultipleLocator(1))
for tt in np.arange(0, nt, ntspace):
ax4.plot(template_colors['zW1'][tt, :], template_colors['rz'][tt, :], marker='s',
markersize=5, ls='-', alpha=0.5)
for tt in np.arange(0, nt, ntspace):
ax4.scatter(template_colors['zW1'][tt, 0], template_colors['rz'][tt, 0], marker='o',
facecolors='none', s=40, edgecolors='k',
linewidth=1, zorder=10)
ax4.text(0.05, 0.3, 'z=0.0', ha='left', va='bottom',
transform=ax4.transAxes, fontsize=14)
#ax4.text(0.05, 0.9, '{}Models (z={}-{}, dz={})'.format(prefix, zmin, zmax, dz),
# ha='left', va='bottom',
# transform=ax4.transAxes, fontsize=14)
ax4.yaxis.set_label_position('right')
ax4.yaxis.tick_right()
ax4.set_ylabel(r'$(r - z)_{\rm obs}$')
ax4.set_xlabel(r'$(z - W1)_{\rm obs}$')
ax4.set_xlim(zW1obslim)
ax4.set_ylim(rzobslim)
ax4.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax4.yaxis.set_major_locator(ticker.MultipleLocator(1))
for aa in (ax1, ax2, ax3, ax4):
aa.grid(True)
plt.subplots_adjust(top=0.95, left=0.1, right=0.9, bottom=0.13, wspace=0.05, hspace=0.28)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
# make the plots!
if targetclass == 'lrg':
lrg_obs(phot, png=png)
elif targetclass == 'elg':
elg_obs(phot, png=png)
elif targetclass == 'bgs':
bgs_obs(phot, png=png)
else:
pass
def qa_photometry(targetclass, samplefile=None, png_obs=None, png_rest=None, png_rest_bins=None):
"""QA of the observed- and rest-frame photometry.
"""
from matplotlib.colors import LogNorm
from fastspecfit.templates.sample import read_parent_sample, stacking_bins
sns, _ = plot_style()
cmap = plt.cm.get_cmap('RdYlBu')
mincnt = 1
phot, spec, meta = read_parent_sample(samplefile)
bins = stacking_bins(targetclass, verbose=True)
def bgs_obs(phot, png=None):
robslim = (15, 21.0)
grobslim = (-0.2, 2.5)
rzobslim = (-0.5, 1.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True)
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
hb = ax2.hexbin(phot['RMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((robslim, grobslim)))
ax2.set_xlabel(r'$r_{\rm obs}$')
ax2.set_ylim(grobslim)
ax2.set_xlim(robslim)
cax = fig.add_axes([0.88, 0.12, 0.02, 0.83])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, cax=cax, format=formatter, label='Number of Galaxies')
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.85, bottom=0.19, wspace=0.07)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def bgs_rest(phot, meta, bins=None, png=None):
zlim = (0.0, 0.6)
Mrlim = (-16, -25)
grlim = (-0.2, 1.2)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(meta['Z'], phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, Mrlim)))
ax1.set_ylim(Mrlim)
ax1.set_xlim(zlim)
ax1.set_xlabel('Redshift')
ax1.set_ylabel(r'$M_{0.0r}$')
#ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0]
[ax1.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['ABSMAGMIN'])]
ax2.hexbin(meta['Z'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, grlim)))
ax2.set_xlim(zlim)
ax2.set_ylim(grlim)
ax2.set_xlabel('Redshift')
ax2.set_ylabel(r'$^{0.0}(g - r)$')#, labelpad=-10)
#ax2.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
#ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax2.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['COLORMIN'])]
hb = ax3.hexbin(phot['ABSMAG_R'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((Mrlim, grlim)))
ax3.set_xlabel(r'$M_{0.0r}$')
ax3.set_ylabel(r'$^{0.0}(g - r)$')#, labelpad=-10)
ax3.set_xlim(Mrlim)
ax3.set_ylim(grlim)
#ax3.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
if bins:
dx, dy = bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0], bins['COLORMAX'][0]-bins['COLORMIN'][0]
[ax3.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ABSMAGMIN'], bins['COLORMIN'])]
ax4.axis('off')
cax = fig.add_axes([0.49, 0.12, 0.02, 0.36])
#cax = fig.add_axes([0.54, 0.4, 0.35, 0.03])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, format=formatter, label='Number of Galaxies',
cax=cax)#, orientation='horizontal')
for aa in (ax1, ax2, ax3):
aa.grid(True)
plt.subplots_adjust(left=0.1, top=0.95, wspace=0.3, hspace=0.3, right=0.88, bottom=0.13)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def elg_obs(phot, png=None):
gobslim = (19.5, 24.5)
grobslim = (-1.2, 1.2)
rzobslim = (-1.5, 2.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True)
ax1.hexbin(phot['RMAG']-phot['ZMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((rzobslim, grobslim)))
ax1.set_xlabel(r'$(r - z)_{\rm obs}$')
ax1.set_ylabel(r'$(g - r)_{\rm obs}$')
ax1.set_xlim(rzobslim)
ax1.set_ylim(grobslim)
hb = ax2.hexbin(phot['GMAG'], phot['GMAG']-phot['RMAG'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((gobslim, grobslim)))
ax2.set_xlabel(r'$g_{\rm obs}$')
ax2.set_ylim(grobslim)
ax2.set_xlim(gobslim)
cax = fig.add_axes([0.88, 0.12, 0.02, 0.83])
formatter = ticker.LogFormatter(10, labelOnlyBase=False)
fig.colorbar(hb, cax=cax, format=formatter, label='Number of Galaxies')
for aa in (ax1, ax2):
aa.grid(True)
plt.subplots_adjust(left=0.12, top=0.95, right=0.85, bottom=0.19, wspace=0.07)
if png:
log.info('Writing {}'.format(png))
fig.savefig(png)
plt.close()
def elg_rest(phot, meta, bins=None, png=None):
zlim = (0.5, 1.6)
Mglim = (-18, -25)
grlim = (-0.5, 1.0)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 10))
ax1.hexbin(meta['Z'], phot['ABSMAG_G'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, Mglim)))
ax1.set_ylim(Mglim)
ax1.set_xlim(zlim)
ax1.set_xlabel('Redshift')
ax1.set_ylabel(r'$M_{0.0g}$')
ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
if bins:
dx, dy = bins['ZOBJMAX'][0]-bins['ZOBJMIN'][0], bins['ABSMAGMAX'][0]-bins['ABSMAGMIN'][0]
[ax1.add_patch(Rectangle((xx, yy), dx, dy, facecolor='none', edgecolor='k'))
for xx, yy in zip(bins['ZOBJMIN'], bins['ABSMAGMIN'])]
ax2.hexbin(meta['Z'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=mincnt, bins='log', cmap=cmap,
#C=cat['weight'], reduce_C_function=np.sum,
extent=np.hstack((zlim, grlim)))
from __future__ import print_function
import numpy as np
import os
import pickle as pk
import pyfftw
from lensit.sims.sims_generic import hash_check
from lensit.misc.misc_utils import npy_hash, Freq
from lensit.misc.rfft2_utils import udgrade_rfft2, supersample
from lensit.pbs import pbs
class ell_mat:
"""Library helping with flat-sky patch discretization and harmonic mode structure.
This handles Fourier mode structure on the flat sky, at the given resolution and size of
specified rectangular box.
Args:
lib_dir: various things might be cached there
(unlens *cache* is set to 0, in which case only a hashdict is put there at instantiation)
shape(2-tuple): pair of int defining the number of pixels on each side of the box
lsides(2-tuple): physical size (in radians) of the box sides
cache(optional): if non-zero, a bunch of matrices might be cached to speed up some calculations.
"""
def __init__(self, lib_dir, shape, lsides, cache=1):
assert len(shape) == 2 and len(lsides) == 2
assert shape[0] % 2 == 0 and shape[1] % 2 == 0
assert shape[0] < 2 ** 16 and shape[1] < 2 ** 16
self.shape = tuple(shape)
self.rshape = (shape[0], shape[1] // 2 + 1)
self.lsides = tuple(lsides)
self.lib_dir = lib_dir
self.mmap_mode = None
self.cache=cache
fn_hash = os.path.join(lib_dir, "ellmat_hash.pk")
if pbs.rank == 0 and self.cache > 0:
if not os.path.exists(lib_dir): os.makedirs(lib_dir)
if not os.path.exists(fn_hash):
pk.dump(self.hash_dict(), open(fn_hash, 'wb'), protocol=2)
pbs.barrier()
if self.cache > 0:
hash_check(pk.load(open(fn_hash, 'rb')), self.hash_dict())
if pbs.rank == 0 and self.cache > 0 and not os.path.exists(os.path.join(self.lib_dir, 'ellmat.npy')):
print('ell_mat:caching ells in ' + os.path.join(self.lib_dir, 'ellmat.npy'))
np.save(os.path.join(self.lib_dir, 'ellmat.npy'), self._build_ellmat())
pbs.barrier()
self.ellmax = int(self._get_ellmax())
self._ell_counts = self._build_ell_counts()
self._nz_counts = self._ell_counts.nonzero()
def __eq__(self, other):
return self.shape == other.shape and self.lsides == other.lsides
def _build_ellmat(self):
kmin = 2. * np.pi / np.array(self.lsides)
ky2 = Freq(np.arange(self.shape[0]), self.shape[0]) ** 2 * kmin[0] ** 2
kx2 = Freq(np.arange(self.rshape[1]), self.shape[1]) ** 2 * kmin[1] ** 2
ones = np.ones(np.max(self.shape))
return self.k2ell(np.sqrt(np.outer(ky2, ones[0:self.rshape[1]]) + np.outer(ones[0:self.rshape[0]], kx2)))
def hash_dict(self):
return {'shape': self.shape, 'lsides': self.lsides}
@staticmethod
def k2ell(k):
r"""Mapping of 2d-frequency :math:`k` to multipole :math:`\ell`
:math:`\ell = \rm{int}\left(|k| - \frac 12 \right)`
"""
ret = np.uint16(np.round(k - 0.5) + 0.5 * ((k - 0.5) < 0))
return ret
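# Worked example of the k -> ell mapping above (illustrative values):
#   k = 10.2 : round(10.2 - 0.5) = 10, so ell = 10
#   k = 0.3  : round(-0.2) = 0 and the negative-argument guard adds 0.5, so ell = 0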
def __call__(self, *args, **kwargs):
return self.get_ellmat(*args, **kwargs)
def __getitem__(self, item):
return self.get_ellmat()[item]
def get_pixwinmat(self):
r"""Pixel window function rfft array :math:`\sin(k_x l_{\rm cell, x} / 2) \sin (k_y l_{\rm cell, y} / 2 )`
"""
ky = (np.pi/self.shape[0]) * Freq(np.arange(self.shape[0]), self.shape[0])
ky[self.shape[0] // 2:] *= -1.
kx = (np.pi/self.shape[1]) * Freq(np.arange(self.rshape[1]), self.shape[1])
rety = np.sin(ky)
rety[1:] /= ky[1:];rety[0] = 1.
retx = np.sin(kx)
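# NOTE: the method is truncated at this point in this dump; presumably the full pixel
# window is the outer product of rety and retx (an assumption, not verified here).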
#!/usr/bin/env python3
#
# Evolutionary Algorithms
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def check_dir(directory):
"""
:param directory: path to the directory
"""
os.makedirs(directory, exist_ok=True)
def sphere_test(data):
"""
:param data:
:return:
"""
f_x = np.sum(np.square(data), axis=-1)
return f_x
def rastrigin_test(data, A=10):
"""
:param data:
:param A:
:return:
"""
n = data.shape[1]
cos = np.cos(2 * np.pi * data)
e1 = np.square(data) - np.multiply(A, cos)
e2 = np.sum(e1, axis=-1)
return A * n + e2
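# Quick sanity checks for the two objectives (values worked out by hand):
#   sphere_test(np.array([[0., 0.], [1., 2.]])) -> array([0., 5.])
#   rastrigin_test attains its global minimum of 0 at the origin.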
def plot_2d_contour(obj_function):
"""
:param obj_function:
"""
x = np.linspace(-5, 5, 100)
y = np.linspace(-5, 5, 100)
X, Y = np.meshgrid(x, y)
data = np.dstack((X, Y))
S = obj_function(data)
plt.contour(X, Y, S)
def plot_fitness(out_dir, name, algo_name, x, y1, y2, title):
"""
(d) For each test function, plot the best and the worse fitness for each generation (averaged over 3 runs).
:param name:
:param x:
:param y1:
:param y2:
:param title:
"""
plt.figure()
plt.grid()
# Let x-axis be the generations and y-axis be the fitness values.
plt.plot(x, y1, label='avg_' + name.lower() + '_max')
plt.plot(x, y2, label='avg_' + name.lower() + '_min')
plt.xlabel('generations', fontsize=11)
plt.ylabel('fitness values', fontsize=11)
plt.gca().set_ylim(bottom=-70)
plt.annotate(round(y1[-1], 2), xy=(x[-1], y1[-1]), xycoords='data',
xytext=(-40, 15), size=10, textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
plt.annotate(round(y2[-1], 2), xy=(x[-1], y2[-1]), xycoords='data',
xytext=(-40, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
plt.legend()
plt.title(algo_name + '\n' + title, weight='bold', fontsize=12)
plt.savefig(out_dir + 'fitness.pdf')
plt.close()
def plot_generation(out_dir, name, i, iteration, min, obj_fun, sample):
"""
:param i:
:param iteration:
:param min:
:param obj_fun:
:param sample:
:return:
"""
if i % (iteration / 10) == 0:
plt.figure(1)
plt.clf()
plot_2d_contour(obj_fun)
plt.plot(sample[:, 0], sample[:, 1], 'ko')
plt.xlim([-5, 5])
plt.ylim([-5, 5])
plt.title(name.upper() + '\ngeneration: ' + str(i + 1) + '\nmin: ' + str(min[i]))
# plt.pause(0.1)
plt.savefig(out_dir + name + '-generation-contour-' + str(i) + '.pdf')
plt.close()
def cem(obj_fun, dim_domain, population_size, elite_set_ratio, learning_rate, iteration, out_dir, name, plot_generations):
"""
:param dim_domain:
:param population_size:
:param elite_set_ratio:
:param obj_fun:
:param iter:
:return mean:
"""
# Initialise parameters
# Note that you can uniformly sample the initial population parameters as long as they are reasonably far from
# the global optimum.
mean = np.random.uniform(-5, 5, dim_domain)
variance = np.random.uniform(4, 5, dim_domain)
max = np.zeros(iteration)
min = np.zeros(iteration)
for i in range(iteration):
# Obtain n sample from a normal distribution
sample = np.random.normal(mean, variance, [population_size, dim_domain])
# Evaluate objective function on an objective function
fitness = obj_fun(sample)
min[i] = np.min(fitness)
max[i] = np.max(fitness)
# Sort sample by objective function values in descending order
idx = np.argsort(fitness)
fittest = sample[idx]
# Elite set
p = np.rint(population_size * elite_set_ratio)
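# NOTE: the routine is truncated here in this dump. A standard CEM update on the elite
# set would look roughly like the sketch below (an assumption, not the author's code):
#   n_elite = int(p)
#   elite = fittest[:n_elite]
#   mean = learning_rate * np.mean(elite, axis=0) + (1 - learning_rate) * mean
#   variance = learning_rate * np.std(elite, axis=0) + (1 - learning_rate) * variance
#   if plot_generations:
#       plot_generation(out_dir, name, i, iteration, min, obj_fun, sample)
# and the function would return the final mean (and possibly the min/max histories).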
import numpy as np
from MagniPy.lensdata import Data
import subprocess
import shutil
import scipy.ndimage.filters as sfilt
import itertools
from copy import deepcopy
def dr(x1,x2,y1,y2):
return np.sqrt((x1-x2)**2+(y1-y2)**2)
def snap_to_bins(data, xbin_centers, dx, ybin_centers, dy, ranges):
new_datax = deepcopy(data[:, 0])
new_datay = deepcopy(data[:, 1])
new_datax[np.where(new_datax <= ranges[0][0])] = xbin_centers[0]
new_datax[np.where(new_datax >= ranges[0][1])] = xbin_centers[-1]
new_datay[np.where(new_datay <= ranges[1][0])] = ybin_centers[0]
new_datay[np.where(new_datay >= ranges[1][1])] = ybin_centers[-1]
new_data = None
xx, yy = np.meshgrid(xbin_centers, ybin_centers)
coords = zip(np.round(xx.ravel(), 4), np.round(yy.ravel(), 4))
for i, (cenx, ceny) in enumerate(coords):
subx = np.absolute(new_datax - cenx) * dx ** -1
suby = np.absolute(new_datay - ceny) * dy ** -1
inds = np.where(np.logical_and(subx < 1, suby < 1))[0]
if len(inds) > 0:
new_array = np.column_stack((np.array([cenx] * len(inds)), np.array([ceny] * len(inds))))
if new_data is None:
new_data = deepcopy(new_array)
else:
new_data = np.vstack((new_data, new_array))
return new_data
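# approx_theta_E below estimates the Einstein radius of a quad as half the geometric
# mean of the two largest pairwise image separations, a common rule of thumb.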
def approx_theta_E(ximg,yimg):
dis = []
xinds,yinds = [0,0,0,1,1,2],[1,2,3,2,3,3]
for (i,j) in zip(xinds,yinds):
dx,dy = ximg[i] - ximg[j], yimg[i] - yimg[j]
dr = (dx**2+dy**2)**0.5
dis.append(dr)
dis = np.array(dis)
greatest = np.argmax(dis)
dr_greatest = dis[greatest]
dis[greatest] = 0
second_greatest = np.argmax(dis)
dr_second = dis[second_greatest]
return 0.5*(dr_greatest*dr_second)**0.5
def min_img_sep_ranked(ximg, yimg):
ximg, yimg = np.array(ximg), np.array(yimg)
d1 = dr(ximg[0], ximg[1:], yimg[0], yimg[1:])
d2 = dr(ximg[1], [ximg[0], ximg[2], ximg[3]], yimg[1],
[yimg[0], yimg[2], yimg[3]])
d3 = dr(ximg[2], [ximg[0], ximg[1], ximg[3]], yimg[2],
[yimg[0], yimg[1], yimg[3]])
d4 = dr(ximg[3], [ximg[0], ximg[1], ximg[2]], yimg[3],
[yimg[0], yimg[1], yimg[2]])
idx1 = np.argmin(d1)
idx2 = np.argmin(d2)
idx3 = np.argmin(d3)
idx4 = np.argmin(d4)
x_2, x_3, x_4 = [ximg[0], ximg[2], ximg[3]], [ximg[0], ximg[1], ximg[3]], [ximg[0], ximg[1], ximg[2]]
y_2, y_3, y_4 = [yimg[0], yimg[2], yimg[3]], [yimg[0], yimg[1], yimg[3]], [yimg[0], yimg[1], yimg[2]]
theta1 = np.arctan((yimg[1:][idx1] - yimg[0])/(ximg[1:][idx1] - ximg[0]))
theta2 = np.arctan((y_2[idx2] - yimg[1]) / (x_2[idx2] - ximg[1]))
theta3 = np.arctan((y_3[idx3] - yimg[2]) / (x_3[idx3] - ximg[2]))
theta4 = np.arctan((y_4[idx4] - yimg[3]) / (x_4[idx4] - ximg[3]))
return np.array([np.min(d1), np.min(d2), np.min(d3), np.min(d4)]), np.array([theta1, theta2,
theta3, theta4])
def min_img_sep(ximg,yimg):
assert len(ximg) == len(yimg)
dr = []
if len(ximg) == 1:
return 1
elif len(ximg) == 0:
return 1
try:
for i in range(0,int(len(ximg)-1)):
for j in range(i+1,int(len(ximg))):
dx = ximg[i] - ximg[j]
dy = yimg[i] - yimg[j]
dr.append((dx**2 + dy**2)**0.5)
return min(dr)
except:
print('problem with the fit...')
return 1
def sort_image_index(ximg,yimg,xref,yref):
assert len(xref) == len(ximg)
x_self = np.array(list(itertools.permutations(ximg)))
y_self = np.array(list(itertools.permutations(yimg)))
indexes = [0, 1, 2, 3]
index_iterations = list(itertools.permutations(indexes))
delta_r = []
for i in range(0, int(len(x_self))):
dr = 0
for j in range(0, int(len(x_self[0]))):
dr += (x_self[i][j] - xref[j]) ** 2 + (y_self[i][j] - yref[j]) ** 2
delta_r.append(dr ** .5)
min_indexes = np.array(index_iterations[np.argmin(delta_r)])
return min_indexes
def coordinates_inbox(box_dx,box_dy,centered_x,centered_y):
return np.logical_and(np.logical_and(-0.5*box_dx < centered_x, centered_x < 0.5*box_dx),
np.logical_and(-0.5*box_dy < centered_y, centered_y < 0.5*box_dy))
def confidence_interval(percentile,data):
data=np.array(data)
data.sort()
L = len(data)
counter = 0
while True:
value = data[counter]
if counter>=L*percentile:
break
counter+=1
return value
def quick_confidence(centers, heights, percentile):
total = np.sum(heights)
summ, index = 0, 0
while summ < total * percentile:
summ += heights[index]
index += 1
return centers[index-1]
def read_data(filename='',N=None):
with open(filename,'r') as f:
lines = f.readlines()
dsets = []
for line in lines:
line = line.split(' ')
n = int(line[0])
try:
srcx,srcy = float(line[1]),float(line[2])
except:
srcx,srcy = None,None
x1,x2,x3,x4,y1,y2,y3,y4 = float(line[3]),float(line[7]),float(line[11]),float(line[15]),float(line[4]),\
float(line[8]),float(line[12]),float(line[16])
m1,m2,m3,m4 = float(line[5]),float(line[9]),float(line[13]),float(line[17])
t1,t2,t3,t4 = float(line[6]),float(line[10]),float(line[14]),float(line[18])
dsets.append(Data(x=[x1,x2,x3,x4],y=[y1,y2,y3,y4],m=[m1,m2,m3,m4],
t=[t1,t2,t3,t4],source=[srcx,srcy]))
if N is not None and len(dsets)>=N:
break
return dsets
def write_fluxes(filename='',fluxes = [], mode='append',summed_in_quad=True):
if summed_in_quad:
fluxes = np.squeeze(fluxes)
with open(filename,'a') as f:
if isinstance(fluxes,float):
f.write(str(fluxes)+'\n')
else:
for val in fluxes:
f.write(str(val)+'\n')
return
fluxes = np.array(fluxes)
if mode == 'append':
m = 'a'
else:
m = 'w'
if fluxes.ndim == 1:
with open(filename, m) as f:
for val in fluxes:
f.write(str(val) + ' ')
f.write('\n')
else:
N = int(np.shape(fluxes)[0])
with open(filename,m) as f:
for n in range(0,N):
for val in fluxes[n,:]:
f.write(str(val)+' ')
f.write('\n')
def write_data(filename='',data_list=[],mode='append'):
def single_line(dset=classmethod):
lines = ''
lines += str(dset.nimg)+' '+str(dset.srcx)+' '+str(dset.srcy)+' '
for i in range(0,int(dset.nimg)):
for value in [dset.x[i],dset.y[i],dset.m[i],dset.t[i]]:
if value is None:
lines += '0 '
else:
lines += str(value)+' '
return lines+'\n'
if mode=='append':
with open(filename,'a') as f:
for dataset in data_list:
f.write(single_line(dataset))
else:
with open(filename,'w') as f:
for dataset in data_list:
f.write(single_line(dataset))
def integrate_profile(profname,limit,inspheres=False,**kwargs):
if profname=='nfw':
rs=kwargs['rs']
ks=kwargs['ks']
n=limit*rs**-1
if inspheres:
rho0 = 86802621404*ks*rs**-1
n*=rs
r200 = kwargs['c']*rs
return 4*np.pi*rho0*rs**3*(np.log(1+r200*n**-1)- n*(n+r200)**-1)
else:
return 2*np.pi*rs**2*ks*(np.log(.25*n**2)+2*np.arctanh(np.sqrt(1-n**2))*(np.sqrt(1-n**2))**-1)
elif profname=='SIE':
b = kwargs['SIE_Rein']
return np.pi*limit*b
def rotate(xcoords,ycoords,angle):
return xcoords*np.cos(angle)+ycoords*np.sin(angle),-xcoords*np.sin(angle)+ycoords*np.cos(angle)
def img_sept(x,y):
return np.sort(np.array([dr(x[0],x[1],y[0],y[1]),dr(x[0],x[2],y[0],y[2]),dr(x[0],x[3],y[0],y[3]),
dr(x[1],x[2],y[1],y[2]),dr(x[1],x[3],y[1],y[3]),dr(x[2],x[3],y[2],y[3])]))
def identify(x,y,RE):
separations = img_sept(x,y)
if separations[0] > RE:
return 0
if separations[1] <= 1.15*RE:
return 2
elif separations[0] <= 0.85*RE:
return 1
else:
return 0
def read_dat_file(fname):
x_srcSIE, y_srcSIE = [], []
with open(fname, 'r') as f:
nextline = False
dosrc = False
doimg = False
count = 0
readcount = 0
for line in f:
row = line.split(" ")
#print(row,fname)
#row_split = filter(None, row)
row_split = list(filter(None, row))
if row_split[0] == 'alpha':
macromodel = row_split
continue
if row_split[0] == 'Source':
nextline = True
dosrc = True
src = []
continue
if nextline and dosrc:
for item in row:
try:
src.append(float(item))
except ValueError:
continue
x_srcSIE.append(src[0])
y_srcSIE.append(src[1])
nextline = False
dosrc = False
continue
if row_split[0] == 'images:\n':
nextline = True
doimg = True
count = 0
x, y, f, t = [], [], [], []
continue
if nextline and doimg:
count += 1
numbers = []
for item in row:
try:
numbers.append(float(item))
except ValueError:
continue
x.append(numbers[4])
y.append(numbers[5])
f.append(numbers[6])
t.append(numbers[7])
if int(count) == 4:
t = np.array(t)
if min(t) < 0:
t += -1 * min(t)
xpos = x
ypos = y
fr = np.array(f)
tdel = np.array(t)
return xpos, ypos, fr, t, macromodel, [x_srcSIE[0], y_srcSIE[0]]
def read_gravlens_out(fnames):
vector = []
if isinstance(fnames,list):
for fname in fnames:
with open(fname, 'r') as f:
lines = f.readlines()
f.close()
imgline = lines[1].split(' ')
numimg = int(imgline[1])
xpos, ypos, mag, tdelay = [], [], [], []
for i in range(0, numimg):
data = lines[2 + i].split(' ')
data = list(filter(None, data))
xpos.append(float(data[0]))
ypos.append(float(data[1]))
mag.append(np.absolute(float(data[2])))
tdelay.append(float(data[3]))
vector.append([np.array(xpos), np.array(ypos), np.array(mag), np.array(tdelay), numimg])
else:
with open(fnames, 'r') as f:
lines = f.readlines()
f.close()
imgline = lines[1].split(' ')
numimg = int(imgline[1])
xpos, ypos, mag, tdelay = [], [], [], []
for i in range(0, numimg):
data = lines[2 + i].split(' ')
data = filter(None, data)
xpos.append(float(data[0]))
ypos.append(float(data[1]))
mag.append(np.absolute(float(data[2])))
tdelay.append(float(data[3]))
vector.append([np.array(xpos), np.array(ypos), np.array(mag), np.array(tdelay), numimg])
return vector
def read_chain_out(fname, N=1):
nimg, srcx, srcy, x1, y1, m1, t1, x2, y2, m2, t2, x3, y3, m3, t3, x4, y4, m4, t4 = np.loadtxt(fname, unpack=True)
return nimg, [srcx, srcy], [x1, x2, x3, x4], [y1, y2, y3, y4], [m1, m2, m3, m4], [t1, t2, t3, t4]
def polar_to_cart(ellip, theta, polar_to_cart = True):
xcomp = ellip*np.cos(2*theta*np.pi*180**-1)
ycomp = ellip*np.sin(2*theta*np.pi*180**-1)
return xcomp,ycomp
def cart_to_polar(e1, e2, polar_to_cart = True):
if e1==0:
return 0,0
else:
return np.sqrt(e1**2+e2**2),0.5*np.arctan2(e2,e1)*180*np.pi**-1
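# Round-trip example for the two ellipticity conversions above (illustrative numbers):
#   e1, e2 = polar_to_cart(0.2, 30.)   # -> (0.1, ~0.173)
#   cart_to_polar(e1, e2)              # -> (~0.2, ~30.0)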
def array2image(array, nx=0, ny=0):
"""
reshapes a 1d array into a 2d image array (if nx, ny are not given, the length of the array must be a perfect square n**2)
:param array: image values
:type array: array of size n**2
:returns: 2d array
:raises: AttributeError, KeyError
"""
if nx == 0 or ny == 0:
n = int(np.sqrt(len(array)))
if n**2 != len(array):
raise ValueError("lenght of input array given as %s is not square of integer number!" %(len(array)))
nx, ny = n, n
image = array.reshape(int(nx), int(ny))
return image
def image2array(image):
"""
flattens a 2d image array of shape (nx, ny) into a 1d array
:param image: image values
:type image: array of size (nx, ny)
:returns: 1d array
:raises: AttributeError, KeyError
"""
nx, ny = image.shape # find the size of the array
imgh = np.reshape(image, nx*ny) # change the shape to be 1d
return imgh
def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False):
"""
:param numPix: number of pixels per axis
:param deltapix: pixel size
:param subgrid_res: sub-pixel resolution (default=1)
:return: x, y position information in two 1d arrays
"""
numPix_eff = numPix*subgrid_res
deltapix_eff = deltapix/float(subgrid_res)
a = np.arange(numPix_eff)
matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2)
if left_lower is True:
x_grid = matrix[:, 0]*deltapix
y_grid = matrix[:, 1]*deltapix
else:
x_grid = (matrix[:, 0] - (numPix_eff-1)/2.)*deltapix_eff
y_grid = (matrix[:, 1] - (numPix_eff-1)/2.)*deltapix_eff
shift = (subgrid_res-1)/(2.*subgrid_res)*deltapix
return array2image(x_grid - shift), array2image(y_grid - shift)
def filter_by_position(lens_components, x_filter=None, y_filter=None, mindis_front=0.5, mindis_back=0.3, log_masscut_low=7,
zmain=None, cosmology=None):
"""
:param xsub: sub x coords
:param ysub: sub y coords
:param x_filter: img x coords
:param y_filter: img y coords
:param mindis: max 2d distance
:return: filtered subhalos
"""
masscut_low = 10**log_masscut_low
keep_index = []
for index, deflector in enumerate(lens_components):
if not deflector.is_subhalo:
keep_index.append(index)
continue
if zmain >= deflector.redshift:
"""
for LOS halos; keep if it's rescaled position is near an image
"""
scale = np.ones_like(x_filter)
_mindis = mindis_front
else:
#zmain < deflector.redshift:
"""
for halos behind the main lens
"""
beta = cosmology.beta(deflector.redshift,zmain,cosmology.zsrc)
scale = np.ones_like(x_filter)*(1 - beta)
_mindis = mindis_back
#scale_mindis = 0.5
x, y = deflector.lenstronomy_args['center_x'], deflector.lenstronomy_args['center_y']
for i in range(0, len(x_filter)):
dr = ((x - x_filter[i]*scale[i]) ** 2 + (y - y_filter[i]*scale[i]) ** 2) ** .5
if dr <= _mindis or deflector.other_args['mass'] >= masscut_low:
keep_index.append(index)
break
newcomponents = [lens_components[i] for i in keep_index]
new_redshift_list = [lens_components[i].redshift for i in keep_index]
return newcomponents, new_redshift_list
def copy_directory(dirname,location):
shutil.copy(dirname,location)
def create_directory(dirname=''):
proc = subprocess.Popen(['mkdir', dirname])
proc.wait()
def delete_dir(dirname=''):
shutil.rmtree(dirname)
def rebin_image(image,factor):
if np.shape(image)[0]%factor != 0:
raise ValueError('size of image must be divisible by factor')
def rebin(a, shape):
sh = shape[0], a.shape[0] // shape[0], shape[1], a.shape[1] // shape[1]
return a.reshape(sh).mean(-1).mean(1)
size = int(np.shape(image)[0]*factor**-1)
return rebin(image,[size,size])
def convolve_image(image,kernel='Gaussian',scale=None):
if kernel == 'Gaussian':
grid = sfilt.gaussian_filter(image, scale * (2.355) ** -1, mode='constant', cval=0)
elif kernel == 'HST':
grid = sfilt.gaussian_filter(image, scale * (2.355) ** -1, mode='constant', cval=0)
return grid
def nfw_kr(X):
def f(x):
if isinstance(x, int) or isinstance(x, float):
if x > 1:
return np.arctan((x ** 2 - 1) ** .5) * (x ** 2 - 1) ** -.5
elif x < 1:
return np.arctanh((1 - x ** 2) ** .5) * (1 - x ** 2) ** -.5
else:
return 1
else:
inds1 = np.where(x < 1)
import pytest
from pytest import approx
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from simforest.distance import rbf, sqeuclidean
from simforest._distance import rbf_sequential as crbf
from scipy.spatial.distance import sqeuclidean as refsqeuclidean
def test_rbf():
x1 = np.array([1, 0, 0])
x2 = np.array([1, 0, 0])
x3 = np.array([1, 0, 0])
# rbf(x, q) - rbf(x, p)
rbf1 = rbf(x1.reshape(1, -1), x2, x3)
rbf2 = rbf_kernel(np.vstack([x1, x3]))[1][0] - rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1[0] == approx(rbf2)
x1 = np.array([1, 1, 1])
x2 = np.array([1, 2, 2])
x3 = np.array([3, 2, 1])
rbf1 = rbf(x1.reshape(1, -1), x2, x3)
rbf2 = rbf_kernel(np.vstack([x1, x3]))[1][0] - rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1[0] == approx(rbf2)
x1 = np.array([0.1, 0.01, 1.5])
x2 = np.array([2.1, 0.82, 2.15])
x3 = np.array([5.1, 2.82, 3.15])
rbf1 = rbf(x1.reshape(1, -1), x2, x3)
rbf2 = rbf_kernel(np.vstack([x1, x3]))[1][0] - rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1[0] == approx(rbf2)
x1 = np.array([0.01, 0.001, 0.000015])
x2 = np.array([0.21, 0.082, 2.15])
x3 = np.array([7.21, 7.082, 1.15])
rbf1 = rbf(x1.reshape(1, -1), x2, x3)
rbf2 = rbf_kernel(np.vstack([x1, x3]))[1][0] - rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1[0] == approx(rbf2)
def test_crbf():
x1 = np.array([1, 0, 0], dtype=np.float32)
x2 = np.array([1, 0, 0], dtype=np.float32)
rbf1 = crbf(x1, x2)
rbf2 = rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1 == approx(rbf2)
x1 = np.array([1, 1, 1], dtype=np.float32)
x2 = np.array([1, 2, 2], dtype=np.float32)
rbf1 = crbf(x1, x2)
rbf2 = rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1 == approx(rbf2)
x1 = np.array([0.1, 0.01, 1.5], dtype=np.float32)
x2 = np.array([2.1, 0.82, 2.15], dtype=np.float32)
rbf1 = crbf(x1, x2)
rbf2 = rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1 == approx(rbf2)
x1 = np.array([0.01, 0.001, 0.000015], dtype=np.float32)
x2 = np.array([0.21, 0.082, 2.15], dtype=np.float32)
rbf1 = crbf(x1, x2)
rbf2 = rbf_kernel(np.vstack([x1, x2]))[1][0]
assert rbf1 == approx(rbf2)
def test_squclidean():
"""In my implementation, squared euclidean distance - base projection is calculated as dot(X, p - q).
This way, I don't compute the projections exactly, but the order of projected points doesn't change.
During this test, I make sure that 2 * dot(X, p - q) == sqeuclidean(X, q) - sqeuclidean(X, p).
"""
x = np.array([1, 0, 0])
p = np.array([1, 0, 0])
q = np.array([1, 0, 0])
res1 = 2 * sqeuclidean(x.reshape(1, -1), p, q) + np.dot(q, q) - np.dot(p, p)
res2 = refsqeuclidean(x, q) - refsqeuclidean(x, p)
assert res1[0] == approx(res2)
x = np.array([1, 1, 1])
p = np.array([1, 2, 2])
q = np.array([3, 2, 1])
res1 = 2 * sqeuclidean(x.reshape(1, -1), p, q) + np.dot(q, q) - np.dot(p, p)
res2 = refsqeuclidean(x, q) - refsqeuclidean(x, p)
assert res1[0] == approx(res2)
x = np.array([0.1, 0.01, 1.5])
p = np.array([2.1, 0.82, 2.15])
q = np.array([5.1, 2.82, 3.15])
res1 = 2 * sqeuclidean(x.reshape(1, -1), p, q) + np.dot(q, q) - np.dot(p, p)
res2 = refsqeuclidean(x, q) - refsqeuclidean(x, p)
assert res1[0] == approx(res2)
x = np.array([0.01, 0.001, 0.000015])
p = np.array([0.21, 0.082, 2.15])
q = np.array([7.21, 7.082, 1.15])
res1 = 2 * sqeuclidean(x.reshape(1, -1), p, q) + np.dot(q, q) - np.dot(p, p)
# -*- coding: utf-8 -*-
"""
pipeline
data pipeline from image root folder to processed tensors of train test batches
for images and labels
"""
import os
import functools
import collections
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import model_selection, preprocessing
def folder_traverse(root_dir, ext=None):
"""recusively map all image files from root directory"""
if not os.path.exists(root_dir):
raise RuntimeError('{0} doesn\'t exist.'.format(root_dir))
file_structure = collections.defaultdict(list)
for item in os.scandir(root_dir):
if item.is_dir():
file_structure.update(folder_traverse(item.path, ext))
elif item.is_file() and item.name.endswith(ext):
file_structure[os.path.dirname(item.path)].append(item.name)
return file_structure
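# Example (hypothetical paths): map every .jpg under a training root
#   structure = folder_traverse('/data/train', ext='.jpg')
#   # -> {'/data/train/cats': ['c1.jpg', ...], '/data/train/dogs': ['d1.jpg', ...]}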
def resample(feature_index, labels, balance='auto'):
"""use oversampling to balance class, after split of training set."""
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(ratio=balance)
feature_index = np.array(feature_index).reshape(-1, 1)
resampled_index, _ = ros.fit_sample(feature_index, labels)
resampled_index = [i for nested in resampled_index for i in nested]
return resampled_index
def generate_data_skeleton(root_dir,
ext=None,
valid_size=None,
oversample=False):
"""turn file structure into human-readable pandas dataframe"""
file_structure = folder_traverse(root_dir, ext=ext)
reversed_fs = {k + '/' + f: os.path.splitext(f)[0]
for k, v in file_structure.items() for f in v}
# find the first csv and load it in memory and remove it from dictionary
for key in reversed_fs:
if key.endswith('.csv'):
df_csv = pd.read_csv(key, dtype=str)
reversed_fs.pop(key)
break
df = pd.DataFrame.from_dict(data=reversed_fs, orient='index').reset_index()
df.rename(columns={'index': 'path_to_file', 0: 'filename'}, inplace=True)
df.reset_index(inplace=True, drop=True)
df = df_csv.merge(right=df,
how='left',
left_on='image_name',
right_on='filename').dropna(axis=0)
discrete_labels = [string.split(' ') for string in df['tags'].tolist()]
mlb = preprocessing.MultiLabelBinarizer()
mlb.fit(discrete_labels)
X = np.array(df['path_to_file'])
y = mlb.transform(discrete_labels)
X_codified = df['path_to_file'].index
y_codified = pd.Categorical(df['tags']).codes
if valid_size:
print('tags one-hot encoded: \n{0}'.format(mlb.classes_))
X_train_codified, X_valid_codified, y_train_codified,\
y_valid_codified = model_selection.train_test_split(
X_codified,
y_codified,
test_size=valid_size)
if oversample:
resampled_train_idx = resample(X_train_codified, y_train_codified)
resampled_valid_idx = resample(X_valid_codified, y_valid_codified)
X_train, y_train = X[resampled_train_idx], y[resampled_train_idx]
X_valid, y_valid = X[resampled_valid_idx], y[resampled_valid_idx]
print('To balance classes, training data has been oversampled'
' to: {0}'.format(len(resampled_train_idx) +
len(resampled_valid_idx)))
elif not oversample:
X_train, y_train = X[X_train_codified], y[X_train_codified]
X_valid, y_valid = X[X_valid_codified], y[X_valid_codified]
print('training: {0} samples; validation: {1} samples.'.format(
X_train.shape[0], X_valid.shape[0]))
return X_train, y_train, X_valid, y_valid
elif not valid_size:
print('test: {0} samples.'.format(X.shape[0]))
return X, y
def make_queue(paths_to_image, labels, num_epochs=None, shuffle=True):
"""returns an Ops Tensor with queued image and label pair"""
images = tf.convert_to_tensor(paths_to_image, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.float32)
input_queue = tf.train.slice_input_producer(
tensor_list=[images, labels],
num_epochs=num_epochs,
shuffle=shuffle)
return input_queue
def decode_transform(input_queue,
shape=None,
standardize=True,
augmentation=None):
"""a single decode and transform function that applies standardization with
mean centralisation."""
# input_queue allows slicing with 0: path_to_image, 1: encoded label
label_queue = input_queue[1]
image_queue = tf.read_file(input_queue[0])
# !!! decode_jpeg only accepts RGB jpg but not raising error for CMYK
original_image = tf.image.decode_jpeg(contents=image_queue, channels=0)
# original_image = tf.cast(tf.image.decode_png(contents=image_queue,
# channels=0,
# dtype=tf.uint16),
# tf.int16)
# crop larger images to 256*256, this func doesn't 'resize'.
cropped_img = tf.image.resize_image_with_crop_or_pad(
image=original_image,
target_height=256,
target_width=256)
# resize cropped images to desired shape
img = tf.image.resize_images(images=cropped_img, size=shape[:2])
img.set_shape(shape)
if augmentation:
img = tf.image.random_flip_up_down(img)
img = tf.image.random_flip_left_right(img)
img = tf.image.rot90(img, k=np.random.randint(4))
import warnings
from numbers import Number
from typing import Iterable, List, Optional, Union
import numpy as np
from ..C import (
LEN_RGB,
LEN_RGBA,
RGB,
RGB_RGBA,
RGBA_ALPHA,
RGBA_MAX,
RGBA_MIN,
RGBA_WHITE,
)
from .clust_color import assign_colors_for_list
def process_result_list(results, colors=None, legends=None):
"""
Assign colors and legends to a list of results, check user provided lists.
Parameters
----------
results: list or pypesto.Result
list of pypesto.Result objects or a single pypesto.Result
colors: list, optional
list of RGBA colors
legends: str or list
labels for line plots
Returns
-------
results: list of pypesto.Result
list of pypesto.Result objects
colors: list of RGBA
One for each element in 'results'.
legends: list of str
labels for line plots
"""
# check how many results were passed
single_result = False
legend_error = False
if isinstance(results, list):
if len(results) == 1:
single_result = True
else:
single_result = True
results = [results]
# handle results according to their number
if single_result:
# assign colors and create list for later handling
if colors is None:
colors = [colors]
else:
colors = [np.array(colors)]
# create list of legends for later handling
if not isinstance(legends, list):
legends = [legends]
else:
# if more than one result is passed, we use one color per result
colors = assign_colors_for_list(len(results), colors)
# check whether list of legends has the correct length
if legends is None:
# No legends were passed: create some custom legends
legends = []
for i_leg in range(len(results)):
legends.append('Result ' + str(i_leg))
else:
# legends were passed by user: check length
if isinstance(legends, list):
if len(legends) != len(results):
legend_error = True
else:
legend_error = True
# size of legend list and size of results does not match
if legend_error:
raise ValueError(
'List of results passed and list of labels do '
'not have the same length but should. Stopping.'
)
return results, colors, legends
def process_offset_y(
offset_y: Optional[float], scale_y: str, min_val: float
) -> float:
"""
Compute offset for y-axis, depend on user settings.
Parameters
----------
offset_y:
value for offsetting the later plotted values, in order to ensure
positivity if a semilog-plot is used
scale_y:
Can be 'lin' or 'log10', specifying whether values should be plotted
on linear or on log10-scale
min_val:
Smallest value to be plotted
Returns
-------
offset_y: float
value for offsetting the later plotted values, in order to ensure
positivity if a semilog-plot is used
"""
# check whether the offset specified by the user is sufficient
if offset_y is not None:
if (scale_y == 'log10') and (min_val + offset_y <= 0.0):
warnings.warn(
"Offset specified by user is insufficient. "
"Ignoring specified offset and using "
+ str(np.abs(min_val) + 1.0)
+ " instead."
)
else:
return offset_y
else:
# check whether scaling is lin or log10
if scale_y == 'lin':
# linear scaling doesn't need any offset
return 0.0
return 1.0 - min_val
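# Worked example: with scale_y='log10', min_val=-4.0 and no user-specified offset, the
# function returns 1.0 - (-4.0) = 5.0, so every shifted value is at least 1.0 (positive).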
def process_y_limits(ax, y_limits):
"""
Apply user specified limits of y-axis.
Parameters
----------
ax: matplotlib.Axes, optional
Axes object to use.
y_limits: ndarray
y_limits, minimum and maximum, for current axes object
Returns
-------
ax: matplotlib.Axes, optional
Axes object to use.
"""
# apply y-limits, if they were specified by the user
if y_limits is not None:
y_limits = np.array(y_limits)
# check validity of bounds
if y_limits.size == 0:
y_limits = np.array(ax.get_ylim())
elif y_limits.size == 1:
# if the user specified only an upper bound
tmp_y_limits = ax.get_ylim()
y_limits = [tmp_y_limits[0], y_limits]
elif y_limits.size > 1:
y_limits = [y_limits[0], y_limits[1]]
# check validity of bounds if plotting in log-scale
if ax.get_yscale() == 'log' and y_limits[0] <= 0.0:
tmp_y_limits = ax.get_ylim()
if y_limits[1] <= 0.0:
y_limits = tmp_y_limits
warnings.warn(
"Invalid bounds for plotting in "
"log-scale. Using defaults bounds."
)
else:
y_limits = [tmp_y_limits[0], y_limits[1]]
warnings.warn(
"Invalid lower bound for plotting in "
"log-scale. Using only upper bound."
)
# set limits
ax.set_ylim(y_limits)
else:
# No limits passed, but if we have a result list: check the limits
ax_limits = np.array(ax.get_ylim())
data_limits = ax.dataLim.ymin, ax.dataLim.ymax
# Check if data fits to axes and adapt limits, if necessary
if ax_limits[0] > data_limits[0] or ax_limits[1] < data_limits[1]:
# Get range of data
data_range = data_limits[1] - data_limits[0]
if ax.get_yscale() == 'log':
data_range = np.log10(data_range)
new_limits = (
np.power(10, np.log10(data_limits[0]) - 0.02 * data_range),
np.power(10, np.log10(data_limits[1]) + 0.02 * data_range),
)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@authors: <NAME>, <NAME>
"""
import numpy as np
def parse_geometry(path):
""" Parse the input geometry file.
"""
f = open(path, 'r')
lines = f.readlines()
f.close()
vectors =[]
frac_coords = []
cartesian_coords = []
types = []
for i, line in enumerate(lines):
if "lattice_vector" in line:
ls = line.split()
vectors.append([float(ls[1]), float(ls[2]), float(ls[3])])
if "atom_frac" in line:
ls = line.split()
frac_coords.append([float(ls[1]), float(ls[2]), float(ls[3])])
types.append(str(ls[4]))
Cartesian=False
elif 'atom ' in line:
ls = line.split()
cartesian_coords.append([float(ls[1]), float(ls[2]), float(ls[3])])
types.append(str(ls[4]))
Cartesian=True
else:
continue
if Cartesian == True:
return vectors, cartesian_coords, types, Cartesian
else:
return vectors, frac_coords, types, Cartesian
def parse_homo_lumo(path):
""" Parse the indices of the HOMO and LUMO.
"""
for line in open(path):
# Start from k-position
if " 0.00000 " in line:
words = line.split()
n_energy=int(words[0])
break
n_HOMO = 4 + 2*(n_energy-1)-1
n_LUMO = 4 + 2*n_energy-1
return n_HOMO, n_LUMO
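# Worked example of the index arithmetic above: if the Fermi-level state is n_energy = 5,
# then n_HOMO = 4 + 2*(5-1) - 1 = 11 and n_LUMO = 4 + 2*5 - 1 = 13 (column indices in the
# band output file).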
def parse_bands(files, band_id, nend=None):
""" Parse the energy bands from the `bandxxxx.out` files.
:Parameters:
files : list
List of band energy files.
band_id : int
Band index.
nend : int | None
Ending index of the energy band values.
"""
band = []
n_bands = len(files)
for i_band, filename in enumerate(files):
data = np.loadtxt(filename)
#################################################################################
# Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
# Also includes parts from: https://github.com/pytorch/vision
# License: License: https://github.com/pytorch/vision/blob/master/LICENSE
# BSD 3-Clause License
#
# Copyright (c) <NAME> 2016,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==============================================================================
# Some parts of the code are borrowed from: https://github.com/ansleliu/LightNet
# with the following license:
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Reference:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“The Cityscapes Dataset for Semantic Urban Scene Understanding,”
in Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
https://www.cityscapes-dataset.com/
"""
import os
import numpy as np
import cv2
import json
from torch.utils import data
import sys
import warnings
from torchvision.edgeailite import xnn
###########################################
# config settings
def get_config():
dataset_config = xnn.utils.ConfigNode()
dataset_config.image_folders = ('leftImg8bit',)
dataset_config.input_offsets = None
dataset_config.load_segmentation = True
dataset_config.load_segmentation_five_class = False
return dataset_config
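# Example (illustrative): downstream scripts typically fetch the defaults and override fields:
#   dataset_config = get_config()
#   dataset_config.load_segmentation_five_class = True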
###########################################
class CityscapesBaseSegmentationLoader():
"""CityscapesLoader: Data is derived from CityScapes, and can be downloaded from here: https://www.cityscapes-dataset.com/downloads/
Many Thanks to @fvisin for the loader repo: https://github.com/fvisin/dataset_loaders/blob/master/dataset_loaders/images/cityscapes.py"""
colors = [
[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153],
[153, 153, 153],[250, 170, 30], [220, 220, 0],[107, 142, 35], [152, 251, 152],
[0, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70],
[0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32], [0, 0, 0]]
label_colours = dict(zip(range(19), colors))
void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
# class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',
# 'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',
# 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train',
# 'motorcycle', 'bicycle']
ignore_index = 255
class_map = dict(zip(valid_classes, range(19)))
num_classes_ = 19
class_weights_ = np.array([0.05570516, 0.32337477, 0.08998544, 1.03602707, 1.03413147, 1.68195437,
5.58540548, 3.56563995, 0.12704978, 1., 0.46783719, 1.34551528,
5.29974114, 0.28342531, 0.9396095, 0.81551811, 0.42679146, 3.6399074,
2.78376194], dtype=float)
@classmethod
def decode_segmap(cls, temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, cls.num_classes_):
r[temp == l] = cls.label_colours[l][0]
g[temp == l] = cls.label_colours[l][1]
b[temp == l] = cls.label_colours[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
@classmethod
def encode_segmap(cls, mask):
# Put all void classes to zero
for _voidc in cls.void_classes:
mask[mask == _voidc] = cls.ignore_index
for _validc in cls.valid_classes:
mask[mask == _validc] = cls.class_map[_validc]
return mask
@classmethod
def class_weights(cls):
return cls.class_weights_
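# Minimal usage sketch (illustrative; the file name is a hypothetical gtFine labelIds mask):
#   raw = cv2.imread('xxx_gtFine_labelIds.png', 0)
#   train_ids = CityscapesBaseSegmentationLoader.encode_segmap(np.array(raw, dtype=np.uint8))
#   rgb = CityscapesBaseSegmentationLoader.decode_segmap(train_ids)   # float RGB in [0, 1]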
###########################################
class CityscapesBaseSegmentationLoaderFiveClasses():
"""CityscapesLoader: Data is derived from CityScapes, and can be downloaded from here: https://www.cityscapes-dataset.com/downloads/
Many Thanks to @fvisin for the loader repo: https://github.com/fvisin/dataset_loaders/blob/master/dataset_loaders/images/cityscapes.py"""
colors = [ # [ 0, 0, 0],
[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [0, 0, 0]]
label_colours = dict(zip(range(5), colors))
void_classes = [-1, 255]
valid_classes = [0, 1, 2, 3, 4]
# class_names = ['road', 'sky', 'pedestrian', 'vehicle', 'background']
ignore_index = 255
class_map = dict(zip(valid_classes, range(5)))
num_classes_ = 5
class_weights_ = np.array([0.22567085, 1.89944273, 5.24032014, 1., 0.13516443], dtype=float)
@classmethod
def decode_segmap(cls, temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, cls.num_classes_):
r[temp == l] = cls.label_colours[l][0]
g[temp == l] = cls.label_colours[l][1]
b[temp == l] = cls.label_colours[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
@classmethod
def encode_segmap(cls, mask):
# Put all void classes to zero
for _voidc in cls.void_classes:
mask[mask == _voidc] = cls.ignore_index
for _validc in cls.valid_classes:
mask[mask == _validc] = cls.class_map[_validc]
return mask
@classmethod
def class_weights(cls):
return cls.class_weights_
###########################################
class CityscapesBaseMotionLoader():
"""CityscapesLoader: Data is derived from CityScapes, and can be downloaded from here: https://www.cityscapes-dataset.com/downloads/
Many Thanks to @fvisin for the loader repo: https://github.com/fvisin/dataset_loaders/blob/master/dataset_loaders/images/cityscapes.py"""
colors = [ # [ 0, 0, 0],
[0, 0, 0], [119, 11, 32]]
label_colours = dict(zip(range(2), colors))
void_classes = []
valid_classes = [0, 255]
# class_names = ['static', 'moving']
ignore_index = 255
class_map = dict(zip(valid_classes, range(2)))
num_classes_ = 2
    class_weights_ = np.array([0.05, 0.95], dtype=float)  # calculated weights based on median_frequency = [0.51520306, 16.94405377]
@classmethod
def decode_segmap(cls, temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, cls.num_classes_):
r[temp == l] = cls.label_colours[l][0]
g[temp == l] = cls.label_colours[l][1]
b[temp == l] = cls.label_colours[l][2]
#
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
@classmethod
def encode_segmap(cls, mask):
for _validc in cls.valid_classes:
mask[mask == _validc] = cls.class_map[_validc]
# Put all void classes to zero
for _voidc in cls.void_classes:
mask[mask == _voidc] = cls.ignore_index
return mask
@classmethod
def class_weights(cls):
return cls.class_weights_
###########################################
class CityscapesDataLoader(data.Dataset):
def __init__(self, dataset_config, root, split="train", gt="gtFine", transforms=None, image_folders=('leftImg8bit',),
search_images=False, load_segmentation=True, load_depth=False, load_motion=False, load_flow=False,
load_segmentation_five_class=False, inference=False, additional_info=False, input_offsets=None):
super().__init__()
if split not in ['train', 'val', 'test']:
warnings.warn(f'unknown split specified: {split}')
#
self.root = root
self.gt = gt
self.split = split
self.transforms = transforms
self.image_folders = image_folders
self.search_images = search_images
self.files = {}
self.additional_info = additional_info
self.load_segmentation = load_segmentation
self.load_segmentation_five_class = load_segmentation_five_class
self.load_depth = load_depth
self.load_motion = load_motion
self.load_flow = load_flow
self.inference = inference
self.input_offsets = input_offsets
self.image_suffix = (self.image_folders[-1]+'.png') #'.png'
self.image_suffix = self.image_suffix.replace('leftImg8bit_sequence.png', 'leftImg8bit.png')
self.segmentation_suffix = self.gt+'_labelIds.png' #'.png'
if self.load_segmentation_five_class:
self.segmentation_suffix = self.gt+'_labelTrainIds.png'
self.disparity_suffix = 'disparity.png'
self.motion_suffix = self.gt+'_labelTrainIds_motion.png' #'.png'
self.image_base = os.path.join(self.root, image_folders[-1], self.split)
self.segmentation_base = os.path.join(self.root, gt, self.split)
self.disparity_base = os.path.join(self.root, 'disparity', self.split)
self.cameracalib_base = os.path.join(self.root, 'camera', self.split)
self.motion_base = os.path.join(self.root, gt, self.split)
if self.search_images:
self.files = xnn.utils.recursive_glob(rootdir=self.image_base, suffix=self.image_suffix)
else:
self.files = xnn.utils.recursive_glob(rootdir=self.segmentation_base, suffix=self.segmentation_suffix)
#
self.files = sorted(self.files)
if not self.files:
raise Exception("> No files for split=[%s] found in %s" % (split, self.segmentation_base))
#
self.image_files = [None] * len(image_folders)
for image_idx, image_folder in enumerate(image_folders):
image_base = os.path.join(self.root, image_folder, self.split)
self.image_files[image_idx] = sorted(xnn.utils.recursive_glob(rootdir=image_base, suffix='.png'))
assert len(self.image_files[image_idx]) == len(self.image_files[0]), 'all folders should have same number of files'
#
def __len__(self):
return len(self.files)
def __getitem__(self, index):
if self.search_images:
image_path = self.files[index].rstrip()
self.check_file_exists(image_path)
segmentation_path = image_path.replace(self.image_base, self.segmentation_base).replace(self.image_suffix, self.segmentation_suffix)
else:
segmentation_path = self.files[index].rstrip()
self.check_file_exists(segmentation_path)
image_path = segmentation_path.replace(self.segmentation_base, self.image_base).replace(self.segmentation_suffix, self.image_suffix)
#
images = []
images_path = []
for image_idx, image_folder in enumerate(self.image_folders):
sys.stdout.flush()
this_image_path = self.image_files[image_idx][index].rstrip()
if image_idx == (len(self.image_folders)-1):
assert this_image_path == image_path, 'image file name error'
#
self.check_file_exists(this_image_path)
img = cv2.imread(this_image_path)[:,:,::-1]
if self.input_offsets is not None:
img = img - self.input_offsets[image_idx]
#
images.append(img)
images_path.append(this_image_path)
#
targets = []
targets_path = []
if self.load_flow and (not self.inference):
flow_zero = np.zeros((images[0].shape[0],images[0].shape[1],2), dtype=np.float32)
targets.append(flow_zero)
if self.load_depth and (not self.inference):
disparity_path = image_path.replace(self.image_base, self.disparity_base).replace(self.image_suffix, self.disparity_suffix)
self.check_file_exists(disparity_path)
depth = self.depth_loader(disparity_path)
targets.append(depth)
#
if self.load_segmentation and (not self.inference):
lbl = cv2.imread(segmentation_path,0)
lbl = CityscapesBaseSegmentationLoader.encode_segmap(np.array(lbl, dtype=np.uint8))
targets.append(lbl)
targets_path.append(segmentation_path)
#
elif self.load_segmentation_five_class and (not self.inference):
lbl = cv2.imread(segmentation_path,0)
lbl = CityscapesBaseSegmentationLoaderFiveClasses.encode_segmap(np.array(lbl, dtype=np.uint8))
targets.append(lbl)
targets_path.append(segmentation_path)
if self.load_motion and (not self.inference):
motion_path = image_path.replace(self.image_base, self.motion_base).replace(self.image_suffix, self.motion_suffix)
self.check_file_exists(motion_path)
motion = cv2.imread(motion_path,0)
motion = CityscapesBaseMotionLoader.encode_segmap(np.array(motion, dtype=np.uint8))
targets.append(motion)
#
#targets = np.stack(targets, axis=2)
if (self.transforms is not None):
images, targets = self.transforms(images, targets)
#
if self.additional_info:
return images, targets, images_path, targets_path
else:
return images, targets
#
def decode_segmap(self, lbl):
if self.load_segmentation:
return CityscapesBaseSegmentationLoader.decode_segmap(lbl)
elif self.load_segmentation_five_class:
return CityscapesBaseSegmentationLoaderFiveClasses.decode_segmap(lbl)
else:
return CityscapesBaseMotionLoader.decode_segmap(lbl)
#
def check_file_exists(self, file_name):
if not os.path.exists(file_name) or not os.path.isfile(file_name):
raise Exception("{} is not a file, can not open with imread.".format(file_name))
#
def depth_loader(self, disparity_path):
eps = (1e-6)
disparity_range = (eps, 255.0)
depth_range = (1.0, 255.0)
disp = cv2.imread(disparity_path, cv2.IMREAD_UNCHANGED)
disp_valid = (disp > 0) # 0 values have to be ignored
disp = ((disp - 1.0)/256.0)
# convert to depth
calib_path = disparity_path.replace(self.disparity_base, self.cameracalib_base).replace(self.disparity_suffix, 'camera.json')
with open(calib_path) as fp:
cameracalib = json.load(fp)
extrinsic = cameracalib['extrinsic']
intrinsic = cameracalib['intrinsic']
focal_len = intrinsic['fx']
proj = (focal_len * extrinsic['baseline'])
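        # pinhole stereo relation: depth = fx * baseline / disparity (zero-disparity pixels stay 0)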
depth = np.divide(proj, disp, out=np.zeros_like(disp), where=(disp!=0))
d_out = | np.clip(depth, depth_range[0], depth_range[1]) | numpy.clip |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
class RadarAxes(PolarAxes):
name = 'radar'
def __init__(self, figure=None, rect=None, spoke_count=0, radar_patch_type="polygon", radar_spine_type="circle", *args, **kwargs):
resolution = kwargs.pop("resolution", 1)
self.spoke_count = spoke_count
self.radar_patch_type = radar_patch_type
self.radar_spine_type = radar_spine_type
        if figure is None:
            figure = plt.gcf()
        if rect is None:
            rect = figure.bbox_inches
self.radar_theta = (
1.75 * np.pi *
np.linspace(0, 1 - 1.0 / self.spoke_count, self.spoke_count))
self.radar_theta += np.pi / 2
super(RadarAxes, self).__init__(figure, rect, *args, **kwargs)
def draw_patch(self):
if self.radar_patch_type == "polygon":
return self.draw_poly_patch()
elif self.radar_patch_type == "circle":
            return draw_circle_patch(self)
def draw_poly_patch(self):
verts = unit_poly_verts(self.radar_theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def fill(self, *args, **kwargs):
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(self.radar_theta * 180 / np.pi, labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if self.radar_patch_type == 'circle':
return PolarAxes._gen_axes_spines(self)
verts = unit_poly_verts(self.radar_theta)
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, self.radar_spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
def _as_mpl_axes(self):
return RadarAxes, {"spoke_count": self.spoke_count,
"radar_patch_type": self.radar_patch_type,
"radar_spine_type": self.radar_spine_type}
def draw_circle_patch(self):
return plt.Circle((0.5, 0.5), 0.5)
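# Usage sketch (illustrative, not executed here); the keyword names mirror RadarAxes.__init__:
#   register_projection(RadarAxes)
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='radar', spoke_count=5)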
def unit_poly_verts(theta):
x0, y0, r = [0.5] * 3
verts = [(r * | np.cos(t) | numpy.cos |
import numpy as np
import h5py
import math
import warnings
def zero_one(data_column):
# ((x - x_min)/(x_max - x_min))
copy = np.empty_like(data_column)
copy[:] = data_column
    col_max = np.nanmax(copy)  # avoid shadowing the max/min builtins
    col_min = np.nanmin(copy)
    def norm(n):
        if not np.isnan(n):
            if col_max == col_min:
                return 0
            else:
                return (n - col_min) / (col_max - col_min)
return n
vfunc = np.vectorize(norm)
return vfunc(copy)
def percent_change(data_column):
# (x[i]): ((x[i] - x[i-1])/x[i-1]); if i == 0: x[i] = 0
copy = np.empty_like(data_column)
copy[:] = data_column
n = np.empty_like(data_column)
n[:] = data_column
try:
start_point = np.argwhere(np.isnan(copy))[-1][0] + 1
except IndexError:
start_point = 0
for i in range(start_point, len(copy)):
if i == start_point:
copy[i] = 0
else:
if n[i - 1] == 0:
copy[i] = 0
else:
copy[i] = ((n[i] - n[i - 1]) / n[i - 1])
return copy
def normal_dist(data_column):
# (x - x_mean)/x_std
copy = np.empty_like(data_column)
copy[:] = data_column
mean = np.nanmean(copy)
std = np.nanstd(copy)
def norm(n):
if not np.isnan(n):
if std == 0:
return 0
return (n - mean) / std
return n
vfunc = np.vectorize(norm)
return vfunc(copy)
def linear_residual(data_column, second_norm=None):
# line from first to last for column, generate inference_function
# x - inference_function(x)
copy = np.empty_like(data_column)
copy[:] = data_column
y = copy[~np.isnan(copy)]
x = np.arange(1, len(y) + 1).astype(np.float32)
line = np.polyfit(x, y, 1)
def norm(n):
if not np.isnan(n):
return n - ((line[0] * n) + line[1])
return n
vfunc = np.vectorize(norm)
first_norm = vfunc(copy)
if second_norm is not None:
return second_norm(first_norm).astype(np.float32)
else:
return first_norm.astype(np.float32)
def exp_residual(data_column, second_norm=None):
# CAGR for column, generate inference_function
# x - inference_function(x)
copy = np.empty_like(data_column)
copy[:] = data_column
year_dif = int(copy.shape[0] / 12)
def cagr_func(f, l, n):
if f == 0:
return 0
return math.pow((l / f), (1 / n)) - 1
first = copy[0]
last = copy[-1]
if first == 0:
first = np.where(copy != 0)[0][0]
if first < 0 < last:
first = np.where(copy > 0)[0][0]
if first > 0 > last:
last = np.where(copy > 0)[0][-1]
cagr = cagr_func(first, last, year_dif)
def norm(n):
if not np.isnan(n):
return n - cagr
return n
vfunc = np.vectorize(norm)
first_norm = vfunc(copy)
if second_norm is not None:
return second_norm(first_norm).astype(np.float32)
else:
return first_norm.astype(np.float32)
def gdp_residual(data_column, gdp_data, second_norm=None):
# (x[i]): ((x[i] - x[i-1])/x[i-1]); if i == 0: x[i] = 0
# x[i] - ((gdp[i] - gdp[i-1])/gdp[i-1]); if i == 0: gdp[i] = 0
copy = np.empty_like(data_column)
copy[:] = data_column
n = percent_change(copy)
gdp = gdp_data
try:
start_point = np.argwhere(np.isnan(copy))[-1][0] + 1
except IndexError:
start_point = 0
for i in range(start_point, len(copy)):
if i == start_point:
copy[i] = 0
else:
if gdp[i - 1] == 0:
copy[i] = 0
else:
copy[i] = (n[i] - ((gdp[i] - gdp[i - 1]) / gdp[i - 1]))
if second_norm is not None:
return second_norm(copy).astype(np.float32)
else:
return copy.astype(np.float32)
def normalize_dataset(dataset, norm_fn, second_norm=None):
"""
In pseudocode:
If norm_fn is one of the residuals, use lambda to create a new function that takes only data_column as input.
Then use np.apply_along_axis to map norm_fn across the vertical axis of the dataset.
Return the new array
"""
copy = np.empty_like(dataset)
copy[:] = dataset
# hdf5 file with gdp data
    hdf5 = h5py.File('FREDcast.hdf5', 'r')
gdp = np.asarray(hdf5['admin/gdp'])
hdf5.close()
if norm_fn is not None:
for i in range(0, copy.shape[1]):
if not np.count_nonzero(np.isnan(copy[:, i])) == copy[:, i].size:
if norm_fn == gdp_residual:
if second_norm is None:
copy[:, i] = gdp_residual(copy[:, i], gdp)
else:
copy[:, i] = gdp_residual(copy[:, i], gdp, second_norm)
else:
if second_norm is None:
copy[:, i] = norm_fn(copy[:, i])
else:
copy[:, i] = norm_fn(copy[:, i], second_norm)
return copy
else:
raise ValueError('Missing dataset value!')
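# Alternative sketch of the apply_along_axis approach described in the docstring of
# normalize_dataset. The function name and explicit gdp argument are illustrative
# assumptions; unlike the loop above it does not skip all-NaN columns.
def normalize_dataset_columnwise(dataset, norm_fn, gdp=None, second_norm=None):
    if norm_fn is None:
        raise ValueError('Missing dataset value!')
    if norm_fn == gdp_residual:
        column_fn = lambda col: gdp_residual(col, gdp, second_norm)
    elif second_norm is not None:
        column_fn = lambda col: norm_fn(col, second_norm)
    else:
        column_fn = norm_fn
    # axis=0 hands each column (one indicator series) to the normalizer
    return np.apply_along_axis(column_fn, 0, np.asarray(dataset, dtype=np.float32))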
if __name__ == '__main__':
import unittest
class UnitTester(unittest.TestCase):
def setUp(self):
pass
def test_zero_one(self):
test_data_column = np.array([1, 2, 3, 4, 5], dtype=np.float32)
solution = np.array([0, 0.25, 0.5, 0.75, 1], dtype=np.float32)
test_result = zero_one(test_data_column)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
def test_percent_change(self):
test_data_column = np.array([1, 2, 3, 4, 5], dtype=np.float32)
solution = np.array([0, 1, 0.5, 0.33333334, 0.25], dtype=np.float32)
test_result = percent_change(test_data_column)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
def test_normal_dist(self):
test_data_column = np.array([1, 2, 3, 4, 5], dtype=np.float32)
solution = np.array([-1.414214, -0.707107, 0, 0.707107, 1.414214], dtype=np.float32)
test_result = normal_dist(test_data_column)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=4)
def test_linear_residual(self):
test_data_column_a = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([-0.8, -6, -8.6, -9.9, -13.8], dtype=np.float32)
test_result = linear_residual(test_data_column_a, second_norm=None)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
test_data_column_b = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([1, 0.6, 0.4, 0.3, 0], dtype=np.float32)
test_result = linear_residual(test_data_column_b, second_norm=zero_one)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
test_data_column_c = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([0, 6.5, 0.433333, 0.151163, 0.393939], dtype=np.float32)
test_result = linear_residual(test_data_column_c, second_norm=percent_change)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=14)
test_data_column_d = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([1.625209, 0.42135, -0.180579, -0.481543, -1.38447], dtype=np.float32)
test_result = linear_residual(test_data_column_d, second_norm=normal_dist)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=274)
def test_exp_residual(self):
test_data_column_a = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([-313.749329, -309.749329, -307.749329, -306.749329, -303.749329], dtype=np.float32)
test_result = exp_residual(test_data_column_a, second_norm=None)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
test_data_column_b = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([0, 0.4, 0.6, 0.7, 1], dtype=np.float32)
test_result = exp_residual(test_data_column_b, second_norm=zero_one)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
test_data_column_c = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([0, -0.012749, -0.006457, -0.003249, -0.00978], dtype=np.float32)
test_result = exp_residual(test_data_column_c, second_norm=percent_change)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=1708)
test_data_column_d = np.array([1, 5, 7, 8, 11], dtype=np.float32)
solution = np.array([-1.625209, -0.42135, 0.180579, 0.481543, 1.384437], dtype=np.float32)
test_result = exp_residual(test_data_column_d, second_norm=normal_dist)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=16)
def test_gdp_residual(self):
            hdf5 = h5py.File('FREDcast.hdf5', 'r')
gdp = np.asarray(hdf5['admin/gdp'])
hdf5.close()
test_data_column_a = np.array([1000, 2000, 4000, 3500, 4500], dtype=np.float32)
solution = np.array([0, 1, 1, -0.139243, 0.285714], dtype=np.float32)
test_result = gdp_residual(test_data_column_a, gdp, second_norm=None)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=26)
test_data_column_b = np.array([1000, 2000, 4000, 3500, 4500], dtype=np.float32)
solution = np.array([0.122224, 1, 1, 0, 0.373017], dtype=np.float32)
test_result = gdp_residual(test_data_column_b, gdp, second_norm=zero_one)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=26)
test_data_column_c = np.array([1000, 2000, 4000, 3500, 4500], dtype=np.float32)
solution = np.array([0, 0, 0, -1.139243, -3.051917], dtype=np.float32)
test_result = gdp_residual(test_data_column_c, gdp, second_norm=percent_change)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=3)
test_data_column_d = np.array([1000, 2000, 4000, 3500, 4500], dtype=np.float32)
solution = np.array([-0.883856, 1.175002, 1.175002, -1.170537, -0.295611], dtype=np.float32)
test_result = gdp_residual(test_data_column_d, gdp, second_norm=normal_dist)
self.assertEqual(test_result.shape, (5,))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=5)
def test_normalize_dataset(self):
test_data_column_1 = np.array([10, 20, 30], dtype=np.float32)
test_data_column_1 = test_data_column_1.reshape(test_data_column_1.size, 1)
test_data_column_2 = np.array([20, 30, 40], dtype=np.float32)
test_data_column_2 = test_data_column_2.reshape(test_data_column_2.size, 1)
test_dataset_a = np.hstack((test_data_column_1, test_data_column_2))
# linear residual
solution_1 = np.array([-90, -180, -270], dtype=np.float32)
solution_1 = solution_1.reshape(solution_1.size, 1)
solution_2 = np.array([-190, -280, -370], dtype=np.float32)
solution_2 = solution_2.reshape(solution_2.size, 1)
solution = np.hstack((solution_1, solution_2))
self.assertRaises(ValueError, lambda: normalize_dataset(test_dataset_a, None))
test_result = normalize_dataset(test_dataset_a, linear_residual)
self.assertEqual(test_result.shape, (3, 2))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
# linear residual + zero_one
test_dataset_b = np.hstack((test_data_column_1, test_data_column_2))
solution_1 = np.array([1, 0.5, 0], dtype=np.float32)
solution_1 = solution_1.reshape(solution_1.size, 1)
solution_2 = np.array([1, 0.5, 0], dtype=np.float32)
solution_2 = solution_2.reshape(solution_2.size, 1)
solution = np.hstack((solution_1, solution_2))
test_result = normalize_dataset(test_dataset_b, linear_residual, zero_one)
self.assertEqual(test_result.shape, (3, 2))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
# linear residual + percent_change
test_dataset_c = np.hstack((test_data_column_1, test_data_column_2))
solution_1 = np.array([0, 1, 0.5], dtype=np.float32)
solution_1 = solution_1.reshape(solution_1.size, 1)
solution_2 = np.array([0, 0.473684, 0.321429], dtype=np.float32)
solution_2 = solution_2.reshape(solution_2.size, 1)
solution = np.hstack((solution_1, solution_2))
test_result = normalize_dataset(test_dataset_c, linear_residual, percent_change)
self.assertEqual(test_result.shape, (3, 2))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=15)
# linear residual + normal_dist
test_dataset_d = np.hstack((test_data_column_1, test_data_column_2))
solution_1 = np.array([1.224745, -3.867705e-16, -1.224745], dtype=np.float32)
solution_1 = solution_1.reshape(solution_1.size, 1)
solution_2 = np.array([1.224745, 0, -1.224745], dtype=np.float32)
solution_2 = solution_2.reshape(solution_2.size, 1)
solution = np.hstack((solution_1, solution_2))
test_result = normalize_dataset(test_dataset_d, linear_residual, normal_dist)
self.assertEqual(test_result.shape, (3, 2))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_almost_equal_nulp(test_result, solution, nulp=1)
# exp residual
test_dataset_e = np.hstack((test_data_column_1, test_data_column_2))
solution_1 = np.array([-70, -60, -50], dtype=np.float32)
solution_1 = solution_1.reshape(solution_1.size, 1)
solution_2 = np.array([5, 15, 25], dtype=np.float32)
solution_2 = solution_2.reshape(solution_2.size, 1)
solution = np.hstack((solution_1, solution_2))
self.assertRaises(ValueError, lambda: normalize_dataset(test_dataset_a, None))
test_result = normalize_dataset(test_dataset_e, exp_residual)
self.assertEqual(test_result.shape, (3, 2))
self.assertEqual(test_result.dtype, np.float32)
np.testing.assert_array_equal(test_result, solution)
# exp residual + zero_one
test_dataset_f = np.hstack((test_data_column_1, test_data_column_2))
solution_1 = np.array([0, 0.5, 1], dtype=np.float32)
solution_1 = solution_1.reshape(solution_1.size, 1)
solution_2 = np.array([0, 0.5, 1], dtype=np.float32)
solution_2 = solution_2.reshape(solution_2.size, 1)
solution = np.hstack((solution_1, solution_2))
test_result = normalize_dataset(test_dataset_f, exp_residual, zero_one)
self.assertEqual(test_result.shape, (3, 2))
self.assertEqual(test_result.dtype, np.float32)
| np.testing.assert_array_equal(test_result, solution) | numpy.testing.assert_array_equal |
#!/usr/bin/env python
# coding: utf-8
# Solve differential flatness and check feasibility of control command
# Use NED coordinate
import os, sys, time, copy, yaml
import numpy as np
from .utils import *
# import cupy as cp
class QuadModel:
def __init__(self, cfg_path=None, drone_model=None):
        if cfg_path is None:
curr_path = os.path.dirname(os.path.abspath(__file__))
cfg_path = curr_path+"/../config/multicopter_model.yaml"
        if drone_model is None:
drone_model="default"
with open(cfg_path, 'r') as stream:
try:
cfg = yaml.safe_load(stream)
self.thrustCoef = np.double(cfg['motor_model']['thrust_coefficient'])
self.torqueCoef = np.double(cfg['motor_model']['torque_coefficient'])
self.armLength = np.double(cfg['motor_model']['moment_arm'])
self.mass = np.double(cfg['uav_model'][drone_model]['vehicle_mass'])
self.Ixx = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_xx'])
self.Iyy = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_yy'])
self.Izz = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_zz'])
self.w_max = np.double(cfg['motor_model']['max_prop_speed'])
self.w_min = np.double(cfg['motor_model']['min_prop_speed'])
self.gravity = np.double(cfg['simulation']['gravity'])
self.w_sta = np.sqrt(self.mass*self.gravity/self.thrustCoef/4.0)
except yaml.YAMLError as exc:
print(exc)
lt = self.armLength*self.thrustCoef
k0 = self.torqueCoef
k1 = self.thrustCoef
self.G1 = np.array([[lt,-lt,-lt,lt],\
[lt,lt,-lt,-lt],\
[-k0,k0,-k0,k0],\
[-k1,-k1,-k1,-k1]])
self.J = np.diag(np.array([self.Ixx,self.Iyy,self.Izz]))
return
def getWs(self, status):
pos = np.array(status[0:3])
vel = np.array(status[3:6])
acc = np.array(status[6:9])
jer = np.array(status[9:12])
sna = np.array(status[12:15])
yaw = status[15]
dyaw = status[16]
ddyaw = status[17]
# Total thrust
tau_v = acc - np.array([0,0,self.gravity])
tau = -np.linalg.norm(tau_v)
bz = tau_v/tau
Thrust = self.mass*tau
# roll & pitch
roll = np.arcsin(np.dot(bz,[np.sin(yaw),-np.cos(yaw),0]))
pitch = np.arctan(np.dot(bz,[np.cos(yaw),np.sin(yaw),0])/bz[2])
bx = np.array([np.cos(yaw)*np.cos(pitch),np.sin(yaw)*np.cos(pitch),-np.sin(pitch)])
by = np.array([-np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll),\
np.cos(yaw)*np.cos(roll)+np.sin(yaw)*np.sin(pitch)*np.sin(roll),\
np.cos(pitch)*np.sin(roll)])
# dzhi & Omega
dzhi = np.dot(np.array([-1*by,bx/np.cos(roll),np.zeros(3)]),jer)/tau \
+np.array([np.sin(pitch),-np.cos(pitch)*np.tan(roll),1])*dyaw
S_inv = np.array([[1,0,-np.sin(pitch)],\
[0,np.cos(roll),np.cos(pitch)*np.sin(roll)],\
[0,-np.sin(roll),np.cos(pitch)*np.cos(roll)]])
Omega = np.dot(S_inv,dzhi)
C_inv = np.array([-1*by/tau,bx/np.cos(roll)/tau,bz])
d = np.array([np.cos(yaw)*np.sin(roll)-np.cos(roll)*np.sin(yaw)*np.sin(pitch),\
np.sin(yaw)*np.sin(roll)+np.cos(roll)*np.cos(yaw)*np.sin(pitch),0])*tau
dtau = np.dot(bz,jer-dyaw*d)
# ddzhi & dOmega
dS = np.array([[0,np.cos(roll)*np.tan(pitch),-np.sin(roll)*np.tan(pitch)],\
[0,-np.sin(roll),-np.cos(roll)],\
[0,np.cos(roll)/np.cos(pitch),-np.sin(roll)/np.cos(pitch)]])*dzhi[0]\
+np.array([[0,np.sin(roll)/np.cos(pitch)/np.cos(pitch),np.cos(roll)/np.cos(pitch)/np.cos(pitch)],\
[0,0,0],\
[0,np.sin(roll)*np.tan(pitch)/np.cos(pitch),np.cos(roll)*np.tan(pitch)/np.cos(pitch)]])*dzhi[1]
e = 2*dtau*np.dot(np.array([-1*by,bx,0]).T,Omega)\
+tau*np.dot(np.array([bx,by,bz]).T,np.array([Omega[0]*Omega[2],Omega[1]*Omega[2],-Omega[0]*Omega[0]-Omega[1]*Omega[1]]))\
-tau*np.dot(np.array([-1*by,bx,0]).T,np.dot(S_inv,np.dot(dS,Omega)))
ddzhi = np.dot(C_inv,sna-ddyaw*d-e)
ddzhi[2] = ddyaw
dOmega = -np.dot(S_inv,np.dot(dS,Omega))+np.dot(S_inv,ddzhi)
Mu = np.dot(self.J,dOmega) + np.cross(Omega,np.dot(self.J,Omega))
MT = np.zeros(4)
MT[:3] = Mu
MT[3] = Thrust
G1_inv = np.linalg.inv(self.G1)
Ws2 = np.dot(G1_inv,MT)
# Ws2 = np.clip(Ws2, np.power(self.w_min,2), np.power(self.w_max,2))
# Ws = np.sqrt(Ws2)
Ws = np.copysign(np.sqrt(np.abs(Ws2)),Ws2)
rpy = np.array([roll, pitch, yaw])
rpy_q = Euler2quat(np.array([roll, pitch, yaw]))
state = {
'roll':roll,
'pitch':pitch,
'rpy':rpy,
'rpy_q':rpy_q,
'dzhi':dzhi,
'ddzhi':ddzhi,
'ut':MT
}
return Ws, state
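    # Usage sketch (illustrative): a pure hover state (all derivatives zero) should
    # yield prop speeds close to the static hover speed self.w_sta.
    #   model = QuadModel()
    #   hover = np.zeros(18)   # [pos, vel, acc, jerk, snap, yaw, dyaw, ddyaw]
    #   Ws, state = model.getWs(hover)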
def getWs_vector(self, status):
pos = np.array(status[:,0:3])
vel = np.array(status[:,3:6])
acc = | np.array(status[:,6:9]) | numpy.array |
"""
Non-Time-Uniform Confidence Intervals for Score Differentials
We implement (Lai et al., 2011) as a baseline. Importantly,
these confidence intervals are *not* valid at an arbitrary stopping time.
"""
import logging
from typing import Tuple, Union
import numpy as np
import scipy.stats
from comparecast.scoring import ScoringRule, get_scoring_rule
def confint_lai(
ps: np.ndarray,
qs: np.ndarray = None,
ys: np.ndarray = None,
true_probs: np.ndarray = None,
scoring_rule: Union[ScoringRule, str] = "brier",
alpha: float = 0.05,
) -> Tuple[np.ndarray, np.ndarray]:
"""Non-time-uniform & asymptotic confidence intervals for
forecast scores & score differentials.
See Section 3.2 of (Lai et al., 2011).
NOTE: true_probs is optionally used to better estimate the width;
it is only available for synthetic data.
"""
assert ys is not None, "data is not provided (third argument)"
if qs is None:
logging.info("computing fixed-time asymptotic CI for S(p, y)")
raise NotImplementedError
else:
logging.info("computing fixed-time asymptotic CI for S(p, y) - S(q, y)")
assert ps.shape == qs.shape == ys.shape
assert 0 <= alpha <= 1
score = get_scoring_rule(scoring_rule)
T = len(ps)
times = np.arange(1, T + 1)
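    # running (cumulative) average of the score differential S(p, y) - S(q, y) up to each time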
if scoring_rule == "winkler":
fsds = np.cumsum(score(ps, ys, cs=qs)) / times
else:
fsds = np.cumsum(score(ps, ys) - score(qs, ys)) / times
# variance: use true_probs if known
all_zeros, all_ones = np.repeat(0, T), np.repeat(1, T)
dsq = ((score(ps, all_ones) - score(ps, all_zeros)) -
(score(qs, all_ones) - score(qs, all_zeros))) ** 2
if scoring_rule == "winkler":
# qs is assumed to be the baseline forecaster
s = get_scoring_rule("brier")
lsq = np.where(ps >= qs,
s(ps, all_ones) - s(qs, all_ones),
s(ps, all_zeros) - s(qs, all_zeros)) ** 2
lsq = | np.where(lsq != 0, lsq, 1e-8) | numpy.where |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python 2.7
@author: <NAME>
<EMAIL>
Last Update: 23.8.2018
Use Generative Model for posture extrapolation
"""
from datetime import datetime
import os, sys, numpy as np, argparse
from time import time
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
#from skimage.transform import resize
from scipy.io import loadmat
from scipy.misc import imread
from scipy.spatial.distance import euclidean, cdist
from PIL import Image
import imageio
import matplotlib.gridspec as gridspec
from sklearn.preprocessing import normalize
try:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
except:
from sklearn.lda import LDA
from sklearn.svm import LinearSVC
import peakutils
from utils import load_table,load_features, draw_border
sys.path.append('./magnification/')
from Generator import Generator, find_differences
#import config_pytorch as cfg
import config_pytorch_human as cfg
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--pages",type=int,default=10,
help="Number of plots to save")
parser.add_argument("-q", "--queries",type=int,default=10,
help="Number of queries per plot")
parser.add_argument("-n", "--nn",type=int,default=100,
help="Number of queries per plot")
parser.add_argument("-g", "--gpu",type=int,default=0,
help="GPU device to use for image generation")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu)
############################################
# 0. Functions
############################################
def load_image(v,f,c=None):
im_path = (cfg.crops_path+v+'/%06d.jpg'%f)
if not os.path.exists(im_path):
return 255*np.ones([128,128,3],'uint8')
im = Image.open(im_path)
return np.array(im.resize((128,128)))
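# Example (illustrative; 'v001' is a hypothetical video id): load_image('v001', 42)
# returns a 128x128 RGB crop, or a white placeholder if the frame is missing.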
############################################
# 0. Prepare magnifier object
############################################
generator = Generator(z_dim=cfg.vae_encode_dim,path_model=cfg.vae_weights_path)
############################################
# 1. Load sequences and features
############################################
detections = load_table(cfg.detection_file,asDict=False)
det_cohort= np.array(detections['cohort']) # Used for classifier and plots
det_time = | np.array(detections['time']) | numpy.array |
import argparse
import torch
from torch.nn import functional as F
import numpy as np
from scipy.stats import sem
from pandas import read_csv
from torch.utils import data
from Model.model import Model
from Utils.record import record
from DataLoader.dataset import Dataset
from DataLoader.collate import custom_collate
parser = argparse.ArgumentParser('Predict')
parser.add_argument('--job_id', type=int)
parser.add_argument('--epoch', type=int)
parser.add_argument('--gamma_size', type=int, default = 25)
parser.add_argument('--z_size', type=int, default = 20)
parser.add_argument('--decoder_size', type=int, default = 65)
parser.add_argument('--Nflows', type=int, default = 3)
parser.add_argument('--flow_hidden', type=int, default = 24)
parser.add_argument('--f_nn_size', type=int, default = 12)
parser.add_argument('--W_prior_scale', type=float, default = 0.1)
args = parser.parse_args()
torch.set_num_threads(6)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
N = 29
sims = 250
dt = 0.5
length = 50
pop_avg = np.load('Data/Population_averages.npy')
pop_avg_env = np.load('Data/Population_averages_env.npy')
pop_std = np.load('Data/Population_std.npy')
pop_avg_ = torch.from_numpy(pop_avg[...,1:]).float()
pop_avg_env = torch.from_numpy(pop_avg_env).float()
pop_std = torch.from_numpy(pop_std[...,1:]).float()
pop_avg_bins = np.arange(40, 105, 3)[:-2]
test_name = 'Data/test.csv'
test_set = Dataset(test_name, N, pop=False, min_count=10)
num_test = 400
test_generator = data.DataLoader(test_set, batch_size = num_test, shuffle = False, collate_fn = lambda x: custom_collate(x, pop_avg_, pop_avg_env, pop_std, 1.0))
mean_T = test_set.mean_T
std_T = test_set.std_T
mean_deficits = torch.Tensor(read_csv('Data/mean_deficits.txt', index_col=0,sep=',',header=None).values[1:-3].flatten())
std_deficits = torch.Tensor(read_csv('Data/std_deficits.txt', index_col=0,sep=',',header=None, names = ['variable']).values[1:-3].flatten())
model = Model(device, N, args.gamma_size, args.z_size, args.decoder_size, args.Nflows, args.flow_hidden, args.f_nn_size, mean_T, std_T, dt, length).to(device)
model.load_state_dict(torch.load('Parameters/train%d_Model_DJIN_epoch%d.params'%(args.job_id, args.epoch),map_location=device))
model = model.eval()
mean_results = np.zeros((test_set.__len__(), 100, N+1)) * np.nan
std_results = np.zeros((test_set.__len__(), 100, N+1)) * np.nan
S_results = np.zeros((test_set.__len__(), 100, 3)) * np.nan
with torch.no_grad():
sigma_posterior = torch.distributions.gamma.Gamma(model.logalpha.exp(), model.logbeta.exp())
start = 0
for data in test_generator:
size = data['Y'].shape[0]
X = torch.zeros(sims, size, int(length/dt), N).to(device)
X_std = torch.zeros(sims, size, int(length/dt), N).to(device)
S = torch.zeros(sims, size, int(length/dt)).to(device)
alive = torch.ones(sims, size, int(length/dt)).to(device)
for s in range(sims):
sigma_y = sigma_posterior.sample((data['Y'].shape[0], length*2))
pred_X, t, pred_S, pred_logGamma, pred_sigma_X, context, y, times, mask, survival_mask, dead_mask, after_dead_mask, censored, sample_weights, med, env, z_sample, prior_entropy, log_det, recon_mean_x0, drifts, mask0, W = model(data, sigma_y, test=True)
X[s] = pred_X
X_std[s] = pred_X + sigma_y*torch.randn_like(pred_X)
S[s] = pred_S.exp()
alive[s,:,1:] = torch.cumprod(torch.bernoulli(torch.exp(-1*pred_logGamma.exp()[:,:-1]*dt)), dim=1)
t0 = t[:,0]
record_times = [torch.from_numpy(np.arange(t0[b].cpu(), 121, 1)).to(device) for b in range(size)]
X_record, S_record = record(t, X, S, record_times, dt)
X_std_record, alive_record = record(t, X_std, alive, record_times, dt)
t0 = t0.cpu()
X_sum = []
X_sum_std = []
X_sum2 = []
X_count = []
for b in range(size):
X_sum.append(torch.sum(X_record[b].permute(2,0,1)*alive_record[b], dim = 1).cpu())
X_sum_std.append(torch.sum(X_std_record[b].permute(2,0,1)*alive_record[b], dim = 1).cpu())
X_sum2.append(torch.sum(X_std_record[b].pow(2).permute(2,0,1)*alive_record[b], dim = 1).cpu())
X_count.append(torch.sum(alive_record[b], dim = 0).cpu())
for b in range(size):
mean_results[start+b, :len(np.arange(t0[b], 121, 1)), 0] = np.arange(t0[b], 121, 1)
std_results[start+b, :len( | np.arange(t0[b], 121, 1) | numpy.arange |